| field | value |
|---|---|
| blob_id | 1f49226de6a937a2446a672f978dfcee50e12379 |
| directory_id | dea198896f679e577a3fd0923e3fa4470da4b9cc |
| path | /journal/pyfakefs_mutants/LCR_BoolOp_mutant_1507055465.py |
| content_id | 26bd0cf441af76594af236ad2c0fa0dba0140aaf |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | naustarg/cbmcmutate |
| snapshot_id | f138ab2b04b4be70d735de90815ac670ae6042ce |
| revision_id | a6ee6fd395338bb2dfd6bdffabb2dc484cb303f1 |
| branch_name | refs/heads/master |
| visit_date | 2020-04-04T08:10:15.913309 |
| revision_date | 2018-05-21T18:23:58 |
| committer_date | 2018-05-21T18:23:58 |
| github_id | null |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 219332 |
| extension | py |

# line: 91
'A fake filesystem implementation for unit testing.\n\n:Includes:\n * FakeFile: Provides the appearance of a real file.\n * FakeDirectory: Provides the appearance of a real directory.\n * FakeFilesystem: Provides the appearance of a real directory hierarchy.\n * FakeOsModule: Uses FakeFilesystem to provide a fake os module replacement.\n * FakePathModule: Faked os.path module replacement.\n * FakeFileOpen: Faked file() and open() function replacements.\n\n:Usage:\n\n>>> from pyfakefs import fake_filesystem\n>>> filesystem = fake_filesystem.FakeFilesystem()\n>>> os_module = fake_filesystem.FakeOsModule(filesystem)\n>>> pathname = \'/a/new/dir/new-file\'\n\nCreate a new file object, creating parent directory objects as needed:\n\n>>> os_module.path.exists(pathname)\nFalse\n>>> new_file = filesystem.CreateFile(pathname)\n\nFile objects can\'t be overwritten:\n\n>>> os_module.path.exists(pathname)\nTrue\n>>> try:\n... filesystem.CreateFile(pathname)\n... except IOError as e:\n... assert e.errno == errno.EEXIST, \'unexpected errno: %d\' % e.errno\n... assert e.strerror == \'File already exists in fake filesystem\'\n\nRemove a file object:\n\n>>> filesystem.RemoveObject(pathname)\n>>> os_module.path.exists(pathname)\nFalse\n\nCreate a new file object at the previous path:\n\n>>> beatles_file = filesystem.CreateFile(pathname,\n... contents=\'Dear Prudence\\nWon\\\'t you come out to play?\\n\')\n>>> os_module.path.exists(pathname)\nTrue\n\nUse the FakeFileOpen class to read fake file objects:\n\n>>> file_module = fake_filesystem.FakeFileOpen(filesystem)\n>>> for line in file_module(pathname):\n... print(line.rstrip())\n...\nDear Prudence\nWon\'t you come out to play?\n\nFile objects cannot be treated like directory objects:\n\n>>> os_module.listdir(pathname) #doctest: +NORMALIZE_WHITESPACE\nTraceback (most recent call last):\n File "fake_filesystem.py", line 291, in listdir\n raise OSError(errno.ENOTDIR,\nOSError: [Errno 20] Fake os module: not a directory: \'/a/new/dir/new-file\'\n\nThe FakeOsModule can list fake directory objects:\n\n>>> os_module.listdir(os_module.path.dirname(pathname))\n[\'new-file\']\n\nThe FakeOsModule also supports stat operations:\n\n>>> import stat\n>>> stat.S_ISREG(os_module.stat(pathname).st_mode)\nTrue\n>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)\nTrue\n'
# line: 92
import codecs
# line: 93
import errno
# line: 94
import heapq
# line: 95
import io
# line: 96
import locale
# line: 97
import platform
# line: 98
import os
# line: 99
import sys
# line: 100
import time
# line: 101
import warnings
# line: 103
from collections import namedtuple
# line: 105
import stat
# line: 106
from copy import copy
# line: 108
__pychecker__ = 'no-reimportself'
# line: 110
__version__ = '3.3'
# line: 112
PERM_READ = 256
# line: 113
PERM_WRITE = 128
# line: 114
PERM_EXE = 64
# line: 115
PERM_DEF = 511
# line: 116
PERM_DEF_FILE = 438
# line: 117
PERM_ALL = 4095
# line: 119
_OpenModes = namedtuple('open_modes', 'must_exist can_read can_write truncate append must_not_exist')
# line: 125
_OPEN_MODE_MAP = {'r': (True, True, False, False, False, False), 'w': (False, False, True, True, False, False), 'a': (False, False, True, False, True, False), 'r+': (True, True, True, False, False, False), 'w+': (False, True, True, True, False, False), 'a+': (False, True, True, False, True, False), }
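# Added illustration (not part of the original module): each mode string in
# _OPEN_MODE_MAP unpacks into the _OpenModes named tuple defined above, e.g.:
#   >>> _OpenModes(*_OPEN_MODE_MAP['r+'])
#   open_modes(must_exist=True, can_read=True, can_write=True, truncate=False, append=False, must_not_exist=False)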
# line: 136
if ((sys.version_info[0] < 3) and (sys.platform != 'win32')):
# line: 137
_OPEN_MODE_MAP['rw'] = (True, True, True, False, False, False)
# line: 139
if (sys.version_info >= (3, 3)):
# line: 140
_OPEN_MODE_MAP['x'] = (False, False, True, False, False, True)
# line: 141
_OPEN_MODE_MAP['x+'] = (False, True, True, False, False, True)
# line: 143
if sys.platform.startswith('linux'):
# line: 146
_MAX_LINK_DEPTH = 40
else:
# line: 149
_MAX_LINK_DEPTH = 32
# line: 151
FAKE_PATH_MODULE_DEPRECATION = 'Do not instantiate a FakePathModule directly; let FakeOsModule instantiate it. See the FakeOsModule docstring for details.'
# line: 155
if (sys.platform == 'win32'):
# line: 157
OSError = WindowsError
# line: 160
class FakeLargeFileIoException(Exception):
# line: 163
'Exception thrown on unsupported operations for fake large files.\n Fake large files have a size with no real content.\n '
# line: 165
def __init__(self, file_path):
# line: 166
super(FakeLargeFileIoException, self).__init__(('Read and write operations not supported for fake large file: %s' % file_path))
# line: 171
def CopyModule(old):
# line: 172
'Recompiles and creates new module object.'
# line: 173
saved = sys.modules.pop(old.__name__, None)
# line: 174
new = __import__(old.__name__)
# line: 175
sys.modules[old.__name__] = saved
# line: 176
return new
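# Usage sketch (added): CopyModule returns a freshly imported module object,
# so the copy can be monkey-patched without touching the cached original:
#   >>> import os as real_os
#   >>> os_copy = CopyModule(real_os)
#   >>> os_copy is real_os
#   False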
# line: 179
class _FakeStatResult(object):
# line: 183
'Mimics os.stat_result for use as return type of `stat()` and similar.\n This is needed because `os.stat_result` does not allow setting\n nanosecond times directly.\n '
# line: 184
long_type = (long if (sys.version_info < (3,)) else int)
# line: 186
def __init__(self, initial_time=None):
# line: 187
self.use_float = FakeOsModule.stat_float_times
# line: 188
self.st_mode = None
# line: 189
self.st_ino = None
# line: 190
self.st_dev = None
# line: 191
self.st_nlink = 0
# line: 192
self.st_uid = None
# line: 193
self.st_gid = None
# line: 194
self.st_size = None
# line: 195
if (initial_time is not None):
# line: 196
self._st_atime_ns = self.long_type((initial_time * 1000000000.0))
else:
# line: 198
self._st_atime_ns = None
# line: 199
self._st_mtime_ns = self._st_atime_ns
# line: 200
self._st_ctime_ns = self._st_atime_ns
# line: 202
def __eq__(self, other):
# line: 203
return (isinstance(other, _FakeStatResult) and (self._st_atime_ns == other._st_atime_ns) and (self._st_ctime_ns == other._st_ctime_ns) and (self._st_mtime_ns == other._st_mtime_ns) and (self.st_size == other.st_size) and (self.st_gid == other.st_gid) and (self.st_uid == other.st_uid) and (self.st_nlink == other.st_nlink) and (self.st_dev == other.st_dev) and (self.st_ino == other.st_ino) and (self.st_mode == other.st_mode))
# line: 217
def __ne__(self, other):
# line: 218
return (not (self == other))
# line: 220
def copy(self):
# line: 223
'Return a copy where the float usage is hard-coded to mimic the behavior\n of the real os.stat_result.\n '
# line: 224
use_float = self.use_float()
# line: 225
stat_result = copy(self)
# line: 226
stat_result.use_float = (lambda : use_float)
# line: 227
return stat_result
# line: 229
def set_from_stat_result(self, stat_result):
# line: 233
'Set values from a real os.stat_result.\n Note: values that are controlled by the fake filesystem are not set.\n This includes st_ino, st_dev and st_nlink.\n '
# line: 234
self.st_mode = stat_result.st_mode
# line: 235
self.st_uid = stat_result.st_uid
# line: 236
self.st_gid = stat_result.st_gid
# line: 237
self.st_size = stat_result.st_size
# line: 238
if (sys.version_info < (3, 3)):
# line: 239
self._st_atime_ns = self.long_type((stat_result.st_atime * 1000000000.0))
# line: 240
self._st_mtime_ns = self.long_type((stat_result.st_mtime * 1000000000.0))
# line: 241
self._st_ctime_ns = self.long_type((stat_result.st_ctime * 1000000000.0))
else:
# line: 243
self._st_atime_ns = stat_result.st_atime_ns
# line: 244
self._st_mtime_ns = stat_result.st_mtime_ns
# line: 245
self._st_ctime_ns = stat_result.st_ctime_ns
# line: 247
@property
# line: 247
def st_ctime(self):
# line: 249
'Return the creation time in seconds.'
# line: 250
ctime = (self._st_ctime_ns / 1000000000.0)
# line: 251
return (ctime if self.use_float() else int(ctime))
# line: 253
@property
# line: 253
def st_atime(self):
# line: 255
'Return the access time in seconds.'
# line: 256
atime = (self._st_atime_ns / 1000000000.0)
# line: 257
return (atime if self.use_float() else int(atime))
# line: 259
@property
# line: 259
def st_mtime(self):
# line: 261
'Return the modification time in seconds.'
# line: 262
mtime = (self._st_mtime_ns / 1000000000.0)
# line: 263
return (mtime if self.use_float() else int(mtime))
# line: 265
@st_ctime.setter
# line: 265
def st_ctime(self, val):
# line: 267
'Set the creation time in seconds.'
# line: 268
self._st_ctime_ns = self.long_type((val * 1000000000.0))
# line: 270
@st_atime.setter
# line: 270
def st_atime(self, val):
# line: 272
'Set the access time in seconds.'
# line: 273
self._st_atime_ns = self.long_type((val * 1000000000.0))
# line: 275
@st_mtime.setter
# line: 275
def st_mtime(self, val):
# line: 277
'Set the modification time in seconds.'
# line: 278
self._st_mtime_ns = self.long_type((val * 1000000000.0))
# line: 280
def __getitem__(self, item):
# line: 281
'Implement item access to mimic `os.stat_result` behavior.'
# line: 282
if (item == stat.ST_MODE):
# line: 283
return self.st_mode
# line: 284
if (item == stat.ST_INO):
# line: 285
return self.st_ino
# line: 286
if (item == stat.ST_DEV):
# line: 287
return self.st_dev
# line: 288
if (item == stat.ST_NLINK):
# line: 289
return self.st_nlink
# line: 290
if (item == stat.ST_UID):
# line: 291
return self.st_uid
# line: 292
if (item == stat.ST_GID):
# line: 293
return self.st_gid
# line: 294
if (item == stat.ST_SIZE):
# line: 295
return self.st_size
# line: 296
if (item == stat.ST_ATIME):
# line: 298
return int(self.st_atime)
# line: 299
if (item == stat.ST_MTIME):
# line: 300
return int(self.st_mtime)
# line: 301
if (item == stat.ST_CTIME):
# line: 302
return int(self.st_ctime)
# line: 304
if (sys.version_info >= (3, 3)):
# line: 306
@property
# line: 306
def st_atime_ns(self):
# line: 308
'Return the access time in nanoseconds.'
# line: 309
return self._st_atime_ns
# line: 311
@property
# line: 311
def st_mtime_ns(self):
# line: 313
'Return the modification time in nanoseconds.'
# line: 314
return self._st_mtime_ns
# line: 316
@property
# line: 316
def st_ctime_ns(self):
# line: 318
'Return the creation time in nanoseconds.'
# line: 319
return self._st_ctime_ns
# line: 321
@st_atime_ns.setter
# line: 321
def st_atime_ns(self, val):
# line: 323
'Set the access time in nanoseconds.'
# line: 324
self._st_atime_ns = val
# line: 326
@st_mtime_ns.setter
# line: 326
def st_mtime_ns(self, val):
# line: 328
'Set the modification time of the fake file in nanoseconds.'
# line: 329
self._st_mtime_ns = val
# line: 331
@st_ctime_ns.setter
# line: 331
def st_ctime_ns(self, val):
# line: 333
'Set the creation time of the fake file in nanoseconds.'
# line: 334
self._st_ctime_ns = val
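# Added example: unlike the real os.stat_result, _FakeStatResult stores
# nanosecond timestamps that can be read and set directly (the *_ns
# properties above exist on Python >= 3.3 only):
#   >>> sr = _FakeStatResult(initial_time=1.5)
#   >>> sr.st_atime_ns
#   1500000000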
# line: 337
class FakeFile(object):
# line: 353
"Provides the appearance of a real file.\n\n Attributes currently faked out:\n st_mode: user-specified, otherwise S_IFREG\n st_ctime: the time.time() timestamp of the file change time (updated\n each time a file's attributes is modified).\n st_atime: the time.time() timestamp when the file was last accessed.\n st_mtime: the time.time() timestamp when the file was last modified.\n st_size: the size of the file\n st_nlink: the number of hard links to the file\n st_ino: the inode number - a unique number identifying the file\n st_dev: a unique number identifying the (fake) file system device the file belongs to\n\n Other attributes needed by os.stat are assigned default value of None\n these include: st_uid, st_gid\n "
# line: 355
def __init__(self, name, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents=None, filesystem=None, encoding=None, errors=None):
# line: 371
'init.\n\n Args:\n name: name of the file/directory, without parent path information\n st_mode: the stat.S_IF* constant representing the file type (i.e.\n stat.S_IFREG, stat.S_IFDIR)\n contents: the contents of the filesystem object; should be a string or byte object for\n regular files, and a list of other FakeFile or FakeDirectory objects\n for FakeDirectory objects\n filesystem: the fake filesystem where the file is created.\n New in pyfakefs 2.9.\n encoding: if contents is a unicode string, the encoding used for serialization\n errors: the error mode used for encoding/decoding errors\n New in pyfakefs 3.2.\n '
# line: 372
self.name = name
# line: 373
self.stat_result = _FakeStatResult(time.time())
# line: 374
self.stat_result.st_mode = st_mode
# line: 375
self.encoding = encoding
# line: 376
self.errors = (errors or 'strict')
# line: 377
self._byte_contents = self._encode_contents(contents)
# line: 378
self.stat_result.st_size = (len(self._byte_contents) if (self._byte_contents is not None) else 0)
# line: 381
if (filesystem is None):
# line: 382
raise ValueError('filesystem shall not be None')
# line: 383
self.filesystem = filesystem
# line: 384
self.epoch = 0
# line: 385
self.parent_dir = None
# line: 387
@property
# line: 387
def byte_contents(self):
# line: 389
return self._byte_contents
# line: 391
@property
# line: 391
def contents(self):
# line: 393
'Return the contents as string with the original encoding.'
# line: 394
if ((sys.version_info >= (3, 0)) and isinstance(self.byte_contents, bytes)):
# line: 395
return self.byte_contents.decode((self.encoding or locale.getpreferredencoding(False)), errors=self.errors)
# line: 398
return self.byte_contents
# line: 400
def SetLargeFileSize(self, st_size):
# line: 413
"Sets the self.st_size attribute and replaces self.content with None.\n\n Provided specifically to simulate very large files without regards\n to their content (which wouldn't fit in memory).\n Note that read/write operations with such a file raise FakeLargeFileIoException.\n\n Args:\n st_size: (int) The desired file size\n\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space\n "
# line: 414
self._check_positive_int(st_size)
# line: 415
if self.st_size:
# line: 416
self.SetSize(0)
# line: 417
self.filesystem.ChangeDiskUsage(st_size, self.name, self.st_dev)
# line: 418
self.st_size = st_size
# line: 419
self._byte_contents = None
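# Usage sketch (added): a "large" file keeps only its size, so content-based
# operations raise FakeLargeFileIoException. 'fake_file' stands for a
# FakeFile obtained from a FakeFilesystem (e.g. via CreateFile, defined
# later in this module):
#   >>> fake_file.SetLargeFileSize(1 << 40)
#   >>> fake_file.IsLargeFile()
#   True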
# line: 421
def _check_positive_int(self, size):
# line: 423
int_types = ((int, long) if (sys.version_info < (3, 0)) else int)
# line: 424
if ((not isinstance(size, int_types)) or (size < 0)):
# line: 425
raise IOError(errno.ENOSPC, ('Fake file object: size must be a non-negative integer, but is %s' % size), self.name)
# line: 429
def IsLargeFile(self):
# line: 430
'Return True if this file was initialized with size but no contents.'
# line: 431
return (self._byte_contents is None)
# line: 433
def _encode_contents(self, contents):
# line: 435
if ((sys.version_info >= (3, 0)) and isinstance(contents, str)):
# line: 436
contents = bytes(contents, (self.encoding or locale.getpreferredencoding(False)), self.errors)
elif ((sys.version_info < (3, 0)) and isinstance(contents, unicode)):
# line: 438
contents = contents.encode((self.encoding or locale.getpreferredencoding(False)), self.errors)
# line: 439
return contents
# line: 441
def _set_initial_contents(self, contents):
# line: 450
'Sets the file contents and size.\n Called internally after initial file creation.\n\n Args:\n contents: string, new content of file.\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space\n '
# line: 451
contents = self._encode_contents(contents)
# line: 452
st_size = len(contents)
# line: 454
if self._byte_contents:
# line: 455
self.SetSize(0)
# line: 456
current_size = (self.st_size or 0)
# line: 457
self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
# line: 458
self._byte_contents = contents
# line: 459
self.st_size = st_size
# line: 460
self.epoch += 1
# line: 462
def SetContents(self, contents, encoding=None):
# line: 475
'Sets the file contents and size and increases the modification time.\n\n Args:\n contents: (str, bytes, unicode) new content of file.\n encoding: (str) the encoding to be used for writing the contents\n if they are a unicode string.\n If not given, the locale preferred encoding is used.\n New in pyfakefs 2.9.\n\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space.\n '
# line: 476
self.encoding = encoding
# line: 477
self._set_initial_contents(contents)
# line: 478
current_time = time.time()
# line: 479
self.st_ctime = current_time
# line: 480
self.st_mtime = current_time
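# Added example: SetContents re-encodes the data, adjusts the disk usage and
# bumps st_ctime/st_mtime; 'fake_file' is assumed to be an existing FakeFile:
#   >>> fake_file.SetContents(u'prudence', encoding='utf-8')
#   >>> fake_file.GetSize()
#   8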
# line: 482
def GetSize(self):
# line: 485
'Returns the size in bytes of the file contents.\n New in pyfakefs 2.9.\n '
# line: 486
return self.st_size
# line: 488
def GetPath(self):
# line: 489
'Return the full path of the current object.'
# line: 490
names = []
# line: 491
obj = self
# line: 492
while obj:
# line: 493
names.insert(0, obj.name)
# line: 494
obj = obj.parent_dir
# line: 495
sep = self.filesystem._path_separator(self.name)
# line: 496
return self.filesystem.NormalizePath(sep.join(names[1:]))
# line: 498
def SetSize(self, st_size):
# line: 507
'Resizes file content, padding with nulls if new size exceeds the old.\n\n Args:\n st_size: The desired size for the file.\n\n Raises:\n IOError: if the st_size arg is not a non-negative integer\n or if st_size exceeds the available file system space\n '
# line: 509
self._check_positive_int(st_size)
# line: 510
current_size = (self.st_size or 0)
# line: 511
self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
# line: 512
if self._byte_contents:
# line: 513
if (st_size < current_size):
# line: 514
self._byte_contents = self._byte_contents[:st_size]
elif (sys.version_info < (3, 0)):
# line: 517
self._byte_contents = ('%s%s' % (self._byte_contents, ('\x00' * (st_size - current_size))))
else:
# line: 520
self._byte_contents += ('\x00' * (st_size - current_size))
# line: 521
self.st_size = st_size
# line: 522
self.epoch += 1
# line: 524
def SetATime(self, st_atime):
# line: 529
'Set the self.st_atime attribute.\n\n Args:\n st_atime: The desired access time.\n '
# line: 530
self.st_atime = st_atime
# line: 532
def SetMTime(self, st_mtime):
# line: 537
'Set the self.st_mtime attribute.\n\n Args:\n st_mtime: The desired modification time.\n '
# line: 538
self.st_mtime = st_mtime
# line: 540
def SetCTime(self, st_ctime):
# line: 546
'Set the self.st_ctime attribute.\n New in pyfakefs 3.0.\n\n Args:\n st_ctime: The desired creation time.\n '
# line: 547
self.st_ctime = st_ctime
# line: 549
def __getattr__(self, item):
# line: 550
'Forward some properties to stat_result.'
# line: 551
return getattr(self.stat_result, item)
# line: 553
def __setattr__(self, key, value):
# line: 554
'Forward some properties to stat_result.'
# line: 555
if (key in ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime', 'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns')):
# line: 558
return setattr(self.stat_result, key, value)
# line: 559
return super(FakeFile, self).__setattr__(key, value)
# line: 561
def __str__(self):
# line: 562
return ('%s(%o)' % (self.name, self.st_mode))
# line: 564
def SetIno(self, st_ino):
# line: 571
'Set the self.st_ino attribute.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should be used with caution.\n\n Args:\n st_ino: (int) The desired inode.\n '
# line: 572
self.st_ino = st_ino
# line: 575
class FakeFileFromRealFile(FakeFile):
# line: 580
'Represents a fake file copied from the real file system.\n \n The contents of the file are read on demand only.\n New in pyfakefs 3.2.\n '
# line: 582
def __init__(self, file_path, filesystem, read_only=True):
# line: 593
'init.\n\n Args:\n file_path: path to the existing file.\n filesystem: the fake filesystem where the file is created.\n read_only: if set, the file is treated as read-only, e.g. a write access raises an exception;\n otherwise, writing to the file changes only the fake file, as usual.\n\n Raises:\n OSError: if the file does not exist in the real file system.\n '
# line: 594
real_stat = os.stat(file_path)
# line: 596
super(FakeFileFromRealFile, self).__init__(name=os.path.basename(file_path), filesystem=filesystem)
# line: 598
self.stat_result.set_from_stat_result(real_stat)
# line: 599
if read_only:
# line: 600
self.st_mode &= 261924
# line: 601
self.file_path = file_path
# line: 602
self.contents_read = False
# line: 604
@property
# line: 604
def byte_contents(self):
# line: 606
if (not self.contents_read):
# line: 607
self.contents_read = True
# line: 608
with io.open(self.file_path, 'rb') as f:
# line: 609
self._byte_contents = f.read()
# line: 611
self.st_atime = os.stat(self.file_path).st_atime
# line: 612
return self._byte_contents
# line: 614
def IsLargeFile(self):
# line: 615
'The contents are never faked.'
# line: 616
return False
# line: 619
class FakeDirectory(FakeFile):
# line: 620
'Provides the appearance of a real directory.'
# line: 622
def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
# line: 629
'init.\n\n Args:\n name: name of the file/directory, without parent path information\n perm_bits: permission bits. defaults to 0o777.\n filesystem: if set, the fake filesystem where the directory is created\n '
# line: 630
FakeFile.__init__(self, name, (stat.S_IFDIR | perm_bits), {}, filesystem=filesystem)
# line: 632
self.st_nlink += 1
# line: 634
def SetContents(self, contents, encoding=None):
# line: 635
error_class = (OSError if self.filesystem.is_windows_fs else IOError)
# line: 636
raise error_class(errno.EISDIR, 'Trying to write to directory')
# line: 638
@property
# line: 638
def contents(self):
# line: 640
'Return the list of contained directory entries.'
# line: 641
return self.byte_contents
# line: 643
@property
# line: 643
def ordered_dirs(self):
# line: 645
'Return the list of contained directory entry names ordered by creation order.'
# line: 646
return [item[0] for item in sorted(self.byte_contents.items(), key=(lambda entry: entry[1].st_ino))]
# line: 649
def AddEntry(self, path_object):
# line: 658
'Adds a child FakeFile to this directory.\n\n Args:\n path_object: FakeFile instance to add as a child of this directory.\n\n Raises:\n OSError: if the directory has no write permission (Posix only)\n OSError: if the file or directory to be added already exists\n '
# line: 659
if ((not (self.st_mode & PERM_WRITE)) and (not self.filesystem.is_windows_fs)):
# line: 660
raise OSError(errno.EACCES, 'Permission Denied', self.GetPath())
# line: 662
if (path_object.name in self.contents):
# line: 663
raise OSError(errno.EEXIST, 'Object already exists in fake filesystem', self.GetPath())
# line: 667
self.contents[path_object.name] = path_object
# line: 668
path_object.parent_dir = self
# line: 669
self.st_nlink += 1
# line: 670
path_object.st_nlink += 1
# line: 671
path_object.st_dev = self.st_dev
# line: 672
if (path_object.st_nlink == 1):
# line: 673
self.filesystem.ChangeDiskUsage(path_object.GetSize(), path_object.name, self.st_dev)
# line: 675
def GetEntry(self, pathname_name):
# line: 686
'Retrieves the specified child file or directory entry.\n\n Args:\n pathname_name: basename of the child object to retrieve.\n\n Returns:\n fake file or directory object.\n\n Raises:\n KeyError: if no child exists by the specified name.\n '
# line: 687
return self.contents[pathname_name]
# line: 689
def RemoveEntry(self, pathname_name, recursive=True):
# line: 701
'Removes the specified child file or directory.\n\n Args:\n pathname_name: basename of the child object to remove.\n recursive: if True (default), the entries in contained directories are deleted first.\n Needed to propagate removal errors (e.g. permission problems) from contained entries.\n New in pyfakefs 2.9.\n\n Raises:\n KeyError: if no child exists by the specified name.\n OSError: if user lacks permission to delete the file, or (Windows only) the file is open.\n '
# line: 702
entry = self.contents[pathname_name]
# line: 703
if ((entry.st_mode & PERM_WRITE) == 0):
# line: 704
raise OSError(errno.EACCES, 'Trying to remove object without write permission', pathname_name)
# line: 706
if (self.filesystem.is_windows_fs and self.filesystem.HasOpenFile(entry)):
# line: 707
raise OSError(errno.EACCES, 'Trying to remove an open file', pathname_name)
# line: 708
if (recursive and isinstance(entry, FakeDirectory)):
# line: 709
while entry.contents:
# line: 710
entry.RemoveEntry(list(entry.contents)[0])
elif (entry.st_nlink == 1):
# line: 712
self.filesystem.ChangeDiskUsage((- entry.GetSize()), pathname_name, entry.st_dev)
# line: 714
self.st_nlink -= 1
# line: 715
entry.st_nlink -= 1
# line: 716
assert (entry.st_nlink >= 0)
# line: 718
del self.contents[pathname_name]
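# Added sketch of the hard-link accounting above: AddEntry bumps st_nlink on
# both the directory and the child, and RemoveEntry reverses it. 'fs' stands
# for a FakeFilesystem instance (CreateDirectory/CreateFile are defined later
# in this module):
#   >>> parent = fs.CreateDirectory('/parent')
#   >>> before = parent.st_nlink
#   >>> child = fs.CreateFile('/parent/child')  # AddEntry is called internally
#   >>> parent.st_nlink == before + 1
#   True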
# line: 720
def GetSize(self):
# line: 723
'Return the total size of all files contained in this directory tree.\n New in pyfakefs 2.9.\n '
# line: 724
return sum([item[1].GetSize() for item in self.contents.items()])
# line: 726
def HasParentObject(self, dir_object):
# line: 728
'Return `True` if dir_object is a direct or indirect parent directory,\n or if both are the same object.'
# line: 729
obj = self
# line: 730
while obj:
# line: 731
if (obj == dir_object):
# line: 732
return True
# line: 733
obj = obj.parent_dir
# line: 734
return False
# line: 736
def __str__(self):
# line: 737
description = (super(FakeDirectory, self).__str__() + ':\n')
# line: 738
for item in self.contents:
# line: 739
item_desc = self.contents[item].__str__()
# line: 740
for line in item_desc.split('\n'):
# line: 741
if line:
# line: 742
description = (((description + ' ') + line) + '\n')
# line: 743
return description
# line: 746
class FakeDirectoryFromRealDirectory(FakeDirectory):
# line: 751
'Represents a fake directory copied from the real file system.\n \n The contents of the directory are read on demand only.\n New in pyfakefs 3.2.\n '
# line: 753
def __init__(self, dir_path, filesystem, read_only):
# line: 765
'init.\n\n Args:\n dir_path: full directory path\n filesystem: the fake filesystem where the directory is created\n read_only: if set, all files under the directory are treated as read-only,\n e.g. a write access raises an exception;\n otherwise, writing to the files changes only the fake files, as usual.\n \n Raises:\n OSError if the directory does not exist in the real file system\n '
# line: 766
real_stat = os.stat(dir_path)
# line: 767
super(FakeDirectoryFromRealDirectory, self).__init__(name=os.path.split(dir_path)[1], perm_bits=real_stat.st_mode, filesystem=filesystem)
# line: 772
self.st_ctime = real_stat.st_ctime
# line: 773
self.st_atime = real_stat.st_atime
# line: 774
self.st_mtime = real_stat.st_mtime
# line: 775
self.st_gid = real_stat.st_gid
# line: 776
self.st_uid = real_stat.st_uid
# line: 777
self.dir_path = dir_path
# line: 778
self.read_only = read_only
# line: 779
self.contents_read = False
# line: 781
@property
# line: 781
def contents(self):
# line: 783
'Return the list of contained directory entries, loading them if not already loaded.'
# line: 784
if (not self.contents_read):
# line: 785
self.contents_read = True
# line: 786
self.filesystem.add_real_paths([os.path.join(self.dir_path, entry) for entry in os.listdir(self.dir_path)], read_only=self.read_only)
# line: 789
return self.byte_contents
# line: 791
def GetSize(self):
# line: 793
if (not self.contents_read):
# line: 794
return 0
# line: 795
return super(FakeDirectoryFromRealDirectory, self).GetSize()
# line: 798
class FakeFilesystem(object):
# line: 809
'Provides the appearance of a real directory tree for unit testing.\n\n Attributes:\n path_separator: The path separator, corresponds to `os.path.sep`.\n alternative_path_separator: Corresponds to `os.path.altsep`.\n is_windows_fs: `True` in a Windows file system, `False` otherwise.\n is_case_sensitive: `True` if a case-sensitive file system is assumed.\n root: The root `FakeDirectory` entry of the file system.\n cwd: The current working directory path.\n umask: The umask used for newly created files, see `os.umask`.\n '
# line: 811
def __init__(self, path_separator=os.path.sep, total_size=None):
# line: 823
"init.\n\n Args:\n path_separator: optional substitute for os.path.sep\n total_size: if not None, the total size in bytes of the root filesystem.\n New in pyfakefs 2.9.\n\n Example usage to emulate real file systems:\n filesystem = FakeFilesystem(\n alt_path_separator='/' if _is_windows else None)\n\n "
# line: 824
self.path_separator = path_separator
# line: 825
self.alternative_path_separator = os.path.altsep
# line: 826
if (path_separator != os.sep):
# line: 827
self.alternative_path_separator = None
# line: 832
self.is_windows_fs = (sys.platform == 'win32')
# line: 836
self.is_case_sensitive = (sys.platform not in ['win32', 'cygwin', 'darwin'])
# line: 838
self.root = FakeDirectory(self.path_separator, filesystem=self)
# line: 839
self.cwd = self.root.name
# line: 841
self.umask = os.umask(18)
# line: 842
os.umask(self.umask)
# line: 845
self.open_files = []
# line: 847
self._free_fd_heap = []
# line: 849
self._last_ino = 0
# line: 850
self._last_dev = 0
# line: 851
self.mount_points = {}
# line: 852
self.AddMountPoint(self.root.name, total_size)
# line: 854
@staticmethod
# line: 854
def _matching_string(matched, string):
# line: 858
'Return the string as a byte or unicode string, depending\n on the type of matched, assuming string is an ASCII string.\n '
# line: 859
if (string is None):
# line: 860
return string
# line: 861
if (sys.version_info < (3,)):
# line: 862
if isinstance(matched, unicode):
# line: 863
return unicode(string)
else:
# line: 865
return string
elif isinstance(matched, bytes):
# line: 868
return bytes(string, 'ascii')
else:
# line: 870
return string
# line: 872
def _path_separator(self, path):
# line: 873
'Return the path separator as the same type as path'
# line: 874
return self._matching_string(path, self.path_separator)
# line: 876
def _alternative_path_separator(self, path):
# line: 877
'Return the alternative path separator as the same type as path'
# line: 878
return self._matching_string(path, self.alternative_path_separator)
# line: 880
def _IsLinkSupported(self):
# line: 882
return ((not self.is_windows_fs) or (sys.version_info >= (3, 2)))
# line: 884
def AddMountPoint(self, path, total_size=None):
# line: 900
'Add a new mount point for a filesystem device.\n The mount point gets a new unique device number.\n New in pyfakefs 2.9.\n\n Args:\n path: The root path for the new mount path.\n\n total_size: The new total size of the added filesystem device\n in bytes. Defaults to infinite size.\n\n Returns:\n The newly created mount point dict.\n\n Raises:\n OSError: if trying to mount an existing mount point again.\n '
# line: 901
path = self.NormalizePath(path)
# line: 902
if (path in self.mount_points):
# line: 903
raise OSError(errno.EEXIST, 'Mount point cannot be added twice', path)
# line: 904
self._last_dev += 1
# line: 905
self.mount_points[path] = {'idev': self._last_dev, 'total_size': total_size, 'used_size': 0, }
# line: 909
root_dir = (self.root if (path == self.root.name) else self.CreateDirectory(path))
# line: 910
root_dir.st_dev = self._last_dev
# line: 911
return self.mount_points[path]
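# Usage sketch (added): every mount point becomes a separate fake device with
# its own device id and size accounting:
#   >>> fs = FakeFilesystem()
#   >>> mount = fs.AddMountPoint('/mnt/limited', total_size=100)
#   >>> (mount['total_size'], mount['used_size'])
#   (100, 0)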
# line: 913
def _AutoMountDriveIfNeeded(self, path, force=False):
# line: 914
if (self.is_windows_fs and (force or (not self._MountPointForPath(path)))):
# line: 915
drive = self.SplitDrive(path)[0]
# line: 916
if drive:
# line: 917
return self.AddMountPoint(path=drive)
# line: 919
def _MountPointForPath(self, path):
# line: 920
def to_str(string):
# line: 921
'Convert the str, unicode or byte object to a str using the default encoding.'
# line: 922
if ((string is None) or isinstance(string, str)):
# line: 923
return string
# line: 924
if (sys.version_info < (3, 0)):
# line: 925
return string.encode(locale.getpreferredencoding(False))
else:
# line: 927
return string.decode(locale.getpreferredencoding(False))
# line: 929
path = self.NormalizePath(self.NormalizeCase(path))
# line: 930
if (path in self.mount_points):
# line: 931
return self.mount_points[path]
# line: 932
mount_path = self._matching_string(path, '')
# line: 933
drive = self.SplitDrive(path)[:1]
# line: 934
for root_path in self.mount_points:
# line: 935
root_path = self._matching_string(path, root_path)
# line: 936
if (drive and (not root_path.startswith(drive))):
# line: 937
continue
# line: 938
if (path.startswith(root_path) and (len(root_path) > len(mount_path))):
# line: 939
mount_path = root_path
# line: 940
if mount_path:
# line: 941
return self.mount_points[to_str(mount_path)]
# line: 942
mount_point = self._AutoMountDriveIfNeeded(path, force=True)
# line: 943
assert mount_point
# line: 944
return mount_point
# line: 946
def _MountPointForDevice(self, idev):
# line: 947
for mount_point in self.mount_points.values():
# line: 948
if (mount_point['idev'] == idev):
# line: 949
return mount_point
# line: 951
def GetDiskUsage(self, path=None):
# line: 961
"Return the total, used and free disk space in bytes as named tuple,\n or placeholder values simulating unlimited space if not set.\n Note: This matches the return value of shutil.disk_usage().\n New in pyfakefs 2.9.\n\n Args:\n path: The disk space is returned for the file system device where\n path resides.\n Defaults to the root path (e.g. '/' on Unix systems).\n "
# line: 962
DiskUsage = namedtuple('usage', 'total, used, free')
# line: 963
if (path is None):
# line: 964
mount_point = self.mount_points[self.root.name]
else:
# line: 966
mount_point = self._MountPointForPath(path)
# line: 967
if (mount_point and (mount_point['total_size'] is not None)):
# line: 968
return DiskUsage(mount_point['total_size'], mount_point['used_size'], (mount_point['total_size'] - mount_point['used_size']))
# line: 970
return DiskUsage((((1024 * 1024) * 1024) * 1024), 0, (((1024 * 1024) * 1024) * 1024))
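# Added example, continuing the mount point sketch above: usage is reported
# per device as a (total, used, free) named tuple, mirroring
# shutil.disk_usage():
#   >>> fs.GetDiskUsage('/mnt/limited')
#   usage(total=100, used=0, free=100)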
# line: 972
def SetDiskUsage(self, total_size, path=None):
# line: 986
"Changes the total size of the file system, preserving the used space.\n Example usage: set the size of an auto-mounted Windows drive.\n New in pyfakefs 2.9.\n\n Args:\n total_size: The new total size of the filesystem in bytes.\n\n path: The disk space is changed for the file system device where\n path resides.\n Defaults to the root path (e.g. '/' on Unix systems).\n\n Raises:\n IOError: if the new space is smaller than the used size.\n "
# line: 987
if (path is None):
# line: 988
path = self.root.name
# line: 989
mount_point = self._MountPointForPath(path)
# line: 990
if ((mount_point['total_size'] is not None) and (mount_point['used_size'] > total_size)):
# line: 991
raise IOError(errno.ENOSPC, ('Fake file system: cannot change size to %r bytes - used space is larger' % total_size), path)
# line: 994
mount_point['total_size'] = total_size
# line: 996
def ChangeDiskUsage(self, usage_change, file_path, st_dev):
# line: 1010
'Change the used disk space by the given amount.\n New in pyfakefs 2.9.\n\n Args:\n usage_change: Number of bytes added to the used space.\n If negative, the used space will be decreased.\n\n file_path: The path of the object needing the disk space.\n\n st_dev: The device ID for the respective file system.\n\n Raises:\n IOError: if usage_change exceeds the free file system space\n '
# line: 1011
mount_point = self._MountPointForDevice(st_dev)
# line: 1012
if mount_point:
# line: 1013
if (mount_point['total_size'] is not None):
# line: 1014
if ((mount_point['total_size'] - mount_point['used_size']) < usage_change):
# line: 1015
raise IOError(errno.ENOSPC, ('Fake file system: disk is full, failed to add %r bytes' % usage_change), file_path)
# line: 1018
mount_point['used_size'] += usage_change
# line: 1020
def GetStat(self, entry_path, follow_symlinks=True):
# line: 1034
"Return the os.stat-like tuple for the FakeFile object of entry_path.\n New in pyfakefs 3.0.\n\n Args:\n entry_path: path to filesystem object to retrieve.\n follow_symlinks: if False and entry_path points to a symlink, the link itself is inspected\n instead of the linked object.\n\n Returns:\n the FakeStatResult object corresponding to entry_path.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
# line: 1036
try:
# line: 1037
file_object = self.ResolveObject(entry_path, follow_symlinks, allow_fd=True)
# line: 1038
return file_object.stat_result.copy()
# line: 1039
except IOError as io_error:
# line: 1040
raise OSError(io_error.errno, io_error.strerror, entry_path)
# line: 1042
def ChangeMode(self, path, mode, follow_symlinks=True):
# line: 1051
'Change the permissions of a file as encoded in integer mode.\n New in pyfakefs 3.0.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions.\n follow_symlinks: if False and entry_path points to a symlink, the link itself is affected\n instead of the linked object.\n '
# line: 1052
try:
# line: 1053
file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
# line: 1054
except IOError as io_error:
# line: 1055
if (io_error.errno == errno.ENOENT):
# line: 1056
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
# line: 1059
raise
# line: 1060
file_object.st_mode = ((file_object.st_mode & (~ PERM_ALL)) | (mode & PERM_ALL))
# line: 1062
file_object.st_ctime = time.time()
# line: 1064
def UpdateTime(self, path, times=None, ns=None, follow_symlinks=True):
# line: 1086
'Change the access and modified times of a file.\n New in pyfakefs 3.0.\n\n Args:\n path: (str) Path to the file.\n times: 2-tuple of int or float numbers, of the form (atime, mtime) \n which is used to set the access and modified times in seconds. \n If None, both times are set to the current time.\n ns: 2-tuple of int numbers, of the form (atime, mtime) which is \n used to set the access and modified times in nanoseconds. \n If None, both times are set to the current time.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: If `False` and entry_path points to a symlink, \n the link itself is queried instead of the linked object. \n New in Python 3.3. New in pyfakefs 3.0.\n \n Raises:\n TypeError: If anything other than the expected types is \n specified in the passed `times` or `ns` tuple, \n or if the tuple length is not equal to 2.\n ValueError: If both times and ns are specified.\n '
# line: 1087
if ((times is not None) and (ns is not None)):
# line: 1088
raise ValueError("utime: you may specify either 'times' or 'ns' but not both")
# line: 1089
if ((times is not None) and (len(times) != 2)):
# line: 1090
raise TypeError("utime: 'times' must be either a tuple of two ints or None")
# line: 1091
if ((ns is not None) and (len(ns) != 2)):
# line: 1092
raise TypeError("utime: 'ns' must be a tuple of two ints")
# line: 1094
try:
# line: 1095
file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
# line: 1096
except IOError as io_error:
# line: 1097
if (io_error.errno == errno.ENOENT):
# line: 1098
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
# line: 1101
raise
# line: 1102
if (times is not None):
# line: 1103
for file_time in times:
# line: 1104
if (not isinstance(file_time, (int, float))):
# line: 1105
raise TypeError('atime and mtime must be numbers')
# line: 1107
file_object.st_atime = times[0]
# line: 1108
file_object.st_mtime = times[1]
elif (ns is not None):
# line: 1110
for file_time in ns:
# line: 1111
if (not isinstance(file_time, int)):
# line: 1112
raise TypeError('atime and mtime must be ints')
# line: 1114
file_object.st_atime_ns = ns[0]
# line: 1115
file_object.st_mtime_ns = ns[1]
else:
# line: 1117
current_time = time.time()
# line: 1118
file_object.st_atime = current_time
# line: 1119
file_object.st_mtime = current_time
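# Added example: UpdateTime is the backend of os.utime(); with the default
# float timestamps, times=(atime, mtime) in seconds ends up in st_atime and
# st_mtime. '/f' is assumed to be an existing fake file in 'fs':
#   >>> fs.UpdateTime('/f', times=(1, 2))
#   >>> fs.GetStat('/f').st_mtime
#   2.0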
# line: 1121
def SetIno(self, path, st_ino):
# line: 1129
"Set the self.st_ino attribute of file at 'path'.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should used with caution.\n\n Args:\n path: Path to file.\n st_ino: The desired inode.\n "
# line: 1130
self.GetObject(path).SetIno(st_ino)
# line: 1132
def AddOpenFile(self, file_obj):
# line: 1142
'Add file_obj to the list of open files on the filesystem.\n\n The position in the self.open_files array is the file descriptor number.\n\n Args:\n file_obj: file object to be added to open files list.\n\n Returns:\n File descriptor number for the file object.\n '
# line: 1143
if self._free_fd_heap:
# line: 1144
open_fd = heapq.heappop(self._free_fd_heap)
# line: 1145
self.open_files[open_fd] = file_obj
# line: 1146
return open_fd
# line: 1148
self.open_files.append(file_obj)
# line: 1149
return (len(self.open_files) - 1)
# line: 1151
def CloseOpenFile(self, file_des):
# line: 1158
'Remove file object with given descriptor from the list of open files.\n\n Sets the entry in open_files to None.\n\n Args:\n file_des: descriptor of file object to be removed from open files list.\n '
# line: 1159
self.open_files[file_des] = None
# line: 1160
heapq.heappush(self._free_fd_heap, file_des)
# line: 1162
def GetOpenFile(self, file_des):
# line: 1174
'Return an open file.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: an invalid file descriptor.\n TypeError: filedes is not an integer.\n\n Returns:\n Open file object.\n '
# line: 1175
if (not isinstance(file_des, int)):
# line: 1176
raise TypeError('an integer is required')
# line: 1177
if ((file_des >= len(self.open_files)) or (self.open_files[file_des] is None)):
# line: 1179
raise OSError(errno.EBADF, 'Bad file descriptor', file_des)
# line: 1180
return self.open_files[file_des]
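# Added sketch of the descriptor bookkeeping: the index into open_files is
# the file descriptor, and closed slots are recycled lowest-first through
# _free_fd_heap. 'file_obj' stands for any open file wrapper object:
#   >>> fd = fs.AddOpenFile(file_obj)
#   >>> fs.GetOpenFile(fd) is file_obj
#   True
#   >>> fs.CloseOpenFile(fd)  # the slot is now free for reuse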
# line: 1182
def HasOpenFile(self, file_object):
# line: 1191
'Return True if the given file object is in the list of open files.\n New in pyfakefs 2.9.\n\n Args:\n file_object: The FakeFile object to be checked.\n\n Returns:\n True if the file is open.\n '
# line: 1192
return (file_object in [wrapper.GetObject() for wrapper in self.open_files if wrapper])
# line: 1194
def NormalizePathSeparator(self, path):
# line: 1204
'Replace all appearances of alternative path separator with path separator.\n Do nothing if no alternative separator is set.\n New in pyfakefs 2.9.\n\n Args:\n path: the path to be normalized.\n\n Returns:\n The normalized path that will be used internally.\n '
# line: 1205
if (sys.version_info >= (3, 6)):
# line: 1206
path = os.fspath(path)
# line: 1207
if ((self.alternative_path_separator is None) or (not path)):
# line: 1208
return path
# line: 1209
return path.replace(self._alternative_path_separator(path), self._path_separator(path))
# line: 1211
def CollapsePath(self, path):
# line: 1230
"Mimic os.path.normpath using the specified path_separator.\n\n Mimics os.path.normpath using the path_separator that was specified\n for this FakeFilesystem. Normalizes the path, but unlike the method\n NormalizePath, does not make it absolute. Eliminates dot components\n (. and ..) and combines repeated path separators (//). Initial ..\n components are left in place for relative paths. If the result is an empty\n path, '.' is returned instead.\n\n This also replaces alternative path separator with path separator. That is,\n it behaves like the real os.path.normpath on Windows if initialized with\n '\\' as path separator and '/' as alternative separator.\n\n Args:\n path: (str) The path to normalize.\n\n Returns:\n (str) A copy of path with empty components and dot components removed.\n "
# line: 1231
path = self.NormalizePathSeparator(path)
# line: 1232
(drive, path) = self.SplitDrive(path)
# line: 1233
sep = self._path_separator(path)
# line: 1234
is_absolute_path = path.startswith(sep)
# line: 1235
path_components = path.split(sep)
# line: 1236
collapsed_path_components = []
# line: 1237
dot = self._matching_string(path, '.')
# line: 1238
dotdot = self._matching_string(path, '..')
# line: 1239
for component in path_components:
# line: 1240
if ((not component) or (component == dot)):
# line: 1241
continue
# line: 1242
if (component == dotdot):
# line: 1243
if (collapsed_path_components and (collapsed_path_components[(-1)] != dotdot)):
# line: 1246
collapsed_path_components.pop()
# line: 1247
continue
elif is_absolute_path:
# line: 1250
continue
# line: 1251
collapsed_path_components.append(component)
# line: 1252
collapsed_path = sep.join(collapsed_path_components)
# line: 1253
if is_absolute_path:
# line: 1254
collapsed_path = (sep + collapsed_path)
# line: 1255
return ((drive + collapsed_path) or dot)
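# Added example: CollapsePath normalizes like os.path.normpath with the
# configured separator, but unlike NormalizePath it never absolutizes:
#   >>> fs = FakeFilesystem(path_separator='/')
#   >>> fs.CollapsePath('a//b/../c/.')
#   'a/c'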
# line: 1257
def NormalizeCase(self, path):
# line: 1267
'Return a normalized case version of the given path for case-insensitive\n file systems. For case-sensitive file systems, return path unchanged.\n New in pyfakefs 2.9.\n\n Args:\n path: the file path to be transformed\n\n Returns:\n A version of path matching the case of existing path elements.\n '
# line: 1268
def components_to_path():
# line: 1269
if (len(path_components) > len(normalized_components)):
# line: 1270
normalized_components.extend(path_components[len(normalized_components):])
# line: 1271
sep = self._path_separator(path)
# line: 1272
normalized_path = sep.join(normalized_components)
# line: 1273
if (path.startswith(sep) and (not normalized_path.startswith(sep))):
# line: 1274
normalized_path = (sep + normalized_path)
# line: 1275
return normalized_path
# line: 1277
if (self.is_case_sensitive or (not path)):
# line: 1278
return path
# line: 1279
path_components = self.GetPathComponents(path)
# line: 1280
normalized_components = []
# line: 1281
current_dir = self.root
# line: 1282
for component in path_components:
# line: 1283
if (not isinstance(current_dir, FakeDirectory)):
# line: 1284
return components_to_path()
# line: 1285
(dir_name, current_dir) = self._DirectoryContent(current_dir, component)
# line: 1286
if ((current_dir is None) or (isinstance(current_dir, FakeDirectory) and (current_dir._byte_contents is None) and (current_dir.st_size == 0))):
# line: 1290
return components_to_path()
# line: 1291
normalized_components.append(dir_name)
# line: 1292
return components_to_path()
# line: 1294
def NormalizePath(self, path):
# line: 1306
'Absolutize and minimalize the given path.\n\n Forces all relative paths to be absolute, and normalizes the path to\n eliminate dot and empty components.\n\n Args:\n path: path to normalize\n\n Returns:\n The normalized path relative to the current working directory, or the root\n directory if path is empty.\n '
# line: 1307
path = self.NormalizePathSeparator(path)
# line: 1308
if (not path):
# line: 1309
path = self.path_separator
elif (not self._StartsWithRootPath(path)):
# line: 1312
root_name = self._matching_string(path, self.root.name)
# line: 1313
empty = self._matching_string(path, '')
# line: 1314
path = self._path_separator(path).join(((((self.cwd != root_name) and self.cwd) or empty), path))
# line: 1316
if (path == self._matching_string(path, '.')):
# line: 1317
path = self.cwd
# line: 1318
return self.CollapsePath(path)
# line: 1320
def SplitPath(self, path):
# line: 1332
'Mimic os.path.split using the specified path_separator.\n\n Mimics os.path.split using the path_separator that was specified\n for this FakeFilesystem.\n\n Args:\n path: (str) The path to split.\n\n Returns:\n (str) A duple (pathname, basename) for which pathname does not\n end with a slash, and basename does not contain a slash.\n '
# line: 1333
(drive, path) = self.SplitDrive(path)
# line: 1334
path = self.NormalizePathSeparator(path)
# line: 1335
sep = self._path_separator(path)
# line: 1336
path_components = path.split(sep)
# line: 1337
if (not path_components):
# line: 1338
return ('', '')
# line: 1339
basename = path_components.pop()
# line: 1340
if (not path_components):
# line: 1341
return ('', basename)
# line: 1342
for component in path_components:
# line: 1343
if component:
# line: 1346
while (not path_components[(-1)]):
# line: 1347
path_components.pop()
# line: 1348
return ((drive + sep.join(path_components)), basename)
# line: 1350
return ((drive or sep), basename)
# line: 1352
def SplitDrive(self, path):
# line: 1363
'Splits the path into the drive part and the rest of the path.\n New in pyfakefs 2.9.\n\n Taken from Windows specific implementation in Python 3.5 and slightly adapted.\n\n Args:\n path: the full path to be split.\n\n Returns: a tuple of the drive part and the rest of the path, or of an empty string\n and the full path if drive letters are not supported or no drive is present.\n '
# line: 1364
if (sys.version_info >= (3, 6)):
# line: 1365
path = os.fspath(path)
# line: 1366
if self.is_windows_fs:
# line: 1367
if (len(path) >= 2):
# line: 1368
path = self.NormalizePathSeparator(path)
# line: 1369
sep = self._path_separator(path)
# line: 1371
if (sys.version_info >= (2, 7, 8)):
# line: 1372
if ((path[0:2] == (sep * 2)) and (path[2:3] != sep)):
# line: 1375
sep_index = path.find(sep, 2)
# line: 1376
if (sep_index == (-1)):
# line: 1377
return (path[:0], path)
# line: 1378
sep_index2 = path.find(sep, (sep_index + 1))
# line: 1379
if (sep_index2 == (sep_index + 1)):
# line: 1380
return (path[:0], path)
# line: 1381
if (sep_index2 == (-1)):
# line: 1382
sep_index2 = len(path)
# line: 1383
return (path[:sep_index2], path[sep_index2:])
# line: 1384
if (path[1:2] == self._matching_string(path, ':')):
# line: 1385
return (path[:2], path[2:])
# line: 1386
return (path[:0], path)
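# Added example: on a fake Windows filesystem, SplitDrive peels off the drive
# letter (or UNC share) like ntpath.splitdrive:
#   >>> fs = FakeFilesystem(path_separator='\\')
#   >>> fs.is_windows_fs = True
#   >>> fs.SplitDrive('c:\\foo\\bar')
#   ('c:', '\\foo\\bar')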
# line: 1388
def _JoinPathsWithDriveSupport(self, *all_paths):
# line: 1389
'Taken from Python 3.5 os.path.join() code in ntpath.py and slightly adapted'
# line: 1390
base_path = all_paths[0]
# line: 1391
paths_to_add = all_paths[1:]
# line: 1392
sep = self._path_separator(base_path)
# line: 1393
seps = [sep, self._alternative_path_separator(base_path)]
# line: 1394
(result_drive, result_path) = self.SplitDrive(base_path)
# line: 1395
for path in paths_to_add:
# line: 1396
(drive_part, path_part) = self.SplitDrive(path)
# line: 1397
if (path_part and (path_part[:1] in seps)):
# line: 1399
if (drive_part or (not result_drive)):
# line: 1400
result_drive = drive_part
# line: 1401
result_path = path_part
# line: 1402
continue
elif (drive_part and (drive_part != result_drive)):
# line: 1404
if (self.is_case_sensitive or (drive_part.lower() != result_drive.lower())):
# line: 1406
result_drive = drive_part
# line: 1407
result_path = path_part
# line: 1408
continue
# line: 1410
result_drive = drive_part
# line: 1412
if (result_path and (result_path[(-1):] not in seps)):
# line: 1413
result_path = (result_path + sep)
# line: 1414
result_path = (result_path + path_part)
# line: 1416
colon = self._matching_string(base_path, ':')
# line: 1417
if (result_path and (result_path[:1] not in seps) and result_drive and (result_drive[(-1):] != colon)):
# line: 1419
return ((result_drive + sep) + result_path)
# line: 1420
return (result_drive + result_path)
# line: 1422
def JoinPaths(self, *paths):
# line: 1431
'Mimic os.path.join using the specified path_separator.\n\n Args:\n *paths: (str) Zero or more paths to join.\n\n Returns:\n (str) The paths joined by the path separator, starting with the last\n absolute path in paths.\n '
# line: 1432
if (sys.version_info >= (3, 6)):
# line: 1433
paths = [os.fspath(path) for path in paths]
# line: 1434
if (len(paths) == 1):
# line: 1435
return paths[0]
# line: 1436
if self.is_windows_fs:
# line: 1437
return self._JoinPathsWithDriveSupport(*paths)
# line: 1438
joined_path_segments = []
# line: 1439
sep = self._path_separator(paths[0])
# line: 1440
for path_segment in paths:
# line: 1441
if self._StartsWithRootPath(path_segment):
# line: 1443
joined_path_segments = [path_segment]
else:
# line: 1445
if (joined_path_segments and (not joined_path_segments[(-1)].endswith(sep))):
# line: 1447
joined_path_segments.append(sep)
# line: 1448
if path_segment:
# line: 1449
joined_path_segments.append(path_segment)
# line: 1450
return self._matching_string(paths[0], '').join(joined_path_segments)
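# Added example: JoinPaths mimics os.path.join with the configured separator;
# a later absolute segment discards everything before it:
#   >>> fs = FakeFilesystem(path_separator='/')
#   >>> fs.is_windows_fs = False  # force the Posix code path
#   >>> fs.JoinPaths('a', 'b', '/c', 'd')
#   '/c/d'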
# line: 1452
def GetPathComponents(self, path):
# line: 1473
'Breaks the path into a list of component names.\n\n Does not include the root directory as a component, as all paths\n are considered relative to the root directory for the FakeFilesystem.\n Callers should basically follow this pattern:\n\n >>> file_path = self.NormalizePath(file_path)\n >>> path_components = self.GetPathComponents(file_path)\n >>> current_dir = self.root\n >>> for component in path_components:\n >>> if component not in current_dir.contents:\n >>> raise IOError\n >>> DoStuffWithComponent(current_dir, component)\n >>> current_dir = current_dir.GetEntry(component)\n\n Args:\n path: path to tokenize\n\n Returns:\n The list of names split from path\n '
# line: 1474
if ((not path) or (path == self._path_separator(path))):
# line: 1475
return []
# line: 1476
(drive, path) = self.SplitDrive(path)
# line: 1477
path_components = path.split(self._path_separator(path))
# line: 1478
assert (drive or path_components)
# line: 1479
if (not path_components[0]):
# line: 1481
path_components = path_components[1:]
# line: 1482
if drive:
# line: 1483
path_components.insert(0, drive)
# line: 1484
return path_components
# line: 1486
def StartsWithDriveLetter(self, file_path):
# line: 1496
'Return True if file_path starts with a drive letter.\n New in pyfakefs 2.9.\n\n Args:\n file_path: the full path to be examined.\n\n Returns:\n True if drive letter support is enabled in the filesystem and\n the path starts with a drive letter.\n '
# line: 1497
colon = self._matching_string(file_path, ':')
# line: 1498
return (self.is_windows_fs and (len(file_path) >= 2) and file_path[:1].isalpha() and (file_path[1:2] == colon))
# line: 1501
def _StartsWithRootPath(self, file_path):
# line: 1502
root_name = self._matching_string(file_path, self.root.name)
# line: 1503
return (file_path.startswith(root_name) or ((not self.is_case_sensitive) and file_path.lower().startswith(root_name.lower())) or self.StartsWithDriveLetter(file_path))
# line: 1508
def _IsRootPath(self, file_path):
# line: 1509
root_name = self._matching_string(file_path, self.root.name)
# line: 1510
return ((file_path == root_name) or ((not self.is_case_sensitive) and (file_path.lower() == root_name.lower())) or ((len(file_path) == 2) and self.StartsWithDriveLetter(file_path)))
# line: 1514
def _EndsWithPathSeparator(self, file_path):
# line: 1515
return (file_path and (file_path.endswith(self._path_separator(file_path)) or ((self.alternative_path_separator is not None) and file_path.endswith(self._alternative_path_separator(file_path)))))
# line: 1519
def _DirectoryContent(self, directory, component):
# line: 1520
if (not isinstance(directory, FakeDirectory)):
# line: 1521
return (None, None)
# line: 1522
if (component in directory.contents):
# line: 1523
return (component, directory.contents[component])
# line: 1524
if (not self.is_case_sensitive):
# line: 1525
matching_content = [(subdir, directory.contents[subdir]) for subdir in directory.contents if (subdir.lower() == component.lower())]
# line: 1528
if matching_content:
# line: 1529
return matching_content[0]
# line: 1531
return (None, None)
# line: 1533
def Exists(self, file_path):
# line: 1544
'Return true if a path points to an existing file system object.\n\n Args:\n file_path: path to examine.\n\n Returns:\n (bool) True if the corresponding object exists.\n\n Raises:\n TypeError: if file_path is None.\n '
# line: 1545
if (sys.version_info >= (3, 6)):
# line: 1546
file_path = os.fspath(file_path)
# line: 1547
if (file_path is None):
# line: 1548
raise TypeError
# line: 1549
if (not file_path):
# line: 1550
return False
# line: 1551
try:
# line: 1552
file_path = self.ResolvePath(file_path)
# line: 1553
except (IOError, OSError):
# line: 1554
return False
# line: 1555
if (file_path == self.root.name):
# line: 1556
return True
# line: 1557
path_components = self.GetPathComponents(file_path)
# line: 1558
current_dir = self.root
# line: 1559
for component in path_components:
# line: 1560
current_dir = self._DirectoryContent(current_dir, component)[1]
# line: 1561
if (not current_dir):
# line: 1562
return False
# line: 1563
return True
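# Illustrative usage sketch: Exists() resolves the path and returns False
# rather than raising when any component is missing. `fs` is an assumed
# FakeFilesystem instance.
#
# >>> readme = fs.CreateFile('/data/readme.txt')
# >>> fs.Exists('/data/readme.txt')
# True
# >>> fs.Exists('/data/missing.txt')
# False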
# line: 1565
def ResolvePath(self, file_path, allow_fd=False, raw_io=True):
# line: 1601
"Follow a path, resolving symlinks.\n\n ResolvePath traverses the filesystem along the specified file path,\n resolving file names and symbolic links until all elements of the path are\n exhausted, or we reach a file which does not exist. If all the elements\n are not consumed, they just get appended to the path resolved so far.\n This gives us the path which is as resolved as it can be, even if the file\n does not exist.\n\n This behavior mimics Unix semantics, and is best shown by example. Given a\n file system that looks like this:\n\n /a/b/\n /a/b/c -> /a/b2 c is a symlink to /a/b2\n /a/b2/x\n /a/c -> ../d\n /a/x -> y\n\n Then:\n /a/b/x => /a/b/x\n /a/c => /a/d\n /a/x => /a/y\n /a/b/c/d/e => /a/b2/d/e\n\n Args:\n file_path: path to examine.\n allow_fd: If `True`, `file_path` may be open file descriptor\n raw_io: `True` if called from low-level I/O functions\n\n Returns:\n resolved_path (string) or None.\n\n Raises:\n TypeError: if file_path is None.\n IOError: if file_path is '' or a part of the path doesn't exist.\n "
# line: 1603
def _ComponentsToPath(component_folders):
# line: 1604
sep = (self._path_separator(component_folders[0]) if component_folders else self.path_separator)
# line: 1606
path = sep.join(component_folders)
# line: 1607
if (not self._StartsWithRootPath(path)):
# line: 1608
path = (sep + path)
# line: 1609
return path
# line: 1611
def _ValidRelativePath(file_path):
# line: 1612
slash_dotdot = self._matching_string(file_path, '/..')
# line: 1613
while (file_path and (slash_dotdot in file_path)):
# line: 1614
file_path = file_path[:file_path.rfind(slash_dotdot)]
# line: 1615
if (not self.Exists(self.NormalizePath(file_path))):
# line: 1616
return False
# line: 1617
return True
# line: 1619
def _FollowLink(link_path_components, link):
# line: 1639
'Follow a link w.r.t. a path resolved so far.\n\n The component is either a real file, which is a no-op, or a symlink.\n In the case of a symlink, we have to modify the path as built up so far\n /a/b => ../c should yield /a/../c (which will normalize to /a/c)\n /a/b => x should yield /a/x\n /a/b => /x/y/z should yield /x/y/z\n The modified path may land us in a new spot which is itself a\n link, so we may repeat the process.\n\n Args:\n link_path_components: The resolved path built up to the link so far.\n link: The link object itself.\n\n Returns:\n (string) the updated path resolved after following the link.\n\n Raises:\n IOError: if there are too many levels of symbolic link\n '
# line: 1640
link_path = link.contents
# line: 1641
sep = self._path_separator(link_path)
# line: 1642
alt_sep = self._alternative_path_separator(link_path)
# line: 1646
if ((not link_path.startswith(sep)) and ((alt_sep is None) or (not link_path.startswith(alt_sep)))):
# line: 1652
components = link_path_components[:(-1)]
# line: 1653
components.append(link_path)
# line: 1654
link_path = sep.join(components)
# line: 1656
return self.CollapsePath(link_path)
# line: 1658
if (allow_fd and (sys.version_info >= (3, 3)) and isinstance(file_path, int)):
# line: 1659
return self.GetOpenFile(file_path).GetObject().GetPath()
# line: 1661
if (sys.version_info >= (3, 6)):
# line: 1662
file_path = os.fspath(file_path)
# line: 1663
if (file_path is None):
# line: 1665
raise TypeError('Expected file system path string, received None')
# line: 1666
if ((not file_path) or (not _ValidRelativePath(file_path))):
# line: 1669
raise IOError(errno.ENOENT, ("No such file or directory: '%s'" % file_path))
# line: 1671
file_path = self.NormalizePath(self.NormalizeCase(file_path))
# line: 1672
if self._IsRootPath(file_path):
# line: 1673
return file_path
# line: 1675
current_dir = self.root
# line: 1676
path_components = self.GetPathComponents(file_path)
# line: 1678
resolved_components = []
# line: 1679
link_depth = 0
# line: 1680
while path_components:
# line: 1681
component = path_components.pop(0)
# line: 1682
resolved_components.append(component)
# line: 1683
current_dir = self._DirectoryContent(current_dir, component)[1]
# line: 1684
if (current_dir is None):
# line: 1690
resolved_components.extend(path_components)
# line: 1691
break
# line: 1694
if stat.S_ISLNK(current_dir.st_mode):
# line: 1698
if (link_depth > _MAX_LINK_DEPTH):
# line: 1699
error_class = (OSError if raw_io else IOError)
# line: 1700
raise error_class(errno.ELOOP, ("Too many levels of symbolic links: '%s'" % _ComponentsToPath(resolved_components)))
# line: 1704
link_path = _FollowLink(resolved_components, current_dir)
# line: 1708
target_components = self.GetPathComponents(link_path)
# line: 1709
path_components = (target_components + path_components)
# line: 1710
resolved_components = []
# line: 1711
current_dir = self.root
# line: 1712
link_depth += 1
# line: 1713
return _ComponentsToPath(resolved_components)
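# Illustrative usage sketch mirroring the docstring above: a symlink in the
# middle of a path is expanded before the remaining components are appended.
# `fs` is an assumed FakeFilesystem instance with '/' as path separator.
#
# >>> b2 = fs.CreateDirectory('/a/b2')
# >>> link = fs.CreateLink('/a/b', '/a/b2')
# >>> fs.ResolvePath('/a/b/x')
# '/a/b2/x'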
# line: 1715
def GetObjectFromNormalizedPath(self, file_path):
# line: 1727
'Search for the specified filesystem object within the fake filesystem.\n\n Args:\n file_path: specifies target FakeFile object to retrieve, with a\n path that has already been normalized/resolved.\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
# line: 1728
if (sys.version_info >= (3, 6)):
# line: 1729
file_path = os.fspath(file_path)
# line: 1730
if (file_path == self.root.name):
# line: 1731
return self.root
# line: 1732
path_components = self.GetPathComponents(file_path)
# line: 1733
target_object = self.root
# line: 1734
try:
# line: 1735
for component in path_components:
# line: 1736
if stat.S_ISLNK(target_object.st_mode):
# line: 1737
target_object = self.ResolveObject(target_object.contents)
# line: 1738
if (not stat.S_ISDIR(target_object.st_mode)):
# line: 1739
if (not self.is_windows_fs):
# line: 1740
raise IOError(errno.ENOTDIR, 'Not a directory in fake filesystem', file_path)
# line: 1743
raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
# line: 1746
target_object = target_object.GetEntry(component)
# line: 1747
except KeyError:
# line: 1748
raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
# line: 1751
return target_object
# line: 1753
def GetObject(self, file_path):
# line: 1764
'Search for the specified filesystem object within the fake filesystem.\n\n Args:\n file_path: specifies target FakeFile object to retrieve.\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
# line: 1765
if (sys.version_info >= (3, 6)):
# line: 1766
file_path = os.fspath(file_path)
# line: 1767
file_path = self.NormalizePath(self.NormalizeCase(file_path))
# line: 1768
return self.GetObjectFromNormalizedPath(file_path)
# line: 1770
def ResolveObject(self, file_path, follow_symlinks=True, allow_fd=False):
# line: 1784
'Search for the specified filesystem object, resolving all links.\n\n Args:\n file_path: Specifies target FakeFile object to retrieve.\n follow_symlinks: If `False`, the link itself is resolved,\n otherwise the object linked to.\n allow_fd: If `True`, `file_path` may be an open file descriptor\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
# line: 1785
if (allow_fd and (sys.version_info >= (3, 3)) and isinstance(file_path, int)):
# line: 1786
return self.GetOpenFile(file_path).GetObject()
# line: 1788
if follow_symlinks:
# line: 1789
if (sys.version_info >= (3, 6)):
# line: 1790
file_path = os.fspath(file_path)
# line: 1791
return self.GetObjectFromNormalizedPath(self.ResolvePath(file_path))
# line: 1792
return self.LResolveObject(file_path)
# line: 1794
def LResolveObject(self, path):
# line: 1808
'Search for the specified object, resolving only parent links.\n\n This is analogous to the stat/lstat difference. This resolves links *to*\n the object but not of the final object itself.\n\n Args:\n path: specifies target FakeFile object to retrieve.\n\n Returns:\n the FakeFile object corresponding to path.\n\n Raises:\n IOError: if the object is not found.\n '
# line: 1809
if (sys.version_info >= (3, 6)):
# line: 1810
path = os.fspath(path)
# line: 1811
if (path == self.root.name):
# line: 1813
return self.root
# line: 1816
sep = self._path_separator(path)
# line: 1817
alt_sep = self._alternative_path_separator(path)
# line: 1818
if (path.endswith(sep) or (alt_sep and path.endswith(alt_sep))):
# line: 1819
path = path[:(-1)]
# line: 1821
(parent_directory, child_name) = self.SplitPath(path)
# line: 1822
if (not parent_directory):
# line: 1823
parent_directory = self.cwd
# line: 1824
try:
# line: 1825
parent_obj = self.ResolveObject(parent_directory)
# line: 1826
assert parent_obj
# line: 1827
if (not isinstance(parent_obj, FakeDirectory)):
# line: 1828
if ((not self.is_windows_fs) and isinstance(parent_obj, FakeFile)):
# line: 1829
raise IOError(errno.ENOTDIR, 'The parent object is not a directory', path)
# line: 1831
raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
# line: 1834
return parent_obj.GetEntry(child_name)
# line: 1835
except KeyError:
# line: 1836
raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', path)
# line: 1840
def AddObject(self, file_path, file_object, error_class=OSError):
# line: 1851
'Add a fake file or directory into the filesystem at file_path.\n\n Args:\n file_path: the path to the file to be added relative to self.\n file_object: file or directory to add.\n error_class: the error class to be thrown if file_path does\n not correspond to a directory (used internally)\n\n Raises:\n IOError or OSError: if file_path does not correspond to a directory.\n '
# line: 1852
if (not file_path):
# line: 1853
target_directory = self.root
else:
# line: 1855
target_directory = self.ResolveObject(file_path)
# line: 1856
if (not stat.S_ISDIR(target_directory.st_mode)):
# line: 1857
raise error_class(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)
# line: 1860
target_directory.AddEntry(file_object)
# line: 1862
def RenameObject(self, old_file_path, new_file_path, force_replace=False):
# line: 1883
'Renames a FakeFile object at old_file_path to new_file_path, preserving all properties.\n\n Args:\n old_file_path: Path to filesystem object to rename.\n new_file_path: Path to where the filesystem object will live after this call.\n force_replace: If set and destination is an existing file, it will be replaced\n even under Windows if the user has permissions, otherwise replacement\n happens under Unix only.\n\n Raises:\n OSError: if old_file_path does not exist.\n OSError: if new_file_path is an existing directory\n (Windows, or Posix if old_file_path points to a regular file)\n OSError: if old_file_path is a directory and new_file_path a file\n OSError: if new_file_path is an existing file and force_replace not set\n (Windows only).\n OSError: if new_file_path is an existing file and could not be removed\n (Posix, or Windows with force_replace set).\n OSError: if dirname(new_file_path) does not exist.\n OSError: if the file would be moved to another filesystem (e.g. mount point).\n '
# line: 1884
old_file_path = self.NormalizePath(old_file_path)
# line: 1885
new_file_path = self.NormalizePath(new_file_path)
# line: 1886
if ((not self.Exists(old_file_path)) and (not self.IsLink(old_file_path))):
# line: 1887
raise OSError(errno.ENOENT, 'Fake filesystem object: can not rename nonexistent file', old_file_path)
# line: 1891
old_object = self.LResolveObject(old_file_path)
# line: 1892
if (not self.is_windows_fs):
# line: 1893
if (self.IsDir(old_file_path, follow_symlinks=False) and self.IsLink(new_file_path)):
# line: 1895
raise OSError(errno.ENOTDIR, 'Cannot rename directory to symlink', new_file_path)
# line: 1898
if (self.IsDir(new_file_path, follow_symlinks=False) and self.IsLink(old_file_path)):
# line: 1900
raise OSError(errno.EISDIR, 'Cannot rename symlink to directory', new_file_path)
# line: 1904
if (self.Exists(new_file_path) or self.IsLink(new_file_path)):
# line: 1905
if (old_file_path == new_file_path):
# line: 1906
return
# line: 1908
new_object = self.GetObject(new_file_path)
# line: 1909
if (old_object == new_object):
# line: 1910
if (old_file_path.lower() == new_file_path.lower()):
# line: 1912
pass
else:
# line: 1915
return
elif (stat.S_ISDIR(new_object.st_mode) or stat.S_ISLNK(new_object.st_mode)):
# line: 1918
if self.is_windows_fs:
# line: 1919
if force_replace:
# line: 1920
raise OSError(errno.EACCES, 'Fake filesystem object: can not replace existing directory', new_file_path)
else:
# line: 1924
raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing directory', new_file_path)
# line: 1927
if (not stat.S_ISLNK(new_object.st_mode)):
# line: 1928
if new_object.contents:
# line: 1929
raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to non-empty directory', new_file_path)
# line: 1932
if stat.S_ISREG(old_object.st_mode):
# line: 1933
raise OSError(errno.EISDIR, 'Fake filesystem object: cannot rename file to directory', new_file_path)
elif stat.S_ISDIR(old_object.st_mode):
# line: 1937
raise OSError(errno.ENOTDIR, 'Fake filesystem object: cannot rename directory to file', new_file_path)
elif (self.is_windows_fs and (not force_replace)):
# line: 1941
raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing file', new_file_path)
else:
# line: 1945
try:
# line: 1946
self.RemoveObject(new_file_path)
# line: 1947
except IOError as exc:
# line: 1948
raise OSError(exc.errno, exc.strerror, exc.filename)
# line: 1950
(old_dir, old_name) = self.SplitPath(old_file_path)
# line: 1951
(new_dir, new_name) = self.SplitPath(new_file_path)
# line: 1952
if (not self.Exists(new_dir)):
# line: 1953
raise OSError(errno.ENOENT, 'No such fake directory', new_dir)
# line: 1954
old_dir_object = self.ResolveObject(old_dir)
# line: 1955
new_dir_object = self.ResolveObject(new_dir)
# line: 1956
if (old_dir_object.st_dev != new_dir_object.st_dev):
# line: 1957
raise OSError(errno.EXDEV, 'Fake filesystem object: cannot rename across file systems', old_file_path)
# line: 1960
if (not stat.S_ISDIR(new_dir_object.st_mode)):
# line: 1961
raise OSError((errno.EACCES if self.is_windows_fs else errno.ENOTDIR), 'Fake filesystem object: target parent is not a directory', new_file_path)
# line: 1964
if new_dir_object.HasParentObject(old_object):
# line: 1965
raise OSError(errno.EINVAL, 'Fake filesystem object: invalid target for rename', new_file_path)
# line: 1969
object_to_rename = old_dir_object.GetEntry(old_name)
# line: 1970
old_dir_object.RemoveEntry(old_name, recursive=False)
# line: 1971
object_to_rename.name = new_name
# line: 1972
if (new_name in new_dir_object.contents):
# line: 1974
new_dir_object.RemoveEntry(new_name)
# line: 1975
new_dir_object.AddEntry(object_to_rename)
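# Illustrative usage sketch: renaming moves the same FakeFile object to the
# new location, contents included. `fs` is an assumed FakeFilesystem instance.
#
# >>> f = fs.CreateFile('/tmp/old.txt', contents='payload')
# >>> fs.RenameObject('/tmp/old.txt', '/tmp/new.txt')
# >>> fs.Exists('/tmp/old.txt'), fs.Exists('/tmp/new.txt')
# (False, True)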
# line: 1977
def RemoveObject(self, file_path):
# line: 1987
"Remove an existing file or directory.\n\n Args:\n file_path: the path to the file relative to self.\n\n Raises:\n IOError: if file_path does not correspond to an existing file, or if part\n of the path refers to something other than a directory.\n OSError: if the directory is in use (eg, if it is '/').\n "
# line: 1988
file_path = self.NormalizePath(self.NormalizeCase(file_path))
# line: 1989
if self._IsRootPath(file_path):
# line: 1990
raise OSError(errno.EBUSY, 'Fake device or resource busy', file_path)
# line: 1992
try:
# line: 1993
(dirname, basename) = self.SplitPath(file_path)
# line: 1994
target_directory = self.ResolveObject(dirname)
# line: 1995
target_directory.RemoveEntry(basename)
# line: 1996
except KeyError:
# line: 1997
raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', file_path)
# line: 2000
except AttributeError:
# line: 2001
raise IOError(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)
# line: 2005
def CreateDirectory(self, directory_path, perm_bits=PERM_DEF):
# line: 2019
'Create directory_path, and all the parent directories.\n\n Helper method to set up your test faster.\n\n Args:\n directory_path: The full directory path to create.\n perm_bits: The permission bits as set by `chmod`.\n\n Returns:\n the newly created FakeDirectory object.\n\n Raises:\n OSError: if the directory already exists.\n '
# line: 2020
directory_path = self.NormalizePath(directory_path)
# line: 2021
self._AutoMountDriveIfNeeded(directory_path)
# line: 2022
if self.Exists(directory_path):
# line: 2023
raise OSError(errno.EEXIST, 'Directory exists in fake filesystem', directory_path)
# line: 2026
path_components = self.GetPathComponents(directory_path)
# line: 2027
current_dir = self.root
# line: 2029
new_dirs = []
# line: 2030
for component in path_components:
# line: 2031
directory = self._DirectoryContent(current_dir, component)[1]
# line: 2032
if (not directory):
# line: 2033
new_dir = FakeDirectory(component, filesystem=self)
# line: 2034
new_dirs.append(new_dir)
# line: 2035
current_dir.AddEntry(new_dir)
# line: 2036
current_dir = new_dir
else:
# line: 2038
if stat.S_ISLNK(directory.st_mode):
# line: 2039
directory = self.ResolveObject(directory.contents)
# line: 2040
current_dir = directory
# line: 2041
if ((directory.st_mode & stat.S_IFDIR) != stat.S_IFDIR):
# line: 2042
raise OSError(errno.ENOTDIR, 'Not a directory', current_dir.GetPath())
# line: 2046
for new_dir in new_dirs:
# line: 2047
new_dir.st_mode = (stat.S_IFDIR | perm_bits)
# line: 2049
self._last_ino += 1
# line: 2050
current_dir.SetIno(self._last_ino)
# line: 2051
return current_dir
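# Illustrative usage sketch: all missing parents are created and the leaf
# FakeDirectory is returned. `fs` is an assumed FakeFilesystem instance.
#
# >>> leaf = fs.CreateDirectory('/var/log/app')
# >>> leaf.name
# 'app'
# >>> fs.Exists('/var/log')
# True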
# line: 2053
def CreateFile(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None):
# line: 2079
'Create file_path, including all the parent directories along the way.\n\n This helper method can be used to set up tests more easily.\n\n Args:\n file_path: The path to the file to create.\n st_mode: The stat constant representing the file type.\n contents: The contents of the file.\n st_size: The file size; only valid if contents not given.\n create_missing_dirs: If `True`, auto create missing directories.\n apply_umask: `True` if the current umask must be applied to st_mode.\n encoding: If contents is a unicode string, the encoding used\n for serialization.\n New in pyfakefs 2.9.\n errors: The error mode used for encoding/decoding errors.\n New in pyfakefs 3.2.\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n IOError: if the file already exists.\n IOError: if the containing directory is required and missing.\n '
# line: 2080
return self.CreateFileInternally(file_path, st_mode, contents, st_size, create_missing_dirs, apply_umask, encoding, errors)
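# Illustrative usage sketch: the usual one-liner for test setup; parent
# directories are auto-created because create_missing_dirs defaults to True.
# `fs` is an assumed FakeFilesystem instance.
#
# >>> conf = fs.CreateFile('/etc/app/conf.ini', contents='[main]\n')
# >>> conf.contents
# '[main]\n'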
# line: 2084
def add_real_file(self, file_path, read_only=True):
# line: 2109
"Create file_path, including all the parent directories along the way, for an existing\n real file. The contents of the real file are read only on demand.\n New in pyfakefs 3.2.\n\n Args:\n file_path: Path to an existing file in the real file system\n read_only: If `True` (the default), writing to the fake file\n raises an exception. Otherwise, writing to the file changes\n the fake file only.\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n OSError: if the file does not exist in the real file system.\n IOError: if the file already exists in the fake file system.\n\n .. note:: On MacOS and BSD, accessing the fake file's contents will update both the real and fake files' `atime.` (access time). In this particular case, `add_real_file()` violates the rule that `pyfakefs` must not modify the real file system. Further, Windows offers the option to enable atime, and older versions of Linux may also modify atime.\n "
# line: 2110
return self.CreateFileInternally(file_path, read_from_real_fs=True, read_only=read_only)
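# Illustrative usage sketch: maps an existing real file into the fake
# filesystem; its contents are only read when accessed. The path below is
# hypothetical.
#
# >>> fake = fs.add_real_file('/path/to/real/config.yml')
# >>> fake.st_size == os.path.getsize('/path/to/real/config.yml')
# True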
# line: 2114
def add_real_directory(self, dir_path, read_only=True, lazy_read=True):
# line: 2139
'Create a fake directory corresponding to the real directory at the specified\n path. Add entries in the fake directory corresponding to the entries in the\n real directory.\n New in pyfakefs 3.2.\n\n Args:\n dir_path: The path to the existing directory.\n read_only: If set, all files under the directory are treated as\n read-only, e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only\n as usual.\n lazy_read: If set (default), directory contents are only read when\n accessed, and only until the needed subdirectory level.\n *Note:* this means that the file system size is only updated\n at the time the directory contents are read; set this to\n `False` only if you are dependent on accurate file system\n size in your test\n\n Returns:\n the newly created FakeDirectory object.\n\n Raises:\n OSError: if the directory does not exist in the real file system.\n IOError: if the directory already exists in the fake file system.\n '
# line: 2140
if (not os.path.exists(dir_path)):
# line: 2141
raise IOError(errno.ENOENT, 'No such directory', dir_path)
# line: 2142
if lazy_read:
# line: 2143
parent_path = os.path.split(dir_path)[0]
# line: 2144
if self.Exists(parent_path):
# line: 2145
parent_dir = self.GetObject(parent_path)
else:
# line: 2147
parent_dir = self.CreateDirectory(parent_path)
# line: 2148
new_dir = FakeDirectoryFromRealDirectory(dir_path, filesystem=self, read_only=read_only)
# line: 2149
parent_dir.AddEntry(new_dir)
# line: 2150
self._last_ino += 1
# line: 2151
new_dir.SetIno(self._last_ino)
else:
# line: 2153
new_dir = self.CreateDirectory(dir_path)
# line: 2154
for (base, _, files) in os.walk(dir_path):
# line: 2155
for fileEntry in files:
# line: 2156
self.add_real_file(os.path.join(base, fileEntry), read_only)
# line: 2157
return new_dir
# line: 2159
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
# line: 2176
'This convenience method adds multiple files and/or directories from the\n real file system to the fake file system. See `add_real_file()` and\n `add_real_directory()`.\n New in pyfakefs 3.2.\n\n Args:\n path_list: List of file and directory paths in the real file system.\n read_only: If set, all files and files under the directories are treated as read-only,\n e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only as usual.\n lazy_dir_read: Uses lazy reading of directory contents if set\n (see `add_real_directory`)\n\n Raises:\n OSError: if any of the files and directories in the list does not exist in the real file system.\n OSError: if any of the files and directories in the list already exists in the fake file system.\n '
# line: 2177
for path in path_list:
# line: 2178
if os.path.isdir(path):
# line: 2179
self.add_real_directory(path, read_only, lazy_dir_read)
else:
# line: 2181
self.add_real_file(path, read_only)
# line: 2183
def CreateFileInternally(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None, read_from_real_fs=False, read_only=True, raw_io=False):
# line: 2203
'Internal fake file creator that supports both normal fake files and fake\n files based on real files.\n\n Args:\n file_path: path to the file to create.\n st_mode: the stat.S_IF constant representing the file type.\n contents: the contents of the file.\n st_size: file size; only valid if contents not given.\n create_missing_dirs: if True, auto create missing directories.\n apply_umask: whether the current umask must be applied to st_mode.\n encoding: if contents is a unicode string, the encoding used for serialization.\n errors: the error mode used for encoding/decoding errors\n read_from_real_fs: if True, the contents are read from the real file system on demand.\n read_only: if set, the file is treated as read-only, e.g. a write access raises an exception;\n otherwise, writing to the file changes the fake file only as usual.\n raw_io: `True` if called from low-level API (`os.open`)\n '
# line: 2204
error_class = (OSError if raw_io else IOError)
# line: 2205
file_path = self.NormalizePath(file_path)
# line: 2208
if (self.Exists(file_path) or self.IsLink(file_path)):
# line: 2209
raise OSError(errno.EEXIST, 'File already exists in fake filesystem', file_path)
# line: 2212
(parent_directory, new_file) = self.SplitPath(file_path)
# line: 2213
if (not parent_directory):
# line: 2214
parent_directory = self.cwd
# line: 2215
self._AutoMountDriveIfNeeded(parent_directory)
# line: 2216
if (not self.Exists(parent_directory)):
# line: 2217
if (not create_missing_dirs):
# line: 2218
raise error_class(errno.ENOENT, 'No such fake directory', parent_directory)
# line: 2219
self.CreateDirectory(parent_directory)
else:
# line: 2221
parent_directory = self.NormalizeCase(parent_directory)
# line: 2222
if apply_umask:
# line: 2223
st_mode &= (~ self.umask)
# line: 2224
if read_from_real_fs:
# line: 2225
file_object = FakeFileFromRealFile(file_path, filesystem=self, read_only=read_only)
else:
# line: 2227
file_object = FakeFile(new_file, st_mode, filesystem=self, encoding=encoding, errors=errors)
# line: 2229
self._last_ino += 1
# line: 2230
file_object.SetIno(self._last_ino)
# line: 2231
self.AddObject(parent_directory, file_object, error_class)
# line: 2233
if ((not read_from_real_fs) and ((contents is not None) or (st_size is not None))):
# line: 2234
try:
# line: 2235
if (st_size is not None):
# line: 2236
file_object.SetLargeFileSize(st_size)
else:
# line: 2238
file_object._set_initial_contents(contents)
# line: 2239
except IOError:
# line: 2240
self.RemoveObject(file_path)
# line: 2241
raise
# line: 2243
return file_object
# line: 2246
def CreateLink(self, file_path, link_target, create_missing_dirs=True):
# line: 2261
'Create the specified symlink, pointed at the specified link target.\n\n Args:\n file_path: path to the symlink to create\n link_target: the target of the symlink\n create_missing_dirs: If `True`, any missing parent directories of\n file_path will be created\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n OSError: if the symlink could not be created (see `CreateFile`).\n OSError: if on Windows before Python 3.2.\n '
# line: 2262
if (not self._IsLinkSupported()):
# line: 2263
raise OSError('Symbolic links are not supported on Windows before Python 3.2')
# line: 2265
if (not self.IsLink(file_path)):
# line: 2266
file_path = self.ResolvePath(file_path)
# line: 2267
if (sys.version_info >= (3, 6)):
# line: 2268
link_target = os.fspath(link_target)
# line: 2269
return self.CreateFileInternally(file_path, st_mode=(stat.S_IFLNK | PERM_DEF), contents=link_target, create_missing_dirs=create_missing_dirs, raw_io=True)
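# Illustrative usage sketch: the stored link target is returned unresolved by
# ReadLink() (defined below). `fs` is an assumed FakeFilesystem instance.
#
# >>> target = fs.CreateFile('/data/target.txt')
# >>> alias = fs.CreateLink('/data/alias', '/data/target.txt')
# >>> fs.ReadLink('/data/alias')
# '/data/target.txt'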
# line: 2273
def CreateHardLink(self, old_path, new_path):
# line: 2289
"Create a hard link at new_path, pointing at old_path.\n New in pyfakefs 2.9.\n\n Args:\n old_path: an existing link to the target file.\n new_path: the destination path to create a new link at.\n\n Returns:\n the FakeFile object referred to by old_path.\n\n Raises:\n OSError: if something already exists at new_path.\n OSError: if old_path is a directory.\n OSError: if the parent directory doesn't exist.\n OSError: if on Windows before Python 3.2.\n "
# line: 2290
if (not self._IsLinkSupported()):
# line: 2291
raise OSError('Links are not supported on Windows before Python 3.2')
# line: 2292
new_path_normalized = self.NormalizePath(new_path)
# line: 2293
if self.Exists(new_path_normalized):
# line: 2294
raise OSError(errno.EEXIST, 'File already exists in fake filesystem', new_path)
# line: 2298
(new_parent_directory, new_basename) = self.SplitPath(new_path_normalized)
# line: 2299
if (not new_parent_directory):
# line: 2300
new_parent_directory = self.cwd
# line: 2302
if (not self.Exists(new_parent_directory)):
# line: 2303
raise OSError(errno.ENOENT, 'No such fake directory', new_parent_directory)
# line: 2307
try:
# line: 2308
old_file = self.ResolveObject(old_path)
# line: 2309
except (IOError, OSError):
# line: 2310
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', old_path)
# line: 2314
if (old_file.st_mode & stat.S_IFDIR):
# line: 2315
raise OSError((errno.EACCES if self.is_windows_fs else errno.EPERM), 'Cannot create hard link to directory', old_path)
# line: 2320
old_file.name = new_basename
# line: 2321
self.AddObject(new_parent_directory, old_file)
# line: 2322
return old_file
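# Illustrative usage sketch: after a hard link both paths resolve to the very
# same FakeFile object. `fs` is an assumed FakeFilesystem instance.
#
# >>> a = fs.CreateFile('/data/a.txt', contents='x')
# >>> b = fs.CreateHardLink('/data/a.txt', '/data/b.txt')
# >>> fs.GetObject('/data/a.txt') is fs.GetObject('/data/b.txt')
# True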
# line: 2324
def ReadLink(self, path):
# line: 2338
'Read the target of a symlink.\n New in pyfakefs 3.0.\n\n Args:\n path: symlink to read the target of.\n\n Returns:\n the string representing the path to which the symbolic link points.\n\n Raises:\n TypeError: if path is None\n OSError: (with errno=ENOENT) if path is not a valid path, or\n (with errno=EINVAL) if path is valid, but is not a symlink.\n '
# line: 2339
if (path is None):
# line: 2340
raise TypeError
# line: 2341
try:
# line: 2342
link_obj = self.LResolveObject(path)
# line: 2343
except IOError as exc:
# line: 2344
raise OSError(exc.errno, 'Fake path does not exist', path)
# line: 2345
if (stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK):
# line: 2346
raise OSError(errno.EINVAL, 'Fake filesystem: not a symlink', path)
# line: 2347
return link_obj.contents
# line: 2349
def MakeDirectory(self, dir_name, mode=PERM_DEF):
# line: 2362
"Create a leaf Fake directory.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create. Relative paths are assumed\n to be relative to '/'.\n mode: (int) Mode to create directory with. This argument defaults to\n 0o777. The umask is applied to this mode.\n\n Raises:\n OSError: if the directory name is invalid or parent directory is read only\n or as per `FakeFilesystem.AddObject()`.\n "
# line: 2363
if (sys.version_info >= (3, 6)):
# line: 2364
dir_name = os.fspath(dir_name)
# line: 2365
if self._EndsWithPathSeparator(dir_name):
# line: 2366
dir_name = dir_name[:(-1)]
# line: 2367
if (not dir_name):
# line: 2368
raise OSError(errno.ENOENT, 'Empty directory name')
# line: 2370
(parent_dir, _) = self.SplitPath(dir_name)
# line: 2371
if parent_dir:
# line: 2372
base_dir = self.CollapsePath(parent_dir)
# line: 2373
ellipsis = self._matching_string(parent_dir, (self.path_separator + '..'))
# line: 2374
if parent_dir.endswith(ellipsis):
# line: 2375
(base_dir, dummy_dotdot, _) = parent_dir.partition(ellipsis)
# line: 2376
if (not self.Exists(base_dir)):
# line: 2377
raise OSError(errno.ENOENT, 'No such fake directory', base_dir)
# line: 2379
dir_name = self.NormalizePath(dir_name)
# line: 2380
if self.Exists(dir_name):
# line: 2381
raise OSError(errno.EEXIST, 'Fake object already exists', dir_name)
# line: 2382
(head, tail) = self.SplitPath(dir_name)
# line: 2384
self.AddObject(head, FakeDirectory(tail, (mode & (~ self.umask)), filesystem=self))
# line: 2387
def MakeDirectories(self, dir_name, mode=PERM_DEF, exist_ok=False):
# line: 2402
'Create a leaf Fake directory and create any non-existent parent dirs.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create.\n mode: (int) Mode to create directory (and any necessary parent\n directories) with. This argument defaults to 0o777. The umask is\n applied to this mode.\n exist_ok: (boolean) If exist_ok is False (the default), an OSError is\n raised if the target directory already exists. New in Python 3.2.\n\n Raises:\n OSError: if the directory already exists and exist_ok=False, or as per\n `FakeFilesystem.CreateDirectory()`.\n '
# line: 2403
dir_name = self.NormalizePath(dir_name)
# line: 2404
path_components = self.GetPathComponents(dir_name)
# line: 2408
current_dir = self.root
# line: 2409
for component in path_components:
# line: 2410
if ((component not in current_dir.contents) or (not isinstance(current_dir.contents, dict))):
# line: 2412
break
else:
# line: 2414
current_dir = current_dir.contents[component]
# line: 2415
try:
# line: 2416
self.CreateDirectory(dir_name, (mode & (~ self.umask)))
# line: 2417
except (IOError, OSError) as e:
# line: 2418
if ((not exist_ok) or (not isinstance(self.ResolveObject(dir_name), FakeDirectory))):
# line: 2420
if isinstance(e, OSError):
# line: 2421
raise
# line: 2422
raise OSError(e.errno, e.strerror, e.filename)
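# Illustrative usage sketch: the os.makedirs() analogue; with exist_ok=True an
# already existing directory is not an error. `fs` is an assumed instance.
#
# >>> fs.MakeDirectories('/srv/www/htdocs')
# >>> fs.MakeDirectories('/srv/www/htdocs', exist_ok=True)  # no error raised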
# line: 2424
def _IsType(self, path, st_flag, follow_symlinks=True):
# line: 2438
"Helper function to implement isdir(), islink(), etc.\n\n See the stat(2) man page for valid stat.S_I* flag values\n\n Args:\n path: path to file to stat and test\n st_flag: the stat.S_I* flag checked for the file's st_mode\n\n Returns:\n boolean (the st_flag is set in path's st_mode)\n\n Raises:\n TypeError: if path is None\n "
# line: 2439
if (sys.version_info >= (3, 6)):
# line: 2440
path = os.fspath(path)
# line: 2441
if (path is None):
# line: 2442
raise TypeError
# line: 2443
try:
# line: 2444
obj = self.ResolveObject(path, follow_symlinks)
# line: 2445
if obj:
# line: 2446
return (stat.S_IFMT(obj.st_mode) == st_flag)
# line: 2447
except (IOError, OSError):
# line: 2448
return False
# line: 2449
return False
# line: 2451
def IsDir(self, path, follow_symlinks=True):
# line: 2463
'Determine if path identifies a directory.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a directory (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
# line: 2464
return self._IsType(path, stat.S_IFDIR, follow_symlinks)
# line: 2466
def IsFile(self, path, follow_symlinks=True):
# line: 2478
'Determine if path identifies a regular file.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a regular file (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
# line: 2479
return self._IsType(path, stat.S_IFREG, follow_symlinks)
# line: 2481
def IsLink(self, path):
# line: 2493
'Determine if path identifies a symbolic link.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a symlink (S_IFLNK set in st_mode)\n\n Raises:\n TypeError: if path is None.\n '
# line: 2494
return self._IsType(path, stat.S_IFLNK, follow_symlinks=False)
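# Illustrative usage sketch: the three predicates mirror os.path.isdir(),
# isfile() and islink(); note that IsLink() never follows symlinks.
# `fs` is an assumed FakeFilesystem instance.
#
# >>> f = fs.CreateFile('/data/f.txt')
# >>> l = fs.CreateLink('/data/l', '/data/f.txt')
# >>> fs.IsFile('/data/l'), fs.IsLink('/data/l'), fs.IsDir('/data')
# (True, True, True)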
# line: 2496
def ConfirmDir(self, target_directory):
# line: 2508
'Test that the target is actually a directory, raising OSError if not.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: path to the target directory within the fake filesystem.\n\n Returns:\n the FakeDirectory object corresponding to target_directory.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2509
try:
# line: 2510
directory = self.ResolveObject(target_directory)
# line: 2511
except IOError as exc:
# line: 2512
raise OSError(exc.errno, exc.strerror, target_directory)
# line: 2513
if (not (directory.st_mode & stat.S_IFDIR)):
# line: 2514
raise OSError(errno.ENOTDIR, 'Fake os module: not a directory', target_directory)
# line: 2517
return directory
# line: 2519
def RemoveFile(self, path):
# line: 2530
'Remove the FakeFile object at the specified file path.\n New in pyfakefs 3.0.\n\n Args:\n path: path to file to be removed.\n\n Raises:\n OSError: if path points to a directory.\n OSError: if path does not exist.\n OSError: if removal failed.\n '
# line: 2531
path = self.NormalizePath(path)
# line: 2532
if self.Exists(path):
# line: 2533
obj = self.ResolveObject(path)
# line: 2534
if (stat.S_IFMT(obj.st_mode) == stat.S_IFDIR):
# line: 2535
link_obj = self.LResolveObject(path)
# line: 2536
if (stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK):
# line: 2537
raise OSError(errno.EISDIR, ("Is a directory: '%s'" % path))
# line: 2539
try:
# line: 2540
self.RemoveObject(path)
# line: 2541
except IOError as exc:
# line: 2542
raise OSError(exc.errno, exc.strerror, exc.filename)
# line: 2544
def RemoveDirectory(self, target_directory, allow_symlink=False):
# line: 2557
"Remove a leaf Fake directory.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: (str) Name of directory to remove.\n allow_symlink: (bool) if `target_directory` is a symlink,\n the function just returns, otherwise it raises (Posix only)\n\n Raises:\n OSError: if target_directory does not exist.\n OSError: if target_directory does not point to a directory.\n OSError: if removal failed per FakeFilesystem.RemoveObject. Cannot remove '.'.\n "
# line: 2558
if (target_directory in ('.', u'.')):
# line: 2559
raise OSError(errno.EINVAL, "Invalid argument: '.'")
# line: 2560
target_directory = self.NormalizePath(target_directory)
# line: 2561
if self.ConfirmDir(target_directory):
# line: 2562
if ((not self.is_windows_fs) and self.IsLink(target_directory)):
# line: 2563
if allow_symlink:
# line: 2564
return
# line: 2565
raise OSError(errno.ENOTDIR, 'Cannot remove symlink', target_directory)
# line: 2567
dir_object = self.ResolveObject(target_directory)
# line: 2568
if dir_object.contents:
# line: 2569
raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty', target_directory)
# line: 2571
try:
# line: 2572
self.RemoveObject(target_directory)
# line: 2573
except IOError as exc:
# line: 2574
raise OSError(exc.errno, exc.strerror, exc.filename)
# line: 2576
def ListDir(self, target_directory):
# line: 2588
'Return a list of file names in target_directory.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: path to the target directory within the fake filesystem.\n\n Returns:\n a list of file names within the target directory in arbitrary order.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2589
target_directory = self.ResolvePath(target_directory, allow_fd=True)
# line: 2590
directory = self.ConfirmDir(target_directory)
# line: 2591
directory_contents = directory.contents
# line: 2592
return list(directory_contents.keys())
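# Illustrative usage sketch: returns bare entry names like os.listdir().
# `fs` is an assumed FakeFilesystem instance.
#
# >>> a = fs.CreateFile('/pics/a.png')
# >>> b = fs.CreateFile('/pics/b.png')
# >>> sorted(fs.ListDir('/pics'))
# ['a.png', 'b.png']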
# line: 2594
if (sys.version_info >= (3, 5)):
# line: 2595
class DirEntry:
# line: 2596
'Emulates os.DirEntry. Note that keyword-only arguments are not enforced.'
# line: 2598
def __init__(self, filesystem):
# line: 2603
'Initialize the dir entry with unset values.\n\n Args:\n filesystem: the fake filesystem used for implementation.\n '
# line: 2604
self._filesystem = filesystem
# line: 2605
self.name = ''
# line: 2606
self.path = ''
# line: 2607
self._inode = None
# line: 2608
self._islink = False
# line: 2609
self._isdir = False
# line: 2610
self._statresult = None
# line: 2611
self._statresult_symlink = None
# line: 2613
def inode(self):
# line: 2614
'Return the inode number of the entry.'
# line: 2615
if (self._inode is None):
# line: 2616
self.stat(follow_symlinks=False)
# line: 2617
return self._inode
# line: 2619
def is_dir(self, follow_symlinks=True):
# line: 2629
'Return True if this entry is a directory entry.\n\n Args:\n follow_symlinks: If True, also return True if this entry is a symlink\n pointing to a directory.\n\n Returns:\n True if this entry is an existing directory entry, or if\n follow_symlinks is set, and this entry points to an existing directory entry.\n '
# line: 2630
return (self._isdir and (follow_symlinks or (not self._islink)))
# line: 2632
def is_file(self, follow_symlinks=True):
# line: 2642
'Return True if this entry is a regular file entry.\n\n Args:\n follow_symlinks: If True, also return True if this entry is a symlink\n pointing to a regular file.\n\n Returns:\n True if this entry is an existing file entry, or if\n follow_symlinks is set, and this entry points to an existing file entry.\n '
# line: 2643
return ((not self._isdir) and (follow_symlinks or (not self._islink)))
# line: 2645
def is_symlink(self):
# line: 2646
'Return True if this entry is a symbolic link (even if broken).'
# line: 2647
return self._islink
# line: 2649
def stat(self, follow_symlinks=True):
# line: 2655
'Return a stat_result object for this entry.\n\n Args:\n follow_symlinks: If False and the entry is a symlink, return the\n result for the symlink, otherwise for the object it points to.\n '
# line: 2656
if follow_symlinks:
# line: 2657
if (self._statresult_symlink is None):
# line: 2658
file_object = self._filesystem.ResolveObject(self.path)
# line: 2659
if self._filesystem.is_windows_fs:
# line: 2662
file_object.st_ino = 0
# line: 2663
file_object.st_dev = 0
# line: 2664
file_object.st_nlink = 0
# line: 2665
self._statresult_symlink = file_object.stat_result.copy()
# line: 2666
return self._statresult_symlink
# line: 2668
if (self._statresult is None):
# line: 2669
file_object = self._filesystem.LResolveObject(self.path)
# line: 2670
self._inode = file_object.st_ino
# line: 2671
if self._filesystem.is_windows_fs:
# line: 2672
file_object.st_ino = 0
# line: 2673
file_object.st_dev = 0
# line: 2674
file_object.st_nlink = 0
# line: 2675
self._statresult = file_object.stat_result.copy()
# line: 2676
return self._statresult
# line: 2678
class ScanDirIter:
# line: 2681
'Iterator for DirEntry objects returned from `scandir()` function.\n New in pyfakefs 3.0.\n '
# line: 2683
def __init__(self, filesystem, path):
# line: 2684
self.filesystem = filesystem
# line: 2685
self.path = self.filesystem.ResolvePath(path)
# line: 2686
contents = {}
# line: 2687
try:
# line: 2688
contents = self.filesystem.ConfirmDir(path).contents
# line: 2689
except OSError:
# line: 2690
pass
# line: 2691
self.contents_iter = iter(contents)
# line: 2693
def __iter__(self):
# line: 2694
return self
# line: 2696
def __next__(self):
# line: 2697
entry = self.contents_iter.__next__()
# line: 2698
dir_entry = self.filesystem.DirEntry(self.filesystem)
# line: 2699
dir_entry.name = entry
# line: 2700
dir_entry.path = self.filesystem.JoinPaths(self.path, dir_entry.name)
# line: 2701
dir_entry._isdir = self.filesystem.IsDir(dir_entry.path)
# line: 2702
dir_entry._islink = self.filesystem.IsLink(dir_entry.path)
# line: 2703
return dir_entry
# line: 2705
if (sys.version_info >= (3, 6)):
# line: 2706
def __enter__(self):
# line: 2707
return self
# line: 2709
def __exit__(self, exc_type, exc_val, exc_tb):
# line: 2710
self.close()
# line: 2712
def close(self):
# line: 2713
pass
# line: 2715
def ScanDir(self, path=''):
# line: 2728
'Return an iterator of DirEntry objects corresponding to the entries\n in the directory given by path.\n New in pyfakefs 3.0.\n\n Args:\n path: path to the target directory within the fake filesystem.\n\n Returns:\n an iterator to an unsorted list of os.DirEntry objects for each entry in path.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2729
return self.ScanDirIter(self, path)
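# Illustrative usage sketch: iterates DirEntry objects like os.scandir()
# (Python 3.5+). Builds on the '/pics' entries created in the ListDir sketch.
#
# >>> sorted((e.name, e.is_file()) for e in fs.ScanDir('/pics'))
# [('a.png', True), ('b.png', True)]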
# line: 2731
def __str__(self):
# line: 2732
return str(self.root)
# line: 2735
class FakePathModule(object):
# line: 2740
'Faked os.path module replacement.\n\n FakePathModule should *only* be instantiated by FakeOsModule. See the\n FakeOsModule docstring for details.\n '
# line: 2741
_OS_PATH_COPY = CopyModule(os.path)
# line: 2743
def __init__(self, filesystem, os_module=None):
# line: 2749
'Init.\n\n Args:\n filesystem: FakeFilesystem used to provide file system information\n os_module: (deprecated) FakeOsModule to assign to self.os\n '
# line: 2750
self.filesystem = filesystem
# line: 2751
self._os_path = self._OS_PATH_COPY
# line: 2752
if (os_module is None):
# line: 2753
warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning, stacklevel=2)
# line: 2755
self._os_path.os = self.os = os_module
# line: 2756
self.sep = self.filesystem.path_separator
# line: 2757
self.altsep = self.filesystem.alternative_path_separator
# line: 2759
def exists(self, path):
# line: 2767
'Determine whether the file object exists within the fake filesystem.\n\n Args:\n path: path to the file object.\n\n Returns:\n bool (if file exists).\n '
# line: 2768
return self.filesystem.Exists(path)
# line: 2770
def lexists(self, path):
# line: 2778
'Test whether a path exists. Returns True for broken symbolic links.\n\n Args:\n path: path to the symlink object.\n\n Returns:\n bool (if file exists).\n '
# line: 2779
return (self.exists(path) or self.islink(path))
# line: 2781
def getsize(self, path):
# line: 2789
'Return the file object size in bytes.\n\n Args:\n path: path to the file object.\n\n Returns:\n file size in bytes.\n '
# line: 2790
try:
# line: 2791
file_obj = self.filesystem.ResolveObject(path)
# line: 2792
return file_obj.st_size
# line: 2793
except IOError as exc:
# line: 2794
raise os.error(exc.errno, exc.strerror)
# line: 2796
def isabs(self, path):
# line: 2797
'Return True if path is an absolute pathname.'
# line: 2798
if self.filesystem.is_windows_fs:
# line: 2799
path = self.splitdrive(path)[1]
# line: 2800
if (sys.version_info >= (3, 6)):
# line: 2801
path = os.fspath(path)
# line: 2802
sep = self.filesystem._path_separator(path)
# line: 2803
altsep = self.filesystem._alternative_path_separator(path)
# line: 2804
if self.filesystem.is_windows_fs:
# line: 2805
return ((len(path) > 0) and (path[:1] in (sep, altsep)))
else:
# line: 2807
return (path.startswith(sep) or ((altsep is not None) and path.startswith(altsep)))
# line: 2809
def isdir(self, path):
# line: 2810
'Determine if path identifies a directory.'
# line: 2811
return self.filesystem.IsDir(path)
# line: 2813
def isfile(self, path):
# line: 2814
'Determine if path identifies a regular file.'
# line: 2815
return self.filesystem.IsFile(path)
# line: 2817
def islink(self, path):
# line: 2828
'Determine if path identifies a symbolic link.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a symbolic link.\n\n Raises:\n TypeError: if path is None.\n '
# line: 2829
return self.filesystem.IsLink(path)
# line: 2831
def getmtime(self, path):
# line: 2843
'Returns the modification time of the fake file.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the modification time of the fake file\n in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2844
try:
# line: 2845
file_obj = self.filesystem.ResolveObject(path)
# line: 2846
except IOError as exc:
# line: 2847
raise OSError(errno.ENOENT, str(exc))
# line: 2848
return file_obj.st_mtime
# line: 2850
def getatime(self, path):
# line: 2863
'Returns the last access time of the fake file.\n\n Note: Access time is not set automatically in fake filesystem on access.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the access time of the fake file in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2864
try:
# line: 2865
file_obj = self.filesystem.ResolveObject(path)
# line: 2866
except IOError as exc:
# line: 2867
raise OSError(errno.ENOENT, str(exc))
# line: 2868
return file_obj.st_atime
# line: 2870
def getctime(self, path):
# line: 2881
'Returns the creation time of the fake file.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the creation time of the fake file in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2882
try:
# line: 2883
file_obj = self.filesystem.ResolveObject(path)
# line: 2884
except IOError as exc:
# line: 2885
raise OSError(errno.ENOENT, str(exc))
# line: 2886
return file_obj.st_ctime
# line: 2888
def abspath(self, path):
# line: 2889
'Return the absolute version of a path.'
# line: 2891
def getcwd():
# line: 2892
'Return the current working directory.'
# line: 2894
if ((sys.version_info < (3,)) and isinstance(path, unicode)):
# line: 2895
return self.os.getcwdu()
elif ((sys.version_info >= (3,)) and isinstance(path, bytes)):
# line: 2897
return self.os.getcwdb()
else:
# line: 2899
return self.os.getcwd()
# line: 2901
if (sys.version_info >= (3, 6)):
# line: 2902
path = os.fspath(path)
# line: 2904
sep = self.filesystem._path_separator(path)
# line: 2905
altsep = self.filesystem._alternative_path_separator(path)
# line: 2906
if (not self.isabs(path)):
# line: 2907
path = self.join(getcwd(), path)
elif ((self.filesystem.is_windows_fs and path.startswith(sep)) or ((altsep is not None) and path.startswith(altsep))):
# line: 2911
cwd = getcwd()
# line: 2912
if self.filesystem.StartsWithDriveLetter(cwd):
# line: 2913
path = self.join(cwd[:2], path)
# line: 2914
return self.normpath(path)
# line: 2916
def join(self, *p):
# line: 2917
'Return the completed path with a separator of the parts.'
# line: 2918
return self.filesystem.JoinPaths(*p)
# line: 2920
def split(self, path):
# line: 2923
'Split the path into the directory and the filename of the path.\n New in pyfakefs 3.0.\n '
# line: 2924
return self.filesystem.SplitPath(path)
# line: 2926
def splitdrive(self, path):
# line: 2929
'Split the path into the drive part and the rest of the path, if supported.\n New in pyfakefs 2.9.\n '
# line: 2930
return self.filesystem.SplitDrive(path)
# line: 2932
def normpath(self, path):
# line: 2933
'Normalize path, eliminating double slashes, etc.'
# line: 2934
return self.filesystem.CollapsePath(path)
# line: 2936
def normcase(self, path):
# line: 2939
'Convert to lower case under Windows, replacing the alternative path separator.\n New in pyfakefs 2.9.\n '
# line: 2940
path = self.filesystem.NormalizePathSeparator(path)
# line: 2941
if self.filesystem.is_windows_fs:
# line: 2942
path = path.lower()
# line: 2943
return path
# line: 2945
def relpath(self, path, start=None):
# line: 2946
'We mostly rely on the native implementation and adapt the path separator.'
# line: 2947
if (not path):
# line: 2948
raise ValueError('no path specified')
# line: 2949
if (sys.version_info >= (3, 6)):
# line: 2950
path = os.fspath(path)
# line: 2951
if (start is not None):
# line: 2952
start = os.fspath(start)
# line: 2953
if (start is None):
# line: 2954
start = self.filesystem.cwd
# line: 2955
if (self.filesystem.alternative_path_separator is not None):
# line: 2956
path = path.replace(self.filesystem.alternative_path_separator, self._os_path.sep)
# line: 2957
start = start.replace(self.filesystem.alternative_path_separator, self._os_path.sep)
# line: 2958
path = path.replace(self.filesystem.path_separator, self._os_path.sep)
# line: 2959
start = start.replace(self.filesystem.path_separator, self._os_path.sep)
# line: 2960
path = self._os_path.relpath(path, start)
# line: 2961
return path.replace(self._os_path.sep, self.filesystem.path_separator)
# line: 2963
def realpath(self, filename):
# line: 2967
'Return the canonical path of the specified filename, eliminating any\n symbolic links encountered in the path.\n New in pyfakefs 3.0.\n '
# line: 2968
if self.filesystem.is_windows_fs:
# line: 2969
return self.abspath(filename)
# line: 2970
if (sys.version_info >= (3, 6)):
# line: 2971
filename = os.fspath(filename)
# line: 2972
(path, ok) = self._joinrealpath(filename[:0], filename, {})
# line: 2973
return self.abspath(path)
# line: 2975
if ((sys.platform != 'win32') or (sys.version_info >= (3, 2))):
# line: 2976
def samefile(self, path1, path2):
# line: 2987
'Return whether path1 and path2 point to the same file.\n Windows support new in Python 3.2.\n New in pyfakefs 3.3.\n\n Args:\n path1: first file path or path object (Python >=3.6)\n path2: second file path or path object (Python >=3.6)\n\n Raises:\n OSError: if one of the paths does not point to an existing file system object.\n '
# line: 2988
stat1 = self.filesystem.GetStat(path1)
# line: 2989
stat2 = self.filesystem.GetStat(path2)
# line: 2990
return ((stat1.st_ino == stat2.st_ino) and (stat1.st_dev == stat2.st_dev))
# line: 2992
def _joinrealpath(self, path, rest, seen):
# line: 2996
'Join two paths, normalizing and eliminating any symbolic links\n encountered in the second path.\n Taken from Python source and adapted.\n '
# line: 2997
curdir = self.filesystem._matching_string(path, '.')
# line: 2998
pardir = self.filesystem._matching_string(path, '..')
# line: 3000
sep = self.filesystem._path_separator(path)
# line: 3001
if self.isabs(rest):
# line: 3002
rest = rest[1:]
# line: 3003
path = sep
# line: 3005
while rest:
# line: 3006
(name, _, rest) = rest.partition(sep)
# line: 3007
if ((not name) or (name == curdir)):
# line: 3009
continue
# line: 3010
if (name == pardir):
# line: 3012
if path:
# line: 3013
(path, name) = self.filesystem.SplitPath(path)
# line: 3014
if (name == pardir):
# line: 3015
path = self.filesystem.JoinPaths(path, pardir, pardir)
else:
# line: 3017
path = pardir
# line: 3018
continue
# line: 3019
newpath = self.filesystem.JoinPaths(path, name)
# line: 3020
if (not self.filesystem.IsLink(newpath)):
# line: 3021
path = newpath
# line: 3022
continue
# line: 3024
if (newpath in seen):
# line: 3026
path = seen[newpath]
# line: 3027
if (path is not None):
# line: 3029
continue
# line: 3032
return (self.filesystem.JoinPaths(newpath, rest), False)
# line: 3033
seen[newpath] = None
# line: 3034
(path, ok) = self._joinrealpath(path, self.filesystem.ReadLink(newpath), seen)
# line: 3035
if (not ok):
# line: 3036
return (self.filesystem.JoinPaths(path, rest), False)
# line: 3037
seen[newpath] = path
# line: 3038
return (path, True)
# line: 3040
def dirname(self, path):
# line: 3043
'Returns the first part of the result of `split()`.\n New in pyfakefs 3.0.\n '
# line: 3044
return self.split(path)[0]
# line: 3046
def expanduser(self, path):
# line: 3049
"Return the argument with an initial component of ~ or ~user\n replaced by that user's home directory.\n "
# line: 3050
return self._os_path.expanduser(path).replace(self._os_path.sep, self.sep)
# line: 3052
def ismount(self, path):
# line: 3062
'Return true if the given path is a mount point.\n New in pyfakefs 2.9.\n\n Args:\n path: path to filesystem object to be checked\n\n Returns:\n True if path is a mount point added to the fake file system.\n Under Windows also returns True for drive and UNC roots (independent of their existence).\n '
# line: 3063
if (sys.version_info >= (3, 6)):
# line: 3064
path = os.fspath(path)
# line: 3065
if (not path):
# line: 3066
return False
# line: 3067
normed_path = self.filesystem.NormalizePath(path)
# line: 3068
sep = self.filesystem._path_separator(path)
# line: 3069
if self.filesystem.is_windows_fs:
# line: 3070
if (self.filesystem.alternative_path_separator is not None):
# line: 3071
path_seps = (sep, self.filesystem._alternative_path_separator(path))
else:
# line: 3075
path_seps = (sep,)
# line: 3076
(drive, rest) = self.filesystem.SplitDrive(normed_path)
# line: 3077
if (drive and (drive[:1] in path_seps)):
# line: 3078
return ((not rest) or (rest in path_seps))
# line: 3079
if (rest in path_seps):
# line: 3080
return True
# line: 3081
for mount_point in self.filesystem.mount_points:
# line: 3082
if (normed_path.rstrip(sep) == mount_point.rstrip(sep)):
# line: 3083
return True
# line: 3084
return False
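# Illustrative usage sketch: only the root and explicitly added mount points
# count (plus drive/UNC roots under Windows). `fake_os` is an assumed
# FakeOsModule (defined further below) wrapping the fake filesystem `fs`.
#
# >>> fake_os = FakeOsModule(fs)
# >>> fake_os.path.ismount('/')
# True
# >>> fake_os.path.ismount('/tmp')
# False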
# line: 3086
if (sys.version_info < (3, 0)):
# line: 3087
def walk(self, top, func, arg):
# line: 3095
'Directory tree walk with callback function.\n New in pyfakefs 3.0.\n\n Args:\n top: root path to traverse. The root itself is not included in the called elements.\n func: function to be called for each visited path node.\n arg: first argument to be called with func (apart from dirname and filenames).\n '
# line: 3096
try:
# line: 3097
names = self.filesystem.ListDir(top)
# line: 3098
except os.error:
# line: 3099
return
# line: 3100
func(arg, top, names)
# line: 3101
for name in names:
# line: 3102
name = self.filesystem.JoinPaths(top, name)
# line: 3103
if self.filesystem.is_windows_fs:
# line: 3104
if self.filesystem.IsDir(name):
# line: 3105
self.walk(name, func, arg)
else:
# line: 3107
try:
# line: 3108
st = self.filesystem.GetStat(name, follow_symlinks=False)
# line: 3109
except os.error:
# line: 3110
continue
# line: 3111
if stat.S_ISDIR(st.st_mode):
# line: 3112
self.walk(name, func, arg)
# line: 3114
def __getattr__(self, name):
# line: 3115
'Forwards any non-faked calls to the real os.path.'
# line: 3116
return getattr(self._os_path, name)
# line: 3119
class FakeOsModule(object):
# line: 3130
'Uses FakeFilesystem to provide a fake os module replacement.\n\n Do not create os.path separately from os, as there is a necessary circular\n dependency between os and os.path to replicate the behavior of the standard\n Python modules. What you want to do is to just let FakeOsModule take care of\n os.path setup itself.\n\n # You always want to do this.\n filesystem = fake_filesystem.FakeFilesystem()\n my_os_module = fake_filesystem.FakeOsModule(filesystem)\n '
# line: 3132
_stat_float_times = (sys.version_info >= (2, 5))
# line: 3134
def __init__(self, filesystem, os_path_module=None):
# line: 3140
'Also exposes self.path (to fake os.path).\n\n Args:\n filesystem: FakeFilesystem used to provide file system information\n os_path_module: (deprecated) optional FakePathModule instance\n '
# line: 3141
self.filesystem = filesystem
# line: 3142
self.sep = filesystem.path_separator
# line: 3143
self.altsep = filesystem.alternative_path_separator
# line: 3144
self._os_module = os
# line: 3145
if (os_path_module is None):
# line: 3146
self.path = FakePathModule(self.filesystem, self)
else:
# line: 3148
warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning, stacklevel=2)
# line: 3150
self.path = os_path_module
# line: 3151
if (sys.version_info < (3, 0)):
# line: 3152
self.fdopen = self._fdopen_ver2
else:
# line: 3154
self.fdopen = self._fdopen
# line: 3156
def _fdopen(self, *args, **kwargs):
# line: 3168
'Redirector to open() builtin function.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n\n Returns:\n File object corresponding to file_des.\n\n Raises:\n TypeError: if file descriptor is not an integer.\n '
# line: 3169
if (not isinstance(args[0], int)):
# line: 3170
raise TypeError('an integer is required')
# line: 3171
return FakeFileOpen(self.filesystem)(*args, **kwargs)
# line: 3173
def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
# line: 3188
'Returns an open file object connected to the file descriptor file_des.\n\n Args:\n file_des: An integer file descriptor for the file object requested.\n mode: additional file flags. Currently checks to see if the mode matches\n the mode of the requested file object.\n bufsize: ignored. (Used for signature compliance with __builtin__.fdopen)\n\n Returns:\n File object corresponding to file_des.\n\n Raises:\n OSError: if bad file descriptor or incompatible mode is given.\n TypeError: if file descriptor is not an integer.\n '
# line: 3189
if (not isinstance(file_des, int)):
# line: 3190
raise TypeError('an integer is required')
# line: 3192
try:
# line: 3193
return FakeFileOpen(self.filesystem).Call(file_des, mode=mode)
# line: 3194
except IOError as exc:
# line: 3195
raise OSError(exc)
# line: 3197
def _umask(self):
# line: 3198
'Return the current umask.'
# line: 3199
if self.filesystem.is_windows_fs:
# line: 3201
return 0
# line: 3202
if (sys.platform == 'win32'):
# line: 3204
return 2
else:
# line: 3209
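            # POSIX has no umask getter: the current value is read by setting
            # it to 0 and immediately restoring the previous mask.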
mask = os.umask(0)
# line: 3210
os.umask(mask)
# line: 3211
return mask
# line: 3214
def open(self, file_path, flags, mode=None, dir_fd=None):
# line: 3233
'Return the file descriptor for a FakeFile.\n\n Args:\n file_path: the path to the file\n flags: low-level bits to indicate io operation\n mode: bits to define default permissions\n Note: only basic modes are supported, OS-specific modes are ignored\n dir_fd: If not `None`, the file descriptor of a directory,\n with `file_path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Returns:\n A file descriptor.\n\n Raises:\n IOError: if the path cannot be found\n ValueError: if invalid mode is given\n NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`\n '
# line: 3234
file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)
# line: 3235
if (mode is None):
# line: 3236
if self.filesystem.is_windows_fs:
# line: 3237
                mode = 438  # 0o666
else:
# line: 3239
                mode = (511 & (~ self._umask()))  # 0o777 adjusted by the umask
# line: 3241
open_modes = _OpenModes(must_exist=(not (flags & os.O_CREAT)), can_read=(not (flags & os.O_WRONLY)), can_write=(flags & (os.O_RDWR | os.O_WRONLY)), truncate=(flags & os.O_TRUNC), append=(flags & os.O_APPEND), must_not_exist=(flags & os.O_EXCL))
# line: 3249
if (open_modes.must_not_exist and open_modes.must_exist):
# line: 3250
raise NotImplementedError('O_EXCL without O_CREAT mode is not supported')
# line: 3252
if ((not self.filesystem.is_windows_fs) and (not open_modes.can_write) and self.filesystem.Exists(file_path)):
# line: 3256
obj = self.filesystem.ResolveObject(file_path)
# line: 3257
if isinstance(obj, FakeDirectory):
# line: 3258
dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)
# line: 3259
file_des = self.filesystem.AddOpenFile(dir_wrapper)
# line: 3260
dir_wrapper.filedes = file_des
# line: 3261
return file_des
# line: 3264
str_flags = 'b'
# line: 3265
delete_on_close = False
# line: 3266
if hasattr(os, 'O_TEMPORARY'):
# line: 3267
delete_on_close = ((flags & os.O_TEMPORARY) == os.O_TEMPORARY)
# line: 3268
fake_file = FakeFileOpen(self.filesystem, delete_on_close=delete_on_close, raw_io=True)(file_path, str_flags, open_modes=open_modes)
# line: 3271
self.chmod(file_path, mode)
# line: 3272
return fake_file.fileno()
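    # Illustrative sketch of the descriptor-level API (hypothetical path):
    #   fd = fake_os.open('/tmp/out.txt', os.O_CREAT | os.O_WRONLY)
    #   fake_os.write(fd, b'data')
    #   fake_os.close(fd)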
# line: 3274
def close(self, file_des):
# line: 3283
'Close a file descriptor.\n\n Args:\n file_des: An integer file descriptor for the file object requested.\n\n Raises:\n OSError: bad file descriptor.\n TypeError: if file descriptor is not an integer.\n '
# line: 3284
file_handle = self.filesystem.GetOpenFile(file_des)
# line: 3285
file_handle.close()
# line: 3287
def read(self, file_des, num_bytes):
# line: 3300
'Read number of bytes from a file descriptor, returns bytes read.\n\n Args:\n file_des: An integer file descriptor for the file object requested.\n num_bytes: Number of bytes to read from file.\n\n Returns:\n Bytes read from file.\n\n Raises:\n OSError: bad file descriptor.\n TypeError: if file descriptor is not an integer.\n '
# line: 3301
file_handle = self.filesystem.GetOpenFile(file_des)
# line: 3302
file_handle.raw_io = True
# line: 3303
return file_handle.read(num_bytes)
# line: 3305
def write(self, file_des, contents):
# line: 3318
'Write string to file descriptor, returns number of bytes written.\n\n Args:\n file_des: An integer file descriptor for the file object requested.\n contents: String of bytes to write to file.\n\n Returns:\n Number of bytes written.\n\n Raises:\n OSError: bad file descriptor.\n TypeError: if file descriptor is not an integer.\n '
# line: 3319
file_handle = self.filesystem.GetOpenFile(file_des)
# line: 3320
file_handle.raw_io = True
# line: 3321
file_handle._sync_io()
# line: 3322
file_handle.write(contents)
# line: 3323
file_handle.flush()
# line: 3324
return len(contents)
# line: 3326
@classmethod
# line: 3326
def stat_float_times(cls, newvalue=None):
# line: 3337
"Determine whether a file's time stamps are reported as floats or ints.\n New in pyfakefs 2.9.\n\n Calling without arguments returns the current value. The value is shared\n by all instances of FakeOsModule.\n\n Args:\n newvalue: if True, mtime, ctime, atime are reported as floats.\n Else, as ints (rounding down).\n "
# line: 3338
if (newvalue is not None):
# line: 3339
cls._stat_float_times = bool(newvalue)
# line: 3340
return cls._stat_float_times
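    # Example: FakeOsModule.stat_float_times(False) switches st_atime/st_mtime/
    # st_ctime to ints for *all* FakeOsModule instances - the flag is
    # class-level state shared through `cls._stat_float_times`.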
# line: 3342
def fstat(self, file_des):
# line: 3353
"Return the os.stat-like tuple for the FakeFile object of file_des.\n\n Args:\n file_des: file descriptor of filesystem object to retrieve.\n\n Returns:\n the FakeStatResult object corresponding to entry_path.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
# line: 3355
file_object = self.filesystem.GetOpenFile(file_des).GetObject()
# line: 3356
return file_object.stat_result.copy()
# line: 3358
def umask(self, new_mask):
# line: 3369
'Change the current umask.\n\n Args:\n new_mask: An integer.\n\n Returns:\n The old mask.\n\n Raises:\n TypeError: new_mask is of an invalid type.\n '
# line: 3370
if (not isinstance(new_mask, int)):
# line: 3371
raise TypeError('an integer is required')
# line: 3372
old_umask = self.filesystem.umask
# line: 3373
self.filesystem.umask = new_mask
# line: 3374
return old_umask
# line: 3376
def chdir(self, target_directory):
# line: 3385
'Change current working directory to target directory.\n\n Args:\n target_directory: path to new current working directory.\n\n Raises:\n OSError: if user lacks permission to enter the argument directory or if\n the target is not a directory\n '
# line: 3386
target_directory = self.filesystem.ResolvePath(target_directory, allow_fd=True)
# line: 3387
self.filesystem.ConfirmDir(target_directory)
# line: 3388
directory = self.filesystem.ResolveObject(target_directory)
# line: 3390
        # chdir requires execute permission on the target directory; the
        # permission bit must be tested with a bitwise AND (an OR here would
        # always be truthy and make the check a no-op).
        if (not (directory.st_mode & PERM_EXE)):
# line: 3391
raise OSError(errno.EACCES, 'Fake os module: permission denied', directory)
# line: 3393
self.filesystem.cwd = target_directory
# line: 3395
def getcwd(self):
# line: 3396
'Return current working directory.'
# line: 3397
return self.filesystem.cwd
# line: 3399
if (sys.version_info < (3,)):
# line: 3400
def getcwdu(self):
# line: 3401
'Return current working directory as unicode. Python 2 only.'
# line: 3402
return unicode(self.filesystem.cwd)
else:
# line: 3405
def getcwdb(self):
# line: 3406
'Return current working directory as bytes. Python 3 only.'
# line: 3407
return bytes(self.filesystem.cwd, locale.getpreferredencoding(False))
# line: 3409
def listdir(self, target_directory):
# line: 3421
'Return a list of file names in target_directory.\n\n Args:\n target_directory: path to the target directory within the fake\n filesystem.\n\n Returns:\n a list of file names within the target directory in arbitrary order.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 3422
return self.filesystem.ListDir(target_directory)
# line: 3424
if (sys.platform.startswith('linux') and (sys.version_info >= (3, 3))):
# line: 3425
def listxattr(self, path=None, follow_symlinks=True):
# line: 3426
'Dummy implementation that returns an empty list - used by shutil.'
# line: 3427
return []
# line: 3429
if (sys.version_info >= (3, 5)):
# line: 3430
def scandir(self, path=''):
# line: 3442
'Return an iterator of DirEntry objects corresponding to the entries\n in the directory given by path.\n\n Args:\n path: path to the target directory within the fake filesystem.\n\n Returns:\n an iterator to an unsorted list of os.DirEntry objects for each entry in path.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 3443
return self.filesystem.ScanDir(path)
# line: 3445
def _ClassifyDirectoryContents(self, root):
# line: 3460
'Classify contents of a directory as files/directories.\n\n Args:\n root: (str) Directory to examine.\n\n Returns:\n (tuple) A tuple consisting of three values: the directory examined, a\n list containing all of the directory entries, and a list containing all\n of the non-directory entries. (This is the same format as returned by\n the os.walk generator.)\n\n Raises:\n Nothing on its own, but be ready to catch exceptions generated by\n underlying mechanisms like os.listdir.\n '
# line: 3461
dirs = []
# line: 3462
files = []
# line: 3463
for entry in self.listdir(root):
# line: 3464
if self.path.isdir(self.path.join(root, entry)):
# line: 3465
dirs.append(entry)
else:
# line: 3467
files.append(entry)
# line: 3468
return (root, dirs, files)
# line: 3470
def walk(self, top, topdown=True, onerror=None, followlinks=False):
# line: 3486
'Perform an os.walk operation over the fake filesystem.\n\n Args:\n top: root directory from which to begin walk.\n topdown: determines whether to return the tuples with the root as the\n first entry (True) or as the last, after all the child directory\n tuples (False).\n onerror: if not None, function which will be called to handle the\n os.error instance provided when os.listdir() fails.\n followlinks: if True, symbolic links are followed. New in pyfakefs 2.9.\n\n Yields:\n (path, directories, nondirectories) for top and each of its\n subdirectories. See the documentation for the builtin os module for\n further details.\n '
# line: 3487
def do_walk(top, topMost=False):
# line: 3488
top = self.path.normpath(top)
# line: 3489
if ((not topMost) and (not followlinks) and self.path.islink(top)):
# line: 3490
return
# line: 3491
try:
# line: 3492
top_contents = self._ClassifyDirectoryContents(top)
# line: 3493
except OSError as exc:
# line: 3494
top_contents = None
# line: 3495
if (onerror is not None):
# line: 3496
onerror(exc)
# line: 3498
if (top_contents is not None):
# line: 3499
if topdown:
# line: 3500
yield top_contents
# line: 3502
for directory in top_contents[1]:
# line: 3503
if ((not followlinks) and self.path.islink(directory)):
# line: 3504
continue
# line: 3505
for contents in do_walk(self.path.join(top, directory)):
# line: 3506
yield contents
# line: 3508
if (not topdown):
# line: 3509
yield top_contents
# line: 3511
return do_walk(top, topMost=True)
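    # Illustrative sketch (hypothetical tree) - behaves like os.walk over the
    # fake hierarchy:
    #   for root, dirs, files in fake_os.walk('/src'):
    #       print(root, dirs, files)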
# line: 3514
def readlink(self, path, dir_fd=None):
# line: 3530
'Read the target of a symlink.\n\n Args:\n path: Symlink to read the target of.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Returns:\n the string representing the path to which the symbolic link points.\n\n Raises:\n TypeError: if `path` is None\n OSError: (with errno=ENOENT) if path is not a valid path, or\n (with errno=EINVAL) if path is valid, but is not a symlink.\n '
# line: 3531
path = self._path_with_dir_fd(path, self.readlink, dir_fd)
# line: 3532
return self.filesystem.ReadLink(path)
# line: 3534
def stat(self, entry_path, dir_fd=None, follow_symlinks=None):
# line: 3551
"Return the os.stat-like tuple for the FakeFile object of entry_path.\n\n Args:\n entry_path: path to filesystem object to retrieve.\n dir_fd: (int) If not `None`, the file descriptor of a directory,\n with `entry_path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and `entry_path` points to a symlink,\n the link itself is changed instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n\n Returns:\n the FakeStatResult object corresponding to entry_path.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
# line: 3552
if (follow_symlinks is None):
# line: 3553
follow_symlinks = True
elif (sys.version_info < (3, 3)):
# line: 3555
raise TypeError("stat() got an unexpected keyword argument 'follow_symlinks'")
# line: 3556
entry_path = self._path_with_dir_fd(entry_path, self.stat, dir_fd)
# line: 3557
return self.filesystem.GetStat(entry_path, follow_symlinks)
# line: 3559
def lstat(self, entry_path, dir_fd=None):
# line: 3573
"Return the os.stat-like tuple for entry_path, not following symlinks.\n\n Args:\n entry_path: path to filesystem object to retrieve.\n dir_fd: If not `None`, the file descriptor of a directory, with `entry_path`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Returns:\n the FakeStatResult object corresponding to `entry_path`.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
# line: 3575
entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
# line: 3576
return self.filesystem.GetStat(entry_path, follow_symlinks=False)
# line: 3578
def remove(self, path, dir_fd=None):
# line: 3591
'Remove the FakeFile object at the specified file path.\n\n Args:\n path: Path to file to be removed.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if path points to a directory.\n OSError: if path does not exist.\n OSError: if removal failed.\n '
# line: 3592
path = self._path_with_dir_fd(path, self.remove, dir_fd)
# line: 3593
self.filesystem.RemoveFile(path)
# line: 3595
def unlink(self, path, dir_fd=None):
# line: 3608
'Remove the FakeFile object at the specified file path.\n\n Args:\n path: Path to file to be removed.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if path points to a directory.\n OSError: if path does not exist.\n OSError: if removal failed.\n '
# line: 3609
path = self._path_with_dir_fd(path, self.unlink, dir_fd)
# line: 3610
self.filesystem.RemoveFile(path)
# line: 3612
def rename(self, old_file_path, new_file_path, dir_fd=None):
# line: 3631
'Rename a FakeFile object at old_file_path to new_file_path,\n preserving all properties.\n Also replaces existing new_file_path object, if one existed (Unix only).\n\n Args:\n old_file_path: Path to filesystem object to rename.\n new_file_path: Path to where the filesystem object will live after this call.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `old_file_path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if old_file_path does not exist.\n OSError: if new_file_path is an existing directory.\n OSError: if new_file_path is an existing file (Windows only)\n OSError: if new_file_path is an existing file and could not be removed (Unix)\n OSError: if `dirname(new_file)` does not exist\n OSError: if the file would be moved to another filesystem (e.g. mount point)\n '
# line: 3632
old_file_path = self._path_with_dir_fd(old_file_path, self.rename, dir_fd)
# line: 3633
self.filesystem.RenameObject(old_file_path, new_file_path)
# line: 3635
if (sys.version_info >= (3, 3)):
# line: 3636
def replace(self, old_file_path, new_file_path):
# line: 3652
'Renames a FakeFile object at old_file_path to new_file_path,\n preserving all properties.\n Also replaces existing new_file_path object, if one existed.\n New in pyfakefs 3.0.\n\n Args:\n old_file_path: path to filesystem object to rename\n new_file_path: path to where the filesystem object will live after this call\n\n Raises:\n OSError: if old_file_path does not exist.\n OSError: if new_file_path is an existing directory.\n OSError: if new_file_path is an existing file and could not be removed\n OSError: if `dirname(new_file)` does not exist\n OSError: if the file would be moved to another filesystem (e.g. mount point)\n '
# line: 3653
self.filesystem.RenameObject(old_file_path, new_file_path, force_replace=True)
# line: 3655
def rmdir(self, target_directory, dir_fd=None):
# line: 3667
"Remove a leaf Fake directory.\n\n Args:\n target_directory: (str) Name of directory to remove.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `target_directory` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if target_directory does not exist or is not a directory,\n or as per FakeFilesystem.RemoveObject. Cannot remove '.'.\n "
# line: 3668
target_directory = self._path_with_dir_fd(target_directory, self.rmdir, dir_fd)
# line: 3669
self.filesystem.RemoveDirectory(target_directory)
# line: 3671
def removedirs(self, target_directory):
# line: 3680
'Remove a leaf fake directory and all empty intermediate ones.\n\n Args:\n target_directory: the directory to be removed.\n\n Raises:\n OSError: if target_directory does not exist or is not a directory.\n OSError: if target_directory is not empty.\n '
# line: 3681
target_directory = self.filesystem.NormalizePath(target_directory)
# line: 3682
directory = self.filesystem.ConfirmDir(target_directory)
# line: 3683
if directory.contents:
# line: 3684
raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty', self.path.basename(target_directory))
else:
# line: 3687
self.rmdir(target_directory)
# line: 3688
(head, tail) = self.path.split(target_directory)
# line: 3689
if (not tail):
# line: 3690
(head, tail) = self.path.split(head)
# line: 3691
while (head and tail):
# line: 3692
head_dir = self.filesystem.ConfirmDir(head)
# line: 3693
if head_dir.contents:
# line: 3694
break
# line: 3696
self.filesystem.RemoveDirectory(head, allow_symlink=True)
# line: 3697
(head, tail) = self.path.split(head)
# line: 3699
def mkdir(self, dir_name, mode=PERM_DEF, dir_fd=None):
# line: 3714
"Create a leaf Fake directory.\n\n Args:\n dir_name: (str) Name of directory to create.\n Relative paths are assumed to be relative to '/'.\n mode: (int) Mode to create directory with. This argument defaults to\n 0o777. The umask is applied to this mode.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `dir_name` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if the directory name is invalid or parent directory is read only\n or as per FakeFilesystem.AddObject.\n "
# line: 3715
dir_name = self._path_with_dir_fd(dir_name, self.mkdir, dir_fd)
# line: 3716
self.filesystem.MakeDirectory(dir_name, mode)
# line: 3718
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=None):
# line: 3733
'Create a leaf Fake directory + create any non-existent parent dirs.\n\n Args:\n dir_name: (str) Name of directory to create.\n mode: (int) Mode to create directory (and any necessary parent\n directories) with. This argument defaults to 0o777. The umask is\n applied to this mode.\n exist_ok: (boolean) If exist_ok is False (the default), an OSError is\n raised if the target directory already exists. New in Python 3.2.\n New in pyfakefs 2.9.\n\n Raises:\n OSError: if the directory already exists and exist_ok=False, or as per\n `FakeFilesystem.CreateDirectory()`.\n '
# line: 3734
if (exist_ok is None):
# line: 3735
exist_ok = False
elif (sys.version_info < (3, 2)):
# line: 3737
raise TypeError("makedir() got an unexpected keyword argument 'exist_ok'")
# line: 3738
self.filesystem.MakeDirectories(dir_name, mode, exist_ok)
# line: 3740
def _path_with_dir_fd(self, path, fct, dir_fd):
# line: 3741
        'Return the path considering dir_fd. Raise on invalid parameters.'
# line: 3742
if (dir_fd is not None):
# line: 3743
if (sys.version_info < (3, 3)):
# line: 3744
raise TypeError(("%s() got an unexpected keyword argument 'dir_fd'" % fct.__name__))
# line: 3747
real_fct = getattr(os, fct.__name__)
# line: 3748
if (real_fct not in self.supports_dir_fd):
# line: 3749
raise NotImplementedError('dir_fd unavailable on this platform')
# line: 3750
if isinstance(path, int):
# line: 3751
raise ValueError(("%s: Can't specify dir_fd without matching path" % fct.__name__))
# line: 3753
if (not self.path.isabs(path)):
# line: 3754
return self.path.join(self.filesystem.GetOpenFile(dir_fd).GetObject().GetPath(), path)
# line: 3756
return path
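    # Example: if `dir_fd` was opened for '/base', a relative path 'sub/f.txt'
    # resolves to '/base/sub/f.txt'; absolute paths pass through unchanged.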
# line: 3758
def access(self, path, mode, dir_fd=None, follow_symlinks=None):
# line: 3774
'Check if a file exists and has the specified permissions.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions represented as a bitwise-OR combination of\n os.F_OK, os.R_OK, os.W_OK, and os.X_OK.\n dir_fd: If not `None`, the file descriptor of a directory, with `path`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and `path` points to a symlink,\n the link itself is queried instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n\n Returns:\n bool, `True` if file is accessible, `False` otherwise.\n '
# line: 3775
if ((follow_symlinks is not None) and (sys.version_info < (3, 3))):
# line: 3776
raise TypeError("access() got an unexpected keyword argument 'follow_symlinks'")
# line: 3777
path = self._path_with_dir_fd(path, self.access, dir_fd)
# line: 3778
try:
# line: 3779
stat_result = self.stat(path, follow_symlinks=follow_symlinks)
# line: 3780
except OSError as os_error:
# line: 3781
if (os_error.errno == errno.ENOENT):
# line: 3782
return False
# line: 3783
raise
# line: 3784
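        # (st_mode >> 6) & 7 isolates the owner rwx bits; e.g. with mode 0o754
        # they are 0b111, so a check for os.R_OK | os.W_OK (0b110) succeeds.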
return ((mode & ((stat_result.st_mode >> 6) & 7)) == mode)
# line: 3786
def chmod(self, path, mode, dir_fd=None, follow_symlinks=None):
# line: 3798
'Change the permissions of a file as encoded in integer mode.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions.\n dir_fd: If not `None`, the file descriptor of a directory, with `path`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and `path` points to a symlink,\n the link itself is queried instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n '
# line: 3799
if (follow_symlinks is None):
# line: 3800
follow_symlinks = True
elif (sys.version_info < (3, 3)):
# line: 3802
raise TypeError("chmod() got an unexpected keyword argument 'follow_symlinks'")
# line: 3803
path = self._path_with_dir_fd(path, self.chmod, dir_fd)
# line: 3804
self.filesystem.ChangeMode(path, mode, follow_symlinks)
# line: 3806
def lchmod(self, path, mode):
# line: 3813
'Change the permissions of a file as encoded in integer mode.\n If the file is a link, the permissions of the link are changed.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions.\n '
# line: 3814
if self.filesystem.is_windows_fs:
# line: 3815
            raise NameError("name 'lchmod' is not defined")
# line: 3816
self.filesystem.ChangeMode(path, mode, follow_symlinks=False)
# line: 3818
def utime(self, path, times=None, ns=None, dir_fd=None, follow_symlinks=None):
# line: 3842
'Change the access and modified times of a file.\n\n Args:\n path: (str) Path to the file.\n times: 2-tuple of int or float numbers, of the form (atime, mtime) \n which is used to set the access and modified times in seconds. \n If None, both times are set to the current time.\n ns: 2-tuple of int numbers, of the form (atime, mtime) which is \n used to set the access and modified times in nanoseconds. \n If None, both times are set to the current time.\n New in Python 3.3. New in pyfakefs 3.3.\n dir_fd: If not `None`, the file descriptor of a directory, with `path`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and `path` points to a symlink,\n the link itself is queried instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n \n Raises:\n TypeError: If anything other than the expected types is \n specified in the passed `times` or `ns` tuple, \n or if the tuple length is not equal to 2.\n ValueError: If both times and ns are specified.\n '
# line: 3843
if (follow_symlinks is None):
# line: 3844
follow_symlinks = True
elif (sys.version_info < (3, 3)):
# line: 3846
raise TypeError("utime() got an unexpected keyword argument 'follow_symlinks'")
# line: 3847
path = self._path_with_dir_fd(path, self.utime, dir_fd)
# line: 3848
if ((ns is not None) and (sys.version_info < (3, 3))):
# line: 3849
raise TypeError("utime() got an unexpected keyword argument 'ns'")
# line: 3851
self.filesystem.UpdateTime(path, times, ns, follow_symlinks)
# line: 3853
def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None):
# line: 3872
'Set ownership of a faked file.\n\n Args:\n path: (str) Path to the file or directory.\n uid: (int) Numeric uid to set the file or directory to.\n gid: (int) Numeric gid to set the file or directory to.\n dir_fd: (int) If not `None`, the file descriptor of a directory,\n with `path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and path points to a symlink,\n the link itself is changed instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n\n Raises:\n OSError: if path does not exist.\n\n `None` is also allowed for `uid` and `gid`. This permits `os.rename` to\n use `os.chown` even when the source file `uid` and `gid` are `None` (unset).\n '
# line: 3873
if (follow_symlinks is None):
# line: 3874
follow_symlinks = True
elif (sys.version_info < (3, 3)):
# line: 3876
raise TypeError("chown() got an unexpected keyword argument 'follow_symlinks'")
# line: 3877
path = self._path_with_dir_fd(path, self.chown, dir_fd)
# line: 3878
try:
# line: 3879
file_object = self.filesystem.ResolveObject(path, follow_symlinks, allow_fd=True)
# line: 3880
except IOError as io_error:
# line: 3881
if (io_error.errno == errno.ENOENT):
# line: 3882
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
# line: 3885
raise
# line: 3886
if (not ((isinstance(uid, int) or (uid is None)) and (isinstance(gid, int) or (gid is None)))):
# line: 3888
raise TypeError('An integer is required')
# line: 3889
if (uid != (-1)):
# line: 3890
file_object.st_uid = uid
# line: 3891
if (gid != (-1)):
# line: 3892
file_object.st_gid = gid
# line: 3894
def mknod(self, filename, mode=None, device=None, dir_fd=None):
# line: 3914
"Create a filesystem node named 'filename'.\n\n Does not support device special files or named pipes as the real os\n module does.\n\n Args:\n filename: (str) Name of the file to create\n mode: (int) Permissions to use and type of file to be created.\n Default permissions are 0o666. Only the stat.S_IFREG file type\n is supported by the fake implementation. The umask is applied\n to this mode.\n device: not supported in fake implementation\n dir_fd: If not `None`, the file descriptor of a directory,\n with `filename` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if called with unsupported options or the file can not be\n created.\n "
# line: 3915
if self.filesystem.is_windows_fs:
# line: 3916
            raise AttributeError("module 'os' has no attribute 'mknod'")
# line: 3917
if (mode is None):
# line: 3918
mode = (stat.S_IFREG | PERM_DEF_FILE)
# line: 3919
if (device or (not (mode & stat.S_IFREG))):
# line: 3920
raise OSError(errno.ENOENT, 'Fake os mknod implementation only supports regular files.')
# line: 3924
filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)
# line: 3925
(head, tail) = self.path.split(filename)
# line: 3926
if (not tail):
# line: 3927
if self.filesystem.Exists(head):
# line: 3928
raise OSError(errno.EEXIST, ('Fake filesystem: %s: %s' % (os.strerror(errno.EEXIST), filename)))
# line: 3930
raise OSError(errno.ENOENT, ('Fake filesystem: %s: %s' % (os.strerror(errno.ENOENT), filename)))
# line: 3932
if (tail in ('.', u'.', '..', u'..')):
# line: 3933
            raise OSError(errno.ENOENT, ('Fake filesystem: %s: %s' % (os.strerror(errno.ENOENT), filename)))
# line: 3935
if self.filesystem.Exists(filename):
# line: 3936
            raise OSError(errno.EEXIST, ('Fake filesystem: %s: %s' % (os.strerror(errno.EEXIST), filename)))
# line: 3938
try:
# line: 3939
self.filesystem.AddObject(head, FakeFile(tail, (mode & (~ self.filesystem.umask)), filesystem=self.filesystem))
# line: 3942
except IOError as e:
# line: 3943
raise OSError(e.errno, ('Fake filesystem: %s: %s' % (os.strerror(e.errno), filename)))
# line: 3946
def symlink(self, link_target, path, dir_fd=None):
# line: 3958
'Creates the specified symlink, pointed at the specified link target.\n\n Args:\n link_target: The target of the symlink.\n path: Path to the symlink to create.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `link_target` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if the file already exists.\n '
# line: 3959
link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)
# line: 3960
self.filesystem.CreateLink(path, link_target, create_missing_dirs=False)
# line: 3962
def link(self, oldpath, newpath, dir_fd=None):
# line: 3980
"Create a hard link at new_path, pointing at old_path.\n New in pyfakefs 2.9.\n\n Args:\n old_path: An existing link to the target file.\n new_path: The destination path to create a new link at.\n dir_fd: If not `None`, the file descriptor of a directory, with `oldpath`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Returns:\n the FakeFile object referred to by `oldpath`.\n\n Raises:\n OSError: if something already exists at new_path.\n OSError: if the parent directory doesn't exist.\n OSError: if on Windows before Python 3.2.\n "
# line: 3981
oldpath = self._path_with_dir_fd(oldpath, self.link, dir_fd)
# line: 3982
self.filesystem.CreateHardLink(oldpath, newpath)
# line: 3984
def fsync(self, file_des):
# line: 3994
'Perform fsync for a fake file (in other words, do nothing).\n New in pyfakefs 2.9.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: file_des is an invalid file descriptor.\n TypeError: file_des is not an integer.\n '
# line: 3996
self.filesystem.GetOpenFile(file_des)
# line: 3998
def fdatasync(self, file_des):
# line: 4008
'Perform fdatasync for a fake file (in other words, do nothing).\n New in pyfakefs 2.9.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: file_des is an invalid file descriptor.\n TypeError: file_des is not an integer.\n '
# line: 4010
self.filesystem.GetOpenFile(file_des)
# line: 4012
def __getattr__(self, name):
# line: 4013
'Forwards any unfaked calls to the standard os module.'
# line: 4014
return getattr(self._os_module, name)
# line: 4017
class FakeIoModule(object):
# line: 4026
'Uses FakeFilesystem to provide a fake io module replacement.\n New in pyfakefs 2.9.\n\n Currently only used to wrap `io.open()` which is an alias to `open()`.\n\n You need a fake_filesystem to use this:\n filesystem = fake_filesystem.FakeFilesystem()\n my_io_module = fake_filesystem.FakeIoModule(filesystem)\n '
# line: 4028
def __init__(self, filesystem):
# line: 4032
'\n Args:\n filesystem: FakeFilesystem used to provide file system information\n '
# line: 4033
self.filesystem = filesystem
# line: 4034
self._io_module = io
# line: 4036
def open(self, file_path, mode='r', buffering=(-1), encoding=None, errors=None, newline=None, closefd=True, opener=None):
# line: 4040
'Redirect the call to FakeFileOpen.\n See FakeFileOpen.Call() for description.\n '
# line: 4041
if ((opener is not None) and (sys.version_info < (3, 3))):
# line: 4042
raise TypeError("open() got an unexpected keyword argument 'opener'")
# line: 4043
fake_open = FakeFileOpen(self.filesystem, use_io=True)
# line: 4044
return fake_open(file_path, mode, buffering, encoding, errors, newline, closefd, opener)
# line: 4046
def __getattr__(self, name):
# line: 4047
'Forwards any unfaked calls to the standard io module.'
# line: 4048
return getattr(self._io_module, name)
# line: 4051
class FakeFileWrapper(object):
# line: 4056
'Wrapper for a stream object for use by a FakeFile object.\n\n If the wrapper has any data written to it, it will propagate to\n the FakeFile object on close() or flush().\n '
# line: 4057
def __init__(self, file_object, file_path, update=False, read=False, append=False, delete_on_close=False, filesystem=None, newline=None, binary=True, closefd=True, encoding=None, errors=None, raw_io=False, is_stream=False, use_io=True):
# line: 4061
self._file_object = file_object
# line: 4062
self._file_path = file_path
# line: 4063
self._append = append
# line: 4064
self._read = read
# line: 4065
self.allow_update = update
# line: 4066
self._closefd = closefd
# line: 4067
self._file_epoch = file_object.epoch
# line: 4068
self.raw_io = raw_io
# line: 4069
self._binary = binary
# line: 4070
self.is_stream = is_stream
# line: 4071
contents = file_object.byte_contents
# line: 4072
self._encoding = encoding
# line: 4073
errors = (errors or 'strict')
# line: 4074
if encoding:
# line: 4075
file_wrapper = FakeFileWrapper(file_object, file_path, update, read, append, delete_on_close=False, filesystem=filesystem, newline=None, binary=True, closefd=closefd, is_stream=True)
# line: 4079
codec_info = codecs.lookup(encoding)
# line: 4080
self._io = codecs.StreamReaderWriter(file_wrapper, codec_info.streamreader, codec_info.streamwriter, errors)
else:
# line: 4083
if ((not binary) and (sys.version_info >= (3, 0))):
# line: 4084
io_class = io.StringIO
else:
# line: 4086
io_class = io.BytesIO
# line: 4087
io_args = ({} if binary else {'newline': newline, })
# line: 4088
if (contents and (not binary)):
# line: 4089
contents = contents.decode((encoding or locale.getpreferredencoding(False)), errors=errors)
# line: 4091
if (contents and (not update)):
# line: 4092
self._io = io_class(contents, **io_args)
else:
# line: 4094
self._io = io_class(**io_args)
# line: 4096
if contents:
# line: 4097
if update:
# line: 4098
if (not encoding):
# line: 4099
self._io.write(contents)
# line: 4100
if (not append):
# line: 4101
self._io.seek(0)
else:
# line: 4103
self._read_whence = 0
# line: 4104
if (read and (not use_io)):
# line: 4105
self._read_seek = 0
else:
# line: 4107
self._read_seek = self._io.tell()
else:
# line: 4109
self._read_whence = 0
# line: 4110
self._read_seek = 0
# line: 4112
if delete_on_close:
# line: 4113
assert filesystem, 'delete_on_close=True requires filesystem'
# line: 4114
self._filesystem = filesystem
# line: 4115
self.delete_on_close = delete_on_close
# line: 4118
self.name = file_object.opened_as
# line: 4119
self.filedes = None
# line: 4121
def __enter__(self):
# line: 4122
"To support usage of this fake file with the 'with' statement."
# line: 4123
return self
# line: 4125
def __exit__(self, type, value, traceback):
# line: 4126
"To support usage of this fake file with the 'with' statement."
# line: 4127
self.close()
# line: 4129
def _raise(self, message):
# line: 4130
if self.raw_io:
# line: 4131
raise OSError(errno.EBADF, message)
# line: 4132
if (sys.version_info < (3, 0)):
# line: 4133
raise IOError(message)
# line: 4134
raise io.UnsupportedOperation(message)
# line: 4136
def GetObject(self):
# line: 4137
'Return the FakeFile object that is wrapped by the current instance.'
# line: 4138
return self._file_object
# line: 4140
def fileno(self):
# line: 4141
'Return the file descriptor of the file object.'
# line: 4142
return self.filedes
# line: 4144
def close(self):
# line: 4145
'Close the file.'
# line: 4147
if (self not in self._filesystem.open_files):
# line: 4148
return
# line: 4150
if (self.allow_update and (not self.raw_io)):
# line: 4151
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4152
if self._closefd:
# line: 4153
self._filesystem.CloseOpenFile(self.filedes)
# line: 4154
if self.delete_on_close:
# line: 4155
self._filesystem.RemoveObject(self.GetObject().GetPath())
# line: 4157
def flush(self):
# line: 4158
"Flush file contents to 'disk'."
# line: 4159
self._check_open_file()
# line: 4160
if self.allow_update:
# line: 4161
self._io.flush()
# line: 4162
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4163
self._file_epoch = self._file_object.epoch
# line: 4165
def seek(self, offset, whence=0):
# line: 4166
"Move read/write pointer in 'file'."
# line: 4167
self._check_open_file()
# line: 4168
if (not self._append):
# line: 4169
self._io.seek(offset, whence)
else:
# line: 4171
self._read_seek = offset
# line: 4172
self._read_whence = whence
# line: 4173
if (not self.is_stream):
# line: 4174
self.flush()
# line: 4176
def tell(self):
# line: 4181
"Return the file's current position.\n\n Returns:\n int, file's current position in bytes.\n "
# line: 4182
self._check_open_file()
# line: 4183
self._flush_for_read()
# line: 4184
if (not self._append):
# line: 4185
return self._io.tell()
# line: 4186
if self._read_whence:
# line: 4187
write_seek = self._io.tell()
# line: 4188
self._io.seek(self._read_seek, self._read_whence)
# line: 4189
self._read_seek = self._io.tell()
# line: 4190
self._read_whence = 0
# line: 4191
self._io.seek(write_seek)
# line: 4192
return self._read_seek
# line: 4194
def _flush_for_read(self):
# line: 4196
if self._flushes_after_read():
# line: 4197
self.flush()
# line: 4199
def _flushes_after_read(self):
# line: 4200
return ((not self.is_stream) and ((not self._filesystem.is_windows_fs) or (sys.version_info[0] > 2)))
# line: 4204
def _sync_io(self):
# line: 4205
'Update the stream with changes to the file object contents.'
# line: 4206
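        # The FakeFile bumps `epoch` whenever its contents change externally;
        # equal epochs mean this stream is already in sync and can bail out.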
if (self._file_epoch == self._file_object.epoch):
# line: 4207
return
# line: 4209
if isinstance(self._io, io.BytesIO):
# line: 4210
contents = self._file_object.byte_contents
else:
# line: 4212
contents = self._file_object.contents
# line: 4214
is_stream_reader_writer = isinstance(self._io, codecs.StreamReaderWriter)
# line: 4215
if is_stream_reader_writer:
# line: 4216
self._io.stream.allow_update = True
# line: 4217
whence = self._io.tell()
# line: 4218
self._io.seek(0)
# line: 4219
self._io.truncate()
# line: 4220
self._io.write(contents)
# line: 4221
if self._append:
# line: 4222
self._io.seek(0, os.SEEK_END)
else:
# line: 4224
self._io.seek(whence)
# line: 4226
if is_stream_reader_writer:
# line: 4227
self._io.stream.allow_update = False
# line: 4228
self._file_epoch = self._file_object.epoch
# line: 4230
def _ReadWrapper(self, name):
# line: 4241
'Wrap a stream attribute in a read wrapper.\n\n Returns a read_wrapper which tracks our own read pointer since the\n stream object has no concept of a different read and write pointer.\n\n Args:\n name: the name of the attribute to wrap. Should be a read call.\n\n Returns:\n either a read_error or read_wrapper function.\n '
# line: 4242
io_attr = getattr(self._io, name)
# line: 4244
def read_wrapper(*args, **kwargs):
# line: 4256
"Wrap all read calls to the stream object.\n\n We do this to track the read pointer separate from the write\n pointer. Anything that wants to read from the stream object\n while we're in append mode goes through this.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n "
# line: 4257
self._io.seek(self._read_seek, self._read_whence)
# line: 4258
ret_value = io_attr(*args, **kwargs)
# line: 4259
self._read_seek = self._io.tell()
# line: 4260
self._read_whence = 0
# line: 4261
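            # park the underlying stream back at EOF, where append-mode writes go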
self._io.seek(0, 2)
# line: 4262
return ret_value
# line: 4264
return read_wrapper
# line: 4266
def _OtherWrapper(self, name, writing):
# line: 4274
'Wrap a stream attribute in an other_wrapper.\n\n Args:\n name: the name of the stream attribute to wrap.\n\n Returns:\n other_wrapper which is described below.\n '
# line: 4275
io_attr = getattr(self._io, name)
# line: 4277
def other_wrapper(*args, **kwargs):
# line: 4289
            'Wrap all other calls to the stream object.\n\n We do this to track changes to the write pointer. Anything that\n moves the write pointer in a file open for appending should move\n the read pointer as well.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n '
# line: 4290
write_seek = self._io.tell()
# line: 4291
ret_value = io_attr(*args, **kwargs)
# line: 4292
if (write_seek != self._io.tell()):
# line: 4293
self._read_seek = self._io.tell()
# line: 4294
self._read_whence = 0
# line: 4295
if ((not writing) or (sys.version_info >= (3,))):
# line: 4296
return ret_value
# line: 4298
return other_wrapper
# line: 4300
def _TruncateWrapper(self):
# line: 4305
'Wrap truncate() to allow flush after truncate.\n\n Returns:\n wrapper which is described below.\n '
# line: 4306
io_attr = getattr(self._io, 'truncate')
# line: 4308
def truncate_wrapper(*args, **kwargs):
# line: 4309
'Wrap truncate call to call flush after truncate.'
# line: 4310
if self._append:
# line: 4311
self._io.seek(self._read_seek, self._read_whence)
# line: 4312
size = io_attr(*args, **kwargs)
# line: 4313
self.flush()
# line: 4314
if (not self.is_stream):
# line: 4315
self._file_object.SetSize(size)
# line: 4316
buffer_size = len(self._io.getvalue())
# line: 4317
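            # POSIX truncate() may also grow a file; pad the gap with NUL bytes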
if (buffer_size < size):
# line: 4318
self._io.seek(buffer_size)
# line: 4319
self._io.write(('\x00' * (size - buffer_size)))
# line: 4320
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4321
if (sys.version_info >= (3,)):
# line: 4322
return size
# line: 4324
return truncate_wrapper
# line: 4326
def _WriteWrapper(self, name):
# line: 4331
'Wrap write() to adapt return value for Python 2.\n\n Returns:\n wrapper which is described below.\n '
# line: 4332
io_attr = getattr(self._io, name)
# line: 4334
def write_wrapper(*args, **kwargs):
# line: 4335
            'Wrap the write call; suppress the return value under Python 2, where file.write() returns None.'
# line: 4336
ret_value = io_attr(*args, **kwargs)
# line: 4337
if (sys.version_info >= (3,)):
# line: 4338
return ret_value
# line: 4340
return write_wrapper
# line: 4342
def Size(self):
# line: 4343
'Return the content size in bytes of the wrapped file.'
# line: 4344
return self._file_object.st_size
# line: 4346
def __getattr__(self, name):
# line: 4347
if self._file_object.IsLargeFile():
# line: 4348
raise FakeLargeFileIoException(self._file_path)
# line: 4350
reading = (name.startswith('read') or (name == 'next'))
# line: 4351
truncate = (name == 'truncate')
# line: 4352
writing = (name.startswith('write') or truncate)
# line: 4353
if (reading or writing):
# line: 4354
self._check_open_file()
# line: 4355
if ((not self._read) and reading):
# line: 4356
def read_error(*args, **kwargs):
# line: 4357
'Throw an error unless the argument is zero.'
# line: 4358
if (args and (args[0] == 0)):
# line: 4359
if (self._filesystem.is_windows_fs and self.raw_io):
# line: 4360
                        return (b'' if self._binary else u'')
# line: 4361
self._raise('File is not open for reading.')
# line: 4363
return read_error
# line: 4365
if ((not self.allow_update) and writing):
# line: 4366
def write_error(*args, **kwargs):
# line: 4367
'Throw an error.'
# line: 4368
if self.raw_io:
# line: 4369
if (self._filesystem.is_windows_fs and args and (len(args[0]) == 0)):
# line: 4370
return 0
# line: 4371
self._raise('File is not open for writing.')
# line: 4373
return write_error
# line: 4375
if reading:
# line: 4376
self._sync_io()
# line: 4377
self._flush_for_read()
# line: 4378
if truncate:
# line: 4379
return self._TruncateWrapper()
# line: 4380
if self._append:
# line: 4381
if reading:
# line: 4382
return self._ReadWrapper(name)
else:
# line: 4384
return self._OtherWrapper(name, writing)
# line: 4385
if writing:
# line: 4386
return self._WriteWrapper(name)
# line: 4388
return getattr(self._io, name)
# line: 4390
def _check_open_file(self):
# line: 4391
if ((not self.is_stream) and (not (self in self._filesystem.open_files))):
# line: 4392
raise ValueError('I/O operation on closed file')
# line: 4394
def __iter__(self):
# line: 4395
if (not self._read):
# line: 4396
self._raise('File is not open for reading')
# line: 4397
return self._io.__iter__()
# line: 4400
class FakeDirWrapper(object):
# line: 4402
'Wrapper for a FakeDirectory object to be used in open files list.\n '
# line: 4403
def __init__(self, file_object, file_path, filesystem):
# line: 4404
self._file_object = file_object
# line: 4405
self._file_path = file_path
# line: 4406
self._filesystem = filesystem
# line: 4407
self.filedes = None
# line: 4409
def GetObject(self):
# line: 4410
'Return the FakeFile object that is wrapped by the current instance.'
# line: 4411
return self._file_object
# line: 4413
def fileno(self):
# line: 4414
'Return the file descriptor of the file object.'
# line: 4415
return self.filedes
# line: 4417
def close(self):
# line: 4418
'Close the directory.'
# line: 4419
self._filesystem.CloseOpenFile(self.filedes)
# line: 4422
class FakeFileOpen(object):
# line: 4427
'Faked `file()` and `open()` function replacements.\n\n Returns FakeFile objects in a FakeFilesystem in place of the `file()`\n or `open()` function.\n '
# line: 4428
__name__ = 'FakeFileOpen'
# line: 4430
def __init__(self, filesystem, delete_on_close=False, use_io=False, raw_io=False):
# line: 4438
'init.\n\n Args:\n filesystem: FakeFilesystem used to provide file system information\n delete_on_close: optional boolean, deletes file on close()\n use_io: if True, the io.open() version is used (ignored for Python 3,\n where io.open() is an alias to open() )\n '
# line: 4439
self.filesystem = filesystem
# line: 4440
self._delete_on_close = delete_on_close
# line: 4441
self._use_io = (use_io or (sys.version_info >= (3, 0)) or (platform.python_implementation() == 'PyPy'))
# line: 4443
self.raw_io = raw_io
# line: 4445
def __call__(self, *args, **kwargs):
# line: 4446
'Redirects calls to file() or open() to appropriate method.'
# line: 4447
if self._use_io:
# line: 4448
return self.Call(*args, **kwargs)
else:
# line: 4450
return self._call_ver2(*args, **kwargs)
# line: 4452
def _call_ver2(self, file_path, mode='r', buffering=(-1), flags=None, open_modes=None):
# line: 4453
'Limits args of open() or file() for Python 2.x versions.'
# line: 4455
mode = (flags or mode)
# line: 4456
return self.Call(file_path, mode, buffering, open_modes=open_modes)
# line: 4458
def Call(self, file_, mode='r', buffering=(-1), encoding=None, errors=None, newline=None, closefd=True, opener=None, open_modes=None):
# line: 4484
"Return a file-like object with the contents of the target file object.\n\n Args:\n file_: path to target file or a file descriptor.\n mode: additional file modes. All r/w/a/x r+/w+/a+ modes are supported.\n 't', and 'U' are ignored, e.g., 'wU' is treated as 'w'. 'b' sets\n binary mode, no end of line translations in StringIO.\n buffering: ignored. (Used for signature compliance with __builtin__.open)\n encoding: the encoding used to encode unicode strings / decode bytes.\n New in pyfakefs 2.9.\n errors: ignored, this relates to encoding.\n newline: controls universal newlines, passed to stream object.\n closefd: if a file descriptor rather than file name is passed, and set\n to false, then the file descriptor is kept open when file is closed.\n opener: not supported.\n open_modes: Modes for opening files if called from low-level API\n\n Returns:\n a file-like object containing the contents of the target file.\n\n Raises:\n IOError: if the target object is a directory, the path is invalid or\n permission is denied.\n "
# line: 4485
orig_modes = mode
# line: 4487
binary = ((sys.version_info < (3, 0)) or ('b' in mode))
# line: 4489
mode = mode.replace('t', '').replace('b', '')
# line: 4490
mode = mode.replace('rU', 'r').replace('U', 'r')
# line: 4492
if (not self.raw_io):
# line: 4493
if (mode not in _OPEN_MODE_MAP):
# line: 4494
raise ValueError(('Invalid mode: %r' % orig_modes))
# line: 4495
open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
# line: 4497
file_object = None
# line: 4498
filedes = None
# line: 4500
if isinstance(file_, int):
# line: 4501
filedes = file_
# line: 4502
wrapper = self.filesystem.GetOpenFile(filedes)
# line: 4503
self._delete_on_close = wrapper.delete_on_close
# line: 4504
file_object = self.filesystem.GetOpenFile(filedes).GetObject()
# line: 4505
file_path = file_object.name
else:
# line: 4507
file_path = file_
# line: 4508
real_path = self.filesystem.ResolvePath(file_path, raw_io=self.raw_io)
# line: 4509
if self.filesystem.Exists(file_path):
# line: 4510
file_object = self.filesystem.GetObjectFromNormalizedPath(real_path)
# line: 4511
closefd = True
# line: 4513
error_class = (OSError if self.raw_io else IOError)
# line: 4514
if (open_modes.must_not_exist and (file_object or self.filesystem.IsLink(file_path))):
# line: 4515
raise error_class(errno.EEXIST, 'File exists', file_path)
# line: 4516
if file_object:
# line: 4517
if ((open_modes.can_read and (not (file_object.st_mode & PERM_READ))) or (open_modes.can_write and (not (file_object.st_mode & PERM_WRITE)))):
# line: 4519
raise error_class(errno.EACCES, 'Permission denied', file_path)
# line: 4520
if open_modes.can_write:
# line: 4521
if open_modes.truncate:
# line: 4522
file_object.SetContents('')
else:
# line: 4524
if open_modes.must_exist:
# line: 4525
raise error_class(errno.ENOENT, 'No such file or directory', file_path)
# line: 4526
file_object = self.filesystem.CreateFileInternally(real_path, create_missing_dirs=False, apply_umask=True, raw_io=self.raw_io)
# line: 4529
if stat.S_ISDIR(file_object.st_mode):
# line: 4530
if self.filesystem.is_windows_fs:
# line: 4531
raise OSError(errno.EPERM, 'Fake file object: is a directory', file_path)
else:
# line: 4533
raise error_class(errno.EISDIR, 'Fake file object: is a directory', file_path)
# line: 4537
file_object.opened_as = file_path
# line: 4539
fakefile = FakeFileWrapper(file_object, file_path, update=open_modes.can_write, read=open_modes.can_read, append=open_modes.append, delete_on_close=self._delete_on_close, filesystem=self.filesystem, newline=newline, binary=binary, closefd=closefd, encoding=encoding, errors=errors, raw_io=self.raw_io, use_io=self._use_io)
# line: 4553
if (filedes is not None):
# line: 4554
fakefile.filedes = filedes
# line: 4556
self.filesystem.open_files[filedes] = fakefile
else:
# line: 4558
fakefile.filedes = self.filesystem.AddOpenFile(fakefile)
# line: 4559
return fakefile
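    # Illustrative sketch (hypothetical path; file created beforehand):
    #   fake_open = FakeFileOpen(filesystem)
    #   with fake_open('/motd.txt') as f:
    #       text = f.read()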
# line: 4562
def _RunDoctest():
# line: 4563
import doctest
# line: 4564
from pyfakefs import fake_filesystem
# line: 4565
return doctest.testmod(fake_filesystem)
# line: 4568
if (__name__ == '__main__'):
# line: 4569
    _RunDoctest()

# ==== ml/m15_randomSearch3.py (repo: ym0179/bit_seoul) ====
#Day12
#2020-11-24
# Diabetes dataset
# Model: RandomForestRegressor
import pandas as pd
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score, r2_score
import warnings
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
warnings.filterwarnings('ignore')
# 1. Data
x,y = load_diabetes(return_X_y=True)
# train-test split
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=66, shuffle=True, train_size=0.8)
# 2. Model
kfold = KFold(n_splits=5, shuffle=True)
params = [
    {'n_estimators' : [300, 400, 500], # number of trees, default=10; more trees *may* perform better (but training gets slower)
    'max_depth' : [6, 8, 10], # tree depth, default=None (split until leaves are pure); limit it to control overfitting
    'min_samples_leaf' : [7, 10, 12, 14], # minimum samples required at a leaf node, default=2; controls overfitting together with min_samples_split
    'min_samples_split' : [12, 14, 16], # minimum samples required to split a node, default=2; smaller values mean more splits and a higher overfitting risk
    'n_jobs' : [-1]} # use all CPU cores
]
model = RandomizedSearchCV(RandomForestRegressor(), params, cv=kfold, verbose=2)
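# Note: RandomizedSearchCV samples n_iter (default 10) parameter combinations
# from `params`, so with cv=5 this fits 10 * 5 = 50 models plus one final refit.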
# 3. Train
model.fit(x_train,y_train) #model: RandomizedSearchCV
# 4. Evaluate, predict
print("최적의 매개변수 : ", model.best_estimator_)
print("최적 하이퍼 파라미터 : ", model.best_params_)
print("최고 정확도 : {0:.4f}".format(model.best_score_))
# RandomizedSearchCV refit으로 이미 학습이 된 estimator 반환
estimator = model.best_estimator_
y_predict = estimator.predict(x_test)
print("(테스트 데이터 세트 r2) 최종정답률 : ", r2_score(y_test,y_predict))
'''
Best estimator :  RandomForestRegressor(max_depth=6, min_samples_leaf=12,
                      min_samples_split=12,
                      n_estimators=400, n_jobs=-1)
Best hyperparameters :  {'n_jobs': -1, 'n_estimators': 400, 'min_samples_split': 12, 'min_samples_leaf': 12, 'max_depth': 6}
Best score : 0.4409
(test set r2) final score :  0.4142511040047415
'''

# ==== eds/controller/__init__.py (repo: fengges/eds) ====
# author :feng
# time :2018/1/25
# function : scan all files, find every blueprint and register it
import os, sys, importlib
from flask import Blueprint
#------------ scan all files ----------
bp_file=[]
def eachFile(filepath):
try :
pathDir = os.listdir(filepath)
    except OSError:  # not a directory or unreadable - skip it
return
for allDir in pathDir:
        # ignore files and folders whose names start with __
if allDir.startswith('__') :
continue
path=filepath+'/'+allDir
        # recurse into directories
if not os.path.isfile(path):
eachFile(path)
else:
            entry = [filepath, allDir]  # avoid shadowing the builtin map()
            bp_file.append(entry)
eachFile(sys.path[0]+'/eds/controller')
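# bp_file now holds [directory, filename] pairs, e.g. (hypothetical)
# ['<project>/eds/controller/user', 'login.py']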
#------------ import blueprints ----------
bp_list=[]
for bp in bp_file:
dirs=bp[0].replace(sys.path[0]+'/','').replace('/','.')
if bp[1].find('.txt')>=0:
continue
name=bp[1].replace('.py','')
code="from "+dirs+" import "+name+" as a"
exec(code)
list=eval("dir(a)")
for l in list:
if l.startswith('__') :
continue
temp=eval('a.'+l)
if type(temp)==Blueprint:
bp_list.append(temp)

# ==== pro/13.py (repo: Shamabanu/python-1) ====
def min(l,s,e):
    # NOTE: the enclosing function shadows the builtin min(); seed with the
    # first element in range instead of the original magic 999 sentinel,
    # which silently failed for values >= 999.
    smallest = l[s]
    for i in range(s + 1, e + 1):
        if smallest > l[i]:
            smallest = l[i]
    return smallest
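# Note: each query is answered in O(e - s) time; for many queries over a static
# list, a sparse table or segment tree would bring this down to O(1) or O(log n).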
def main():
n=int(input())
l=[]
for i in range(n):
l.append(int(input()))
q=int(input())
out=[]
for i in range(q):
s=int(input())
e=int(input())
out.append(min(l,s,e))
for i in out:
print(i)
p
| [
"[email protected]"
] | |
fc4eba5935059e6128b133cc2c060cdb972f9a15 | dcf2ab4e1a66d53eaaa1260a8824882fbd0bbd9f | /bonus of salary.py | f4ca9a0bd3e168131d1e460d06d77175392a50b1 | [] | no_license | gauriindalkar/if-else | a54189ef8e6368be5cc6b8a9fdafa6e9497843be | fac48790ffb9f6d021eff921a80ff57e399a4f77 | refs/heads/main | 2023-05-11T05:59:13.275673 | 2021-05-29T17:24:19 | 2021-05-29T17:24:19 | 371,955,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | salary=int(input("enter salary"))
years=int(input("enter year of salary"))
if years>5%100:
print("bonus is",5/100*salary)
else:
print("no bonus") | [
"[email protected]"
] | |
b1d060eda455ff0f11063c05df77ac25e88142cf | 80694fba85a4096e9069d836ab71917d5f8b3540 | /rpc_less1/__init__.py | fc1860f538e1d6f1da72aa51a8c431fc4e400142 | [] | no_license | gitletian/zerorpc_test | dad732fd3d9086bbf4200e2d6b790afb377bb685 | 7f11a62dee1ea71cf1ed743c7bc17a3397a806c8 | refs/heads/master | 2021-06-23T18:25:37.210949 | 2019-07-09T07:59:39 | 2019-07-09T07:59:39 | 147,283,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # coding: utf-8
# __author__: ""
# like https://github.com/0rpc/zerorpc-python/blob/master/tests/test_middleware.py
| [
"[email protected]"
] | |
a42aa2ddf7f80efdf7902f6b7dd7298a8d9b657a | 23b333449524887594530f73c0079ce60cb8eefb | /python_module/examples/279_Perfect_Squares.py | 2b38c220680fe826fc33b7042145e52e6e043b5d | [] | no_license | benbendaisy/CommunicationCodes | 9deb371095f5d67e260030d3d8abf211c90e7642 | 444cc502ef26810b46115797f2e26ab305a4ebdf | refs/heads/master | 2023-08-09T21:46:58.691987 | 2023-07-20T05:11:39 | 2023-07-20T05:11:39 | 27,856,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | import math
from cmath import sqrt
from functools import lru_cache
class Solution:
"""
Given an integer n, return the least number of perfect square numbers that sum to n.
A perfect square is an integer that is the square of an integer; in other words, it is the product of some integer with itself. For example, 1, 4, 9, and 16 are perfect squares while 3 and 11 are not.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
Constraints:
1 <= n <= 104
"""
def numSquares(self, n: int) -> int:
square_nums = [i ** 2 for i in range(1, int(sqrt(n)) + 1)]
dp = [math.inf] * (n + 1)
dp[0] = 0
for i in range(1, n + 1):
for square in square_nums:
if i < square:
break
dp[i] = min(dp[i], dp[i - square] + 1)
return dp[n]
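    # Sanity check (hypothetical driver, not part of the solution class):
    #   Solution().numSquares(12) -> 3   (12 = 4 + 4 + 4)
    #   Solution().numSquares(13) -> 2   (13 = 4 + 9)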
def numSquares1(self, n: int) -> int:
square_nums = [i ** 2 for i in range(1, int(sqrt(n)) + 1)]
@lru_cache(None)
def squares(k: int) -> int:
if k in square_nums:
return 1
min_square = math.inf
for square in square_nums:
if k < square:
break
min_square = min(min_square, squares(k - square) + 1)
return min_square
return squares(n) | [
"[email protected]"
] | |
855c6aaf9eed566e4170ab64ac60019fbc1e0d0a | 76fc4ffc931ce83cfdfc9846435d92f1f217af26 | /jmbo/migrations/0004_photosize_name_length.py | 00a8457f321175fcf2da3750bbc25a86dd0be0f3 | [] | no_license | praekelt/jmbo | 70d2a4c8c19ffcc0b10ed1b915c05a6453ecb3de | b674c14c6611191643870a070ca8c9f229776776 | refs/heads/develop | 2020-04-06T06:10:09.524522 | 2018-06-08T10:52:42 | 2018-06-08T10:52:42 | 1,899,978 | 4 | 6 | null | 2018-06-08T10:52:43 | 2011-06-15T12:29:27 | Python | UTF-8 | Python | false | false | 1,126 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:47
from __future__ import unicode_literals
from django.core.validators import RegexValidator
from django.db import migrations, models
from django.utils import timezone
# We're modifying a field from another app. This requires trickery.
def fix_app_label(apps, schema_editor):
migrations.recorder.MigrationRecorder.Migration.objects.create(
app='jmbo', name='0004_photosize_name_length',
applied=timezone.now()
)
class Migration(migrations.Migration):
dependencies = [
('jmbo', '0003_auto_20160530_1247'),
]
operations = [
migrations.AlterField(
model_name='photosize',
name='name',
field=models.CharField(unique=True, max_length=255, validators=[RegexValidator(regex='^[a-z0-9_]+$', message='Use only plain lowercase letters (ASCII), numbers and underscores.')]),
),
migrations.RunPython(fix_app_label)
]
def __init__(self, *args, **kwargs):
super(Migration, self).__init__(*args, **kwargs)
self.app_label = 'photologue'
| [
"[email protected]"
] | |
4220343014eaae108a86cbbe3bc8fb5c6a51211b | f80255edf86a11ca237fd123485cc6af8e121fb6 | /build/lib/woe/eval.py | eafd44d7944da70efb52849d66d65167a5ba44a1 | [
"MIT"
] | permissive | AllenChai/woe | f0d04bf93a590113798f9085f95ecfb80fcd8ae3 | 421cb8ef10627ecf667c625a89e3d25fc0ac7b68 | refs/heads/master | 2021-07-15T16:13:48.955926 | 2017-10-12T10:04:50 | 2017-10-12T10:04:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,302 | py | # -*- coding:utf-8 -*-
__author__ = 'boredbird'
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
def eval_feature_detail(Info_Value_list,out_path=False):
"""
format InfoValue list to Dataframe
:param Info_Value_list: Instance list of Class InfoValue
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return:DataFrame about feature detail
"""
rst = Info_Value_list
format_rst = []
for kk in range(0,len(rst)):
print rst[kk].var_name
split_list = []
if rst[kk].split_list != []:
if not rst[kk].is_discrete:
#deal with split_list
split_list.append('(-INF,'+str(rst[kk].split_list[0])+']')
for i in range(0,len(rst[kk].split_list)-1):
split_list.append('(' + str(rst[kk].split_list[i])+','+ str(rst[kk].split_list[i+1]) + ']')
split_list.append('(' + str(rst[kk].split_list[len(rst[kk].split_list)-1]) + ',+INF)')
else:
split_list = rst[kk].split_list
else:
split_list.append('(-INF,+INF)')
# merge into dataframe
columns = ['var_name','split_list','sub_total_sample_num','positive_sample_num'
,'negative_sample_num','sub_total_num_percentage','positive_rate_in_sub_total'
,'woe_list','iv_list','iv']
rowcnt = len(rst[kk].iv_list)
if rowcnt < len(split_list):
split_list = split_list[:rowcnt]
var_name = [rst[kk].var_name] * rowcnt
iv = [rst[kk].iv] * rowcnt
iv_list = rst[kk].iv_list
woe_list = rst[kk].woe_list
a = pd.DataFrame({'var_name':var_name,'iv_list':iv_list,'woe_list':woe_list
,'split_list':split_list,'iv':iv,'sub_total_sample_num':rst[kk].sub_total_sample_num
,'positive_sample_num':rst[kk].positive_sample_num,'negative_sample_num':rst[kk].negative_sample_num
,'sub_total_num_percentage':rst[kk].sub_total_num_percentage
,'positive_rate_in_sub_total':rst[kk].positive_rate_in_sub_total
,'negative_rate_in_sub_total':rst[kk].negative_rate_in_sub_total},columns=columns)
format_rst.append(a)
# merge dataframe list into one dataframe vertically
cformat_rst = pd.concat(format_rst)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
cformat_rst.to_csv(file_name, index=False)
return cformat_rst
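# Example (hypothetical names; a `civ_list` comes from the package's
# feature-processing step):
#   detail_df = eval_feature_detail(civ_list, out_path='feature_detail.csv')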
def eval_data_summary(df_list,source_name,out_path=False):
'''
:param df_list: A dataset DataFrame
:param source_name: string type
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about dataset summary info
'''
train_validation_data_summary = []
for i in range(len(source_name)):
a = dict()
a['source'] = source_name[i]
a['total_sample_cnt'] = len(df_list[i])
a['positive_sample_cnt'] = df_list[i]['target'].sum()
a['negative_sample_cnt'] = a['total_sample_cnt'] - a['positive_sample_cnt']
a['positive_rate'] = a['positive_sample_cnt']*1.0/a['total_sample_cnt']
train_validation_data_summary.append(a)
train_validation_data_summary = pd.DataFrame(train_validation_data_summary)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
train_validation_data_summary.to_csv(file_name, index=False)
return train_validation_data_summary
def eval_model_summary(list_dict,out_path=False):
'''
:param list_dict: a list of dict
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about model summary info
'''
model_summary = pd.DataFrame([list_dict[0]])
if len(list_dict)>1:
for i in range(len(list_dict)-1):
b = pd.DataFrame([list_dict[i+1]])
model_summary = pd.merge(model_summary, b, how='outer')
if out_path:
file_name = out_path if isinstance(out_path, str) else None
model_summary.to_csv(file_name, index=False)
return model_summary
def wald_test(model,X):
'''
    :param model: a fitted model object that provides a predict_proba() function
:param X: dataset features DataFrame
:return: the value of wald_stats,p_value
'''
pred_probs = np.matrix(model.predict_proba(X))
X_design = np.hstack((np.ones(shape=(X.shape[0], 1)), X))
diag_array = np.multiply(pred_probs[:, 0], pred_probs[:, 1]).A1
V = scipy.sparse.diags(diag_array)
m1 = X_design.T * V
m2 = m1.dot(X_design)
cov_mat = np.linalg.inv(m2)
model_params = np.hstack((model.intercept_[0], model.coef_[0]))
wald_stats = (model_params / np.sqrt(np.diag(cov_mat))) ** 2
    # each Wald statistic is asymptotically chi-square with one degree of
    # freedom, so the p-value comes from the chi-square survival function
    p_value = scipy.stats.chi2.sf(wald_stats, 1)
return wald_stats,p_value
def eval_feature_summary(train_X,model,civ_list,candidate_var_list,out_path=False):
'''
:param train_X: training dataset features DataFrame
:param model: model file
:param civ_list: list of InfoValue Class instances
:param candidate_var_list: the list of model input variable
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about feature summary
'''
feature_summary = {}
feature_summary['feature_name'] = list(['Intercept'])
feature_summary['feature_name'].extend(list(candidate_var_list))
feature_summary['coef'] = [model.intercept_]
feature_summary['coef'].extend(model.coef_[0])
var_name = [civ.var_name for civ in civ_list]
feature_summary['iv'] = [0]
feature_summary['iv'].extend([civ_list[var_name.index(var)].iv for var in candidate_var_list])
feature_summary['wald_stats'], feature_summary['p_value'] = wald_test(model, train_X)
feature_summary = pd.DataFrame(feature_summary)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
feature_summary.to_csv(file_name, index=False)
return feature_summary
def eval_segment_metrics(target, predict_proba, segment_cnt = 20,out_path=False):
'''
:param target: the list of actual target value
:param predict_proba: the list of predicted probability
:param segment_cnt: the segment number
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about segment metrics
'''
proba_descend_idx = np.argsort(predict_proba)
proba_descend_idx = proba_descend_idx[::-1]
grp_idx = 1
start_idx = 0
total_sample_cnt = len(predict_proba)
total_positive_sample_cnt = target.sum()
total_negative_sample_cnt = total_sample_cnt - total_positive_sample_cnt
segment_sample_cnt = int(len(predict_proba) / segment_cnt)
cumulative_sample_percentage = 0.0
cumulative_positive_percentage = 0.0
cumulative_negative_percentage = 0.0
segment_list = []
columns = ['grp_idx', 'segment_sample_cnt', 'segment_sample_percentage', 'cumulative_sample_percentage',
'in_segment_positive_percentage', 'positive_percentage_in_total', 'cumulative_positive_percentage',
'cumulative_negative_percentage', 'ks']
while start_idx < total_sample_cnt:
s = {}
s['grp_idx'] = grp_idx
segment_idx_list = proba_descend_idx[start_idx : start_idx + segment_sample_cnt]
segment_target = target[segment_idx_list]
segment_sample_cnt = len(segment_idx_list)
s['segment_sample_cnt'] = segment_sample_cnt
segment_pos_cnt = segment_target.sum()
segment_neg_cnt = segment_sample_cnt - segment_pos_cnt
segment_sample_percentage = segment_sample_cnt*1.0/total_sample_cnt
s['segment_sample_percentage'] = segment_sample_percentage
pos_percentage_in_total = float(segment_pos_cnt * 100) / total_positive_sample_cnt
neg_percentage_in_total = float(segment_neg_cnt * 100) / total_negative_sample_cnt
s['positive_percentage_in_total'] = pos_percentage_in_total
in_segment_positive_percentage = float(segment_pos_cnt) / segment_sample_cnt
s['in_segment_positive_percentage'] = in_segment_positive_percentage
cumulative_sample_percentage += segment_sample_percentage
s['cumulative_sample_percentage'] = cumulative_sample_percentage
cumulative_positive_percentage += pos_percentage_in_total
cumulative_negative_percentage += neg_percentage_in_total
s['cumulative_positive_percentage'] = cumulative_positive_percentage
s['cumulative_negative_percentage'] = cumulative_negative_percentage
ks = cumulative_positive_percentage - cumulative_negative_percentage
s['ks'] = ks
segment_list.append(s)
grp_idx += 1
start_idx += segment_sample_cnt
segment_list = pd.DataFrame(segment_list,columns=columns)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
segment_list.to_csv(file_name, index=False)
return segment_list
def eval_model_stability(proba_train, proba_validation, segment_cnt = 10,out_path=False):
'''
:param proba_train: the list of predicted probability on training dataset
:param proba_validation: the list of predicted probability on validation dataset
:param segment_cnt: the segment number
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about model stability
'''
step = 1.0/segment_cnt
flag = 0.0
model_stability = []
len_train = len(proba_train)
len_validation = len(proba_validation)
columns = ['score_range','segment_train_percentage','segment_validation_percentage','difference',
'variance','ln_variance','stability_index']
while flag < 1.0:
temp = {}
score_range = '['+str(flag)+','+str(flag + step)+')'
segment_train_cnt = proba_train[(proba_train >= flag) & (proba_train < flag + step)].count()
segment_train_percentage = segment_train_cnt*1.0/len_train
segment_validation_cnt = proba_validation[(proba_validation >= flag) & (proba_validation < flag + step)].count()
segment_validation_percentage = segment_validation_cnt * 1.0 / len_validation
difference = segment_validation_percentage - segment_train_percentage
variance = float(segment_validation_percentage)/segment_train_percentage
        ln_variance = np.log(variance)
stability_index = difference * ln_variance
temp['score_range'] = score_range
temp['segment_train_percentage'] = segment_train_percentage
temp['segment_validation_percentage'] = segment_validation_percentage
temp['difference'] = difference
temp['variance'] = variance
temp['ln_variance'] = ln_variance
temp['stability_index'] = stability_index
model_stability.append(temp)
flag += step
model_stability = pd.DataFrame(model_stability,columns=columns)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
model_stability.to_csv(file_name, index=False)
return model_stability
def eval_feature_stability(civ_list, df_train, df_validation,candidate_var_list,out_path=False):
'''
:param civ_list: List of InfoValue Class instances
:param df_train: DataFrame of training dataset
:param df_validation: DataFrame of validation dataset
:param candidate_var_list: the list of model input variable
    :param out_path: if set, write the DataFrame to this csv file path; default False
:return: DataFrame about features stability
'''
psi_dict = {}
civ_var_list = [civ_list[i].var_name for i in range(len(civ_list))]
intersection = list(set(civ_var_list).intersection(set(candidate_var_list)))
civ_idx_list = [civ_var_list.index(var) for var in intersection]
len_train = len(df_train)
len_validation = len(df_validation)
psi_dict['feature_name'] = []
psi_dict['group'] = []
psi_dict['segment_train_cnt'] = []
psi_dict['segment_train_percentage'] = []
psi_dict['segment_validation_cnt'] = []
psi_dict['segment_validation_percentage'] = []
for i in civ_idx_list:
if civ_list[i].is_discrete:
for j in range(len(civ_list[i].split_list)):
psi_dict['feature_name'].append(civ_list[i].var_name)
psi_dict['group'].append(civ_list[i].split_list[j])
civ_split_list = civ_list[i].split_list[j]
segment_train_cnt = 0
for m in civ_split_list:
segment_train_cnt += df_train[civ_list[i].var_name][df_train[civ_list[i].var_name] == m].count()
psi_dict['segment_train_cnt'].append(segment_train_cnt)
psi_dict['segment_train_percentage'].append(float(segment_train_cnt)/len_train)
segment_validation_cnt = 0
for m in civ_split_list:
segment_validation_cnt += df_validation[civ_list[i].var_name][df_validation[civ_list[i].var_name] == m].count()
psi_dict['segment_validation_cnt'].append(segment_validation_cnt)
psi_dict['segment_validation_percentage'].append(float(segment_validation_cnt)/len_validation)
else:
split_list = []
split_list.append(float("-inf"))
split_list.extend([temp for temp in civ_list[i].split_list])
split_list.append(float("inf"))
var_name = civ_list[i].var_name
            for j in range(len(split_list)-2):
psi_dict['feature_name'].append(civ_list[i].var_name)
psi_dict['group'].append('('+str(split_list[j])+','+str(split_list[j+1])+']')
segment_train_cnt = df_train[var_name][(df_train[var_name] > split_list[j])&(df_train[var_name] <= split_list[j+1])].count()
psi_dict['segment_train_cnt'].append(segment_train_cnt)
psi_dict['segment_train_percentage'].append(float(segment_train_cnt)/len_train)
segment_validation_cnt = df_validation[var_name][(df_validation[var_name] > split_list[j])&
(df_validation[var_name] <= split_list[j+1])].count()
psi_dict['segment_validation_cnt'].append(segment_validation_cnt)
psi_dict['segment_validation_percentage'].append(float(segment_validation_cnt)/len_validation)
psi_dict['feature_name'].append(var_name)
psi_dict['group'].append('(' + str(split_list[len(split_list)-2]) + ',+INF)')
            segment_train_cnt = df_train[var_name][df_train[var_name] > split_list[len(split_list)-2]].count()
psi_dict['segment_train_cnt'].append(segment_train_cnt)
psi_dict['segment_train_percentage'].append(float(segment_train_cnt) / len_train)
            segment_validation_cnt = df_validation[var_name][df_validation[var_name] > split_list[len(split_list)-2]].count()
psi_dict['segment_validation_cnt'].append(segment_validation_cnt)
psi_dict['segment_validation_percentage'].append(float(segment_validation_cnt) / len_validation)
psi_dict['difference'] = pd.Series(psi_dict['segment_validation_percentage']) - pd.Series(psi_dict['segment_train_percentage'])
psi_dict['variance'] = map(lambda (x, y): x / (y+0.0000001), zip(psi_dict['segment_validation_percentage'], psi_dict['segment_train_percentage']))
psi_dict['Ln(variance)'] = np.log(psi_dict['variance'])
psi_dict['stability_index'] = np.dot(psi_dict['difference'],psi_dict['Ln(variance)'])
columns = ['feature_name','group','segment_train_cnt','segment_train_percentage',
'segment_validation_cnt','segment_validation_percentage','difference',
'variance','Ln(variance)','stability_index']
psi_df = pd.DataFrame(psi_dict, columns=columns)
if out_path:
file_name = out_path if isinstance(out_path, str) else None
psi_df.to_csv(file_name, index=False)
return psi_df
def plot_ks(pos_percent, neg_percent, file_name=None):
'''
pos_percent: 1-d array, cumulative positive sample percentage
neg_percent: 1-d array, cumulative negative sample percentage
return:None
'''
plt.plot(pos_percent, 'ro-', label="positive")
plt.plot(neg_percent, 'go-', label="negative")
plt.grid(True)
plt.legend(loc=0)
plt.xlabel("population")
plt.ylabel("cumulative percentage")
if file_name is not None:
plt.savefig(file_name)
else:
plt.show()
plt.close()
def compute_ks_gini(target, predict_proba, segment_cnt=100, plot=False):
'''
target: numpy array of shape (1,)
predict_proba: numpy array of shape (1,), predicted probability of the sample being positive
segment_cnt: segment count for computing KS score, more segments result in more accurate estimate of KS score, default is 100
plot: boolean or string, whether to draw the KS plot, save to a file if plot is string of the file name
returns:
gini: float, gini score estimation
ks: float, ks score estimation
'''
proba_descend_idx = np.argsort(predict_proba)
proba_descend_idx = proba_descend_idx[::-1]
one_segment_sample_num = int(len(predict_proba) / segment_cnt)
grp_idx = 1
start_idx = 0
total_sample_cnt = len(predict_proba)
total_positive_sample_cnt = target.sum()
total_negative_sample_cnt = total_sample_cnt - total_positive_sample_cnt
cumulative_positive_percentage = 0.0
cumulative_negative_percentage = 0.0
cumulative_random_positive_percentage = 0.0
random_positive_percentage_step = 100.0 / segment_cnt
ks_pos_percent_list = list()
ks_neg_percent_list = list()
ks_score = 0.0
gini_a_area = 0.0
while start_idx < total_sample_cnt:
segment_idx_list = proba_descend_idx[start_idx: start_idx + one_segment_sample_num]
segment_target = target[segment_idx_list]
segment_sample_cnt = len(segment_idx_list)
segment_pos_cnt = segment_target.sum()
segment_neg_cnt = segment_sample_cnt - segment_pos_cnt
pos_percentage_in_total = float(segment_pos_cnt * 100) / total_positive_sample_cnt
neg_percentage_in_total = float(segment_neg_cnt * 100) / total_negative_sample_cnt
cumulative_positive_percentage += pos_percentage_in_total
cumulative_negative_percentage += neg_percentage_in_total
ks = cumulative_positive_percentage - cumulative_negative_percentage
ks_score = max(ks_score, ks)
cumulative_random_positive_percentage += random_positive_percentage_step
gini_a_area += (cumulative_positive_percentage - cumulative_random_positive_percentage) * (1.0 / segment_cnt)
ks_pos_percent_list.append(cumulative_positive_percentage)
ks_neg_percent_list.append(cumulative_negative_percentage)
grp_idx += 1
start_idx += one_segment_sample_num
gini_score = gini_a_area * 2
if plot:
file_name = plot if isinstance(plot, str) else None
plot_ks(ks_pos_percent_list, ks_neg_percent_list, file_name)
return (gini_score, ks_score)
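# Illustrative call (hypothetical names):
#   gini, ks = compute_ks_gini(y_true, y_proba, segment_cnt=100, plot='ks.png')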
| [
"[email protected]"
] | |
d7870a40f1c0cc50e7e98e6853dcf6f4cf63878b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3995/codes/1672_1950.py | d9036258f64b368da9802335bbf86f788d624ec8 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | t=float(input("temperatura:"))
v=float(input("velocidade:"))
o=13.12+(0.6215*t)-(11.37*(v**0.16))+(0.3965*t*(v**0.16))
if((t>=-50 and t<10)and(v>=4.8)):
print(round(o, 4))
elif(v<4.8):
print("Velocidade invalida")
else:
print("Temperatura invalida") | [
"[email protected]"
] | |
c9e3a5a2870c9edd52b272704f1a5fe5d524c02d | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /usr/share/pyshared/ajenti/plugins/fm/fm.py | aecbcde1a5101d2dbf7369b8b8633ab88fc1226e | [
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 10,685 | py | import gevent
import grp
import logging
import os
import pwd
from ajenti.api import *
from ajenti.api.http import *
from ajenti.plugins.configurator.api import ClassConfigEditor
from ajenti.plugins.main.api import SectionPlugin, intent
from ajenti.ui import on
from ajenti.ui.binder import Binder
from backend import FMBackend, Item, Unpacker
@plugin
class FileManagerConfigEditor (ClassConfigEditor):
title = _('File Manager')
icon = 'folder-open'
def init(self):
self.append(self.ui.inflate('fm:config'))
@plugin
class FileManager (SectionPlugin):
default_classconfig = {'root': '/', 'start': '/'}
classconfig_editor = FileManagerConfigEditor
classconfig_root = True
def init(self):
self.title = _('File Manager')
self.category = _('Tools')
self.icon = 'folder-open'
self.backend = FMBackend().get()
self.append(self.ui.inflate('fm:main'))
self.controller = Controller()
def post_item_bind(object, collection, item, ui):
ui.find('name').on('click', self.on_item_click, object, item)
ui.find('edit').on('click', self.edit, item.fullpath)
self.find('items').post_item_bind = post_item_bind
def post_bc_bind(object, collection, item, ui):
ui.find('name').on('click', self.on_bc_click, object, item)
self.find('breadcrumbs').post_item_bind = post_bc_bind
self.clipboard = []
self.tabs = self.find('tabs')
def on_first_page_load(self):
self.new_tab()
self.binder = Binder(self.controller, self.find('filemanager')).populate()
self.binder_c = Binder(self, self.find('bind-clipboard')).populate()
def on_page_load(self):
self.refresh()
def refresh_clipboard(self):
self.binder_c.setup().populate()
@on('tabs', 'switch')
def on_tab_switch(self):
if self.tabs.active == (len(self.controller.tabs) - 1):
self.new_tab()
self.refresh()
@intent('fm:browse')
def new_tab(self, path=None):
dir = path or self.classconfig.get('start', None) or '/'
if not dir.startswith(self.classconfig['root']):
dir = self.classconfig['root']
self.controller.new_tab(dir)
if not self.active:
self.activate()
@on('close', 'click')
def on_tab_close(self):
if len(self.controller.tabs) > 2:
self.controller.tabs.pop(self.tabs.active)
self.tabs.active = 0
self.refresh()
@on('new-file', 'click')
def on_new_file(self):
destination = self.controller.tabs[self.tabs.active].path
logging.info('[fm] new file in %s' % destination)
path = os.path.join(destination, 'new file')
try:
open(path, 'w').close()
self._chown_new(path)
except OSError as e:
self.context.notify('error', str(e))
self.refresh()
@on('new-dir', 'click')
def on_new_directory(self):
destination = self.controller.tabs[self.tabs.active].path
logging.info('[fm] new directory in %s' % destination)
path = os.path.join(destination, 'new directory')
if not os.path.exists(path):
try:
os.mkdir(path)
os.chmod(path, 0755)
self._chown_new(path)
except OSError as e:
self.context.notify('error', str(e))
self.refresh()
def _chown_new(self, path):
uid = self.classconfig.get('new_owner', 'root') or 'root'
gid = self.classconfig.get('new_group', 'root') or 'root'
try:
uid = int(uid)
except:
uid = pwd.getpwnam(uid)[2]
try:
gid = int(gid)
except:
gid = grp.getgrnam(gid)[2]
os.chown(path, uid, gid)
def upload(self, name, file):
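        # Stream the upload in 1 MiB chunks; gevent.sleep(0) inside the read
        # loop yields to the event loop so other greenlets stay responsive
        # while a large file is written.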
destination = self.controller.tabs[self.tabs.active].path
logging.info('[fm] uploading %s to %s' % (name, destination))
try:
output = open(os.path.join(destination, name), 'w')
while True:
data = file.read(1024 * 1024)
if not data:
break
gevent.sleep(0)
output.write(data)
output.close()
except OSError as e:
self.context.notify('error', str(e))
self.refresh()
@on('mass-cut', 'click')
def on_cut(self):
l = self._get_checked()
for i in l:
i.action = 'cut'
self.clipboard += l
self.refresh_clipboard()
@on('mass-copy', 'click')
def on_copy(self):
l = self._get_checked()
for i in l:
i.action = 'copy'
self.clipboard += l
self.refresh_clipboard()
@on('mass-delete', 'click')
def on_delete(self):
def callback(task):
self.context.notify('info', _('Files deleted'))
self.refresh()
self.backend.remove(self._get_checked(), cb=callback)
@on('paste', 'click')
def on_paste(self):
tab = self.controller.tabs[self.tabs.active]
for_move = []
for_copy = []
for i in self.clipboard:
if i.action == 'cut':
for_move.append(i)
else:
for_copy.append(i)
try:
if for_move:
def callback(task):
self.context.notify('info', _('Files moved'))
self.refresh()
self.backend.move(for_move, tab.path, callback)
if for_copy:
def callback(task):
self.context.notify('info', _('Files copied'))
self.refresh()
self.backend.copy(for_copy, tab.path, callback)
self.clipboard = []
except Exception as e:
self.context.notify('error', str(e))
self.refresh_clipboard()
@on('select-all', 'click')
def on_select_all(self):
self.binder.update()
tab = self.controller.tabs[self.tabs.active]
for item in tab.items:
item.checked = not item.checked
self.binder.populate()
self.context.notify('info', _('Selected %i items') % len(tab.items))
def _get_checked(self):
self.binder.update()
tab = self.controller.tabs[self.tabs.active]
r = []
for item in tab.items:
if item.checked:
r.append(item)
item.checked = False
self.refresh()
return r
@on('clear-clipboard', 'click')
def on_clear_clipboard(self):
self.clipboard = []
self.refresh_clipboard()
def on_item_click(self, tab, item):
path = os.path.join(tab.path, item.name)
if not os.path.isdir(path):
self.edit(path)
if not path.startswith(self.classconfig['root']):
return
tab.navigate(path)
self.refresh()
def edit(self, path):
self.find('dialog').visible = True
self.item = Item(path)
self.item.read()
self.binder_d = Binder(self.item, self.find('dialog')).populate()
# Unpack
u = Unpacker.find(self.item.fullpath.lower())
unpack_btn = self.find('dialog').find('unpack')
unpack_btn.visible = u is not None
def cb():
self.context.notify('info', _('Unpacked'))
self.refresh()
def unpack():
u.unpack(self.item.fullpath, cb=cb)
logging.info('[fm] unpacking %s' % self.item.fullpath)
unpack_btn.on('click', lambda: unpack())
# Edit
edit_btn = self.find('dialog').find('edit')
if self.item.size > 1024 * 1024 * 5:
edit_btn.visible = False
def edit():
self.context.launch('notepad', path=self.item.fullpath)
edit_btn.on('click', lambda: edit())
@on('dialog', 'button')
def on_close_dialog(self, button):
self.find('dialog').visible = False
if button == 'save':
self.binder_d.update()
try:
self.item.write()
except Exception as e:
self.context.notify('error', str(e))
self.refresh()
if self.find('chmod-recursive').value:
cmd = 'chown -Rv "%s:%s" "%s"; chmod -Rv %o "%s"' % (
self.item.owner, self.item.group,
self.item.fullpath,
self.item.mode,
self.item.fullpath,
)
self.context.launch('terminal', command=cmd)
logging.info('[fm] modifying %s: %o %s:%s' % (self.item.fullpath, self.item.mode, self.item.owner, self.item.group))
def on_bc_click(self, tab, item):
if not item.path.startswith(self.classconfig['root']):
return
tab.navigate(item.path)
self.refresh()
def refresh(self, _=None):
for tab in self.controller.tabs:
tab.refresh()
self.binder.populate()
@plugin
class UploadReceiver (HttpPlugin):
@url('/ajenti:fm-upload')
def handle_upload(self, context):
file = context.query['file']
context.session.endpoint.get_section(FileManager).upload(file.filename, file.file)
context.respond_ok()
return 'OK'
class Controller (object):
def __init__(self):
self.tabs = []
def new_tab(self, path='/'):
if len(self.tabs) > 1:
self.tabs.pop(-1)
self.tabs.append(Tab(path))
self.tabs.append(Tab(None))
class Tab (object):
def __init__(self, path):
if path:
self.navigate(path)
else:
self.shortpath = '+'
self.path = None
def refresh(self):
if self.path:
self.navigate(self.path)
def navigate(self, path):
if not os.path.isdir(path):
return
self.path = path
self.shortpath = os.path.split(path)[1] or '/'
self.items = []
for item in os.listdir(unicode(self.path)):
itempath = os.path.join(self.path, item)
if os.path.exists(itempath):
self.items.append(Item(itempath))
self.items = sorted(self.items, key=lambda x: (not x.isdir, x.name))
self.breadcrumbs = []
p = path
while len(p) > 1:
p = os.path.split(p)[0]
self.breadcrumbs.insert(0, Breadcrumb(p))
class Breadcrumb (object):
def __init__(self, path):
self.name = os.path.split(path)[1]
self.path = path
if self.path == '/':
self.name = '/'
| [
"[email protected]"
] | |
ebf0411cf8fd15124d51fefb1d9fce1ff401d78a | 4ca07649f61d70b8803481e89f0f35a3ad7f7f4c | /Jamie/env/bin/pyrsa-priv2pub | 9f127f79577a4e19d9b15f291939113e553dddc1 | [] | no_license | coralisland-git/web-spider-cloud | b5ab74ea4b2188dd18281618ecaf3337163bd4d1 | 1da2aca0c28e6e08db1978939007706fdf60779e | refs/heads/master | 2021-04-09T15:47:18.220441 | 2018-06-01T16:09:55 | 2018-06-01T16:09:55 | 125,665,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | #!/media/apple/f50223eb-8502-4183-93ad-3e6be8fce2e0/work/scrapy/Jamie/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.util import private_to_public
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(private_to_public())
| [
"[email protected]"
] | ||
241fa6b221f9014d153bddf461f56f124deed35d | bf0d7c8d987d5fda14208eb9ce70e31c83c25c25 | /c-ex4/viz_hidden.py | 77f2b6e5a6e6b235b4949916dff588d425775ba8 | [] | no_license | SummerBigData/SamRepo | 7876e9393c7175e300e175a60c17633c3b23a1bb | fd84ad654370faa48c084349952c2921fde4032d | refs/heads/master | 2020-03-18T05:09:08.787956 | 2018-06-18T17:11:49 | 2018-06-18T17:11:49 | 134,327,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | import matplotlib.pyplot as plt
import numpy as np
from nn_util import *
n = 400
k = 10
s = [n, 25, k]
L = len(s)
sizes = [(s[i+1], s[i]+1) for i in range(L-1)]
theta_flat = np.genfromtxt('weights.txt')
thetas = thetas_from_flat(theta_flat, sizes)
theta_h = thetas[0][:,1:]
def inv_sigmoid(a):
return np.log(a/(1-a))
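# For each hidden unit, set its activation near 1 (the rest near 0), invert the
# sigmoid, and map back through the pseudo-inverse of the hidden-layer weights
# to get an input that would roughly produce that activation pattern.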
small_number = 1e-8
active_val = 1 - small_number
X = np.zeros((25, n))
for i in range(s[1]):
a = np.array(s[1] * [small_number]).reshape((1, s[1]))
a[0,i] = active_val
z = inv_sigmoid(a)
x = z.dot(np.linalg.pinv(theta_h).T)
X[i] = x
rows, cols = 5, 5
_, axs = plt.subplots(rows, cols, figsize=(rows, cols))
row, col = -1, 0
for x in X:
if col % cols == 0:
row += 1
col = 0
x = x.reshape((20, 20)).T
ax = axs[row, col]
ax.imshow(x, cmap='gray')
ax.axis('off')
col += 1
plt.show()
| [
"[email protected]"
] | |
c348b8fbf62271f2569db6ac5e932e2083f492cd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02687/s366330739.py | 8212de878e4731ef97587ca957ac1639f31d7b1e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # A - A?C
# 'ABC'には'ARC'を、'ARC'には'ABC'を返す
S = str(input())
if S == 'ABC':
print('ARC')
elif S == 'ARC':
print('ABC')
| [
"[email protected]"
] | |
f82670200f5a400d8e48a8b174a2cd82239ab27d | 46ce38eacc390e549944d2e73020cbfd7e0b020d | /ml/rl/test/gym/world_model/state_embed_gym.py | 5e5bb9f273deb929dd36f4d255e2f21d9f25bbcf | [
"BSD-3-Clause"
] | permissive | sdaulton/ReAgent | 0ae229da0f55ce0bf0108c5bc7bf481dc42aeb66 | 426d4915dcd90beb3c3781d030c64e748e336351 | refs/heads/master | 2020-09-06T18:44:23.454865 | 2019-11-07T22:34:47 | 2019-11-07T22:36:20 | 220,512,766 | 2 | 0 | NOASSERTION | 2019-11-08T17:07:30 | 2019-11-08T17:07:30 | null | UTF-8 | Python | false | false | 9,698 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This file shows an example of using embedded states to feed to RL models in
partially observable environments (POMDPs). Embedded states are generated by a world
model which learns how to encode past n observations into a low-dimension
vector. Embedded states improve performance in POMDPs compared to just using
one-step observations as states because they encode more historical information
than one-step observations.
"""
import argparse
import json
import logging
import sys
from collections import deque
import gym
import ml.rl.types as rlt
import numpy as np
import torch
from gym import Env
from gym.spaces import Box
from ml.rl.json_serialize import from_json, json_to_object
from ml.rl.models.world_model import MemoryNetwork
from ml.rl.parameters import MDNRNNParameters, OpenAiGymParameters, RLParameters
from ml.rl.test.gym.open_ai_gym_environment import EnvType, OpenAIGymEnvironment
from ml.rl.test.gym.open_ai_gym_memory_pool import OpenAIGymMemoryPool
from ml.rl.test.gym.run_gym import (
create_epsilon,
create_predictor,
create_trainer,
train_gym_offline_rl,
)
from ml.rl.test.gym.world_model.mdnrnn_gym import create_embed_rl_dataset, mdnrnn_gym
from ml.rl.training.rl_dataset import RLDataset
logger = logging.getLogger(__name__)
class StateEmbedGymEnvironment(Env):
def __init__(
self,
gym_env: Env,
mdnrnn: MemoryNetwork,
max_embed_seq_len: int,
state_min_value: float,
state_max_value: float,
):
self.env = gym_env
self.unwrapped.spec = self.env.unwrapped.spec
self.max_embed_seq_len = max_embed_seq_len
self.mdnrnn = mdnrnn
self.embed_size = self.mdnrnn.num_hiddens
self.raw_state_dim = self.env.observation_space.shape[0] # type: ignore
self.state_dim = self.embed_size + self.raw_state_dim
if isinstance(self.env.action_space, gym.spaces.Discrete):
self.action_type = EnvType.DISCRETE_ACTION
self.action_dim = self.env.action_space.n
elif isinstance(self.env.action_space, gym.spaces.Box):
self.action_type = EnvType.CONTINUOUS_ACTION
self.action_dim = self.env.action_space.shape[0]
self.action_space = self.env.action_space
self.observation_space = Box( # type: ignore
low=state_min_value, high=state_max_value, shape=(self.state_dim,)
)
self.cur_raw_state = None
self.recent_states = deque([], maxlen=self.max_embed_seq_len) # type: ignore
self.recent_actions = deque([], maxlen=self.max_embed_seq_len) # type: ignore
def seed(self, seed):
self.env.seed(seed)
def embed_state(self, state):
""" Embed state after either reset() or step() """
assert len(self.recent_states) == len(self.recent_actions)
old_mdnrnn_mode = self.mdnrnn.mdnrnn.training
self.mdnrnn.mdnrnn.eval()
# Embed the state as the hidden layer's output
# until the previous step + current state
if len(self.recent_states) == 0:
mdnrnn_state = np.zeros((1, self.raw_state_dim))
mdnrnn_action = np.zeros((1, self.action_dim))
else:
mdnrnn_state = np.array(list(self.recent_states))
mdnrnn_action = np.array(list(self.recent_actions))
mdnrnn_state = torch.tensor(mdnrnn_state, dtype=torch.float).unsqueeze(1)
mdnrnn_action = torch.tensor(mdnrnn_action, dtype=torch.float).unsqueeze(1)
mdnrnn_input = rlt.PreprocessedStateAction.from_tensors(
state=mdnrnn_state, action=mdnrnn_action
)
mdnrnn_output = self.mdnrnn(mdnrnn_input)
hidden_embed = (
mdnrnn_output.all_steps_lstm_hidden[-1].squeeze().detach().cpu().numpy()
)
state_embed = np.hstack((hidden_embed, state))
self.mdnrnn.mdnrnn.train(old_mdnrnn_mode)
logger.debug(
"Embed_state\nrecent states: {}\nrecent actions: {}\nstate_embed{}\n".format(
np.array(self.recent_states), np.array(self.recent_actions), state_embed
)
)
return state_embed
def reset(self):
next_raw_state = self.env.reset()
self.recent_states = deque([], maxlen=self.max_embed_seq_len)
self.recent_actions = deque([], maxlen=self.max_embed_seq_len)
self.cur_raw_state = next_raw_state
next_embed_state = self.embed_state(next_raw_state)
return next_embed_state
def step(self, action):
if self.action_type == EnvType.DISCRETE_ACTION:
action_np = np.zeros(self.action_dim)
action_np[action] = 1.0
else:
action_np = action
self.recent_states.append(self.cur_raw_state)
self.recent_actions.append(action_np)
next_raw_state, reward, terminal, info = self.env.step(action)
logger.debug("action {}, reward {}\n".format(action, reward))
self.cur_raw_state = next_raw_state
next_embed_state = self.embed_state(next_raw_state)
return next_embed_state, reward, terminal, info
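    # The embedded observation is the MDN-RNN's last hidden state over up to
    # max_embed_seq_len recent (state, action) pairs, concatenated with the
    # current raw observation (see embed_state above).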
def main(args):
parser = argparse.ArgumentParser(
description="Train a RL net to play in an OpenAI Gym environment. "
"States are embedded by a mdn-rnn model."
)
parser.add_argument(
"-p",
"--mdnrnn_parameters",
help="Path to JSON parameters file for MDN-RNN training.",
)
parser.add_argument(
"-q", "--rl_parameters", help="Path to JSON parameters file for RL training."
)
parser.add_argument(
"-s",
"--score-bar",
help="Bar for averaged tests scores.",
type=float,
default=None,
)
parser.add_argument(
"-g",
"--gpu_id",
help="If set, will use GPU with specified ID. Otherwise will use CPU.",
default=-1,
)
parser.add_argument(
"-l",
"--log_level",
help="If set, use logging level specified (debug, info, warning, error, "
"critical). Else defaults to info.",
default="info",
)
args = parser.parse_args(args)
if args.log_level not in ("debug", "info", "warning", "error", "critical"):
raise Exception("Logging level {} not valid level.".format(args.log_level))
else:
logging.getLogger().setLevel(getattr(logging, args.log_level.upper()))
with open(args.mdnrnn_parameters, "r") as f:
mdnrnn_params = json_to_object(f.read(), OpenAiGymParameters)
with open(args.rl_parameters, "r") as f:
rl_params = json_to_object(f.read(), OpenAiGymParameters)
env, mdnrnn_trainer, embed_rl_dataset = create_mdnrnn_trainer_and_embed_dataset(
mdnrnn_params, rl_params.use_gpu
)
max_embed_seq_len = mdnrnn_params["run_details"]["seq_len"]
_, _, rl_trainer, rl_predictor, state_embed_env = run_gym(
rl_params,
args.score_bar,
embed_rl_dataset,
env.env,
mdnrnn_trainer.mdnrnn,
max_embed_seq_len,
)
def create_mdnrnn_trainer_and_embed_dataset(
mdnrnn_params: OpenAiGymParameters, use_gpu
):
env, mdnrnn_trainer, _, _, _ = mdnrnn_gym(mdnrnn_params)
embed_rl_dataset = RLDataset("/tmp/rl.pkl")
create_embed_rl_dataset(
env, mdnrnn_trainer, embed_rl_dataset, use_gpu, mdnrnn_params.run_details
)
return env, mdnrnn_trainer, embed_rl_dataset
def run_gym(
params: OpenAiGymParameters,
score_bar,
embed_rl_dataset: RLDataset,
gym_env: Env,
mdnrnn: MemoryNetwork,
max_embed_seq_len: int,
):
assert params.rl is not None
rl_parameters = params.rl
env_type = params.env
model_type = params.model_type
epsilon, epsilon_decay, minimum_epsilon = create_epsilon(
offline_train=True, rl_parameters=rl_parameters, params=params
)
replay_buffer = OpenAIGymMemoryPool(params.max_replay_memory_size)
for row in embed_rl_dataset.rows:
replay_buffer.insert_into_memory(**row)
assert replay_buffer.memory_buffer is not None
state_mem = replay_buffer.memory_buffer.state
state_min_value = torch.min(state_mem).item()
state_max_value = torch.max(state_mem).item()
state_embed_env = StateEmbedGymEnvironment(
gym_env, mdnrnn, max_embed_seq_len, state_min_value, state_max_value
)
open_ai_env = OpenAIGymEnvironment(
state_embed_env,
epsilon,
rl_parameters.softmax_policy,
rl_parameters.gamma,
epsilon_decay,
minimum_epsilon,
)
rl_trainer = create_trainer(params, open_ai_env)
rl_predictor = create_predictor(
rl_trainer, model_type, params.use_gpu, open_ai_env.action_dim
)
assert (
params.run_details.max_steps is not None
and params.run_details.offline_train_epochs is not None
), "Missing data required for offline training: {}".format(str(params.run_details))
return train_gym_offline_rl(
gym_env=open_ai_env,
replay_buffer=replay_buffer,
model_type=model_type,
trainer=rl_trainer,
predictor=rl_predictor,
test_run_name="{} offline rl state embed".format(env_type),
score_bar=score_bar,
max_steps=params.run_details.max_steps,
avg_over_num_episodes=params.run_details.avg_over_num_episodes,
offline_train_epochs=params.run_details.offline_train_epochs,
num_batch_per_epoch=None,
)
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().setLevel(logging.INFO)
args = sys.argv
main(args[1:])
| [
"[email protected]"
] | |
6be70691a6a372ac9c7ec40b4679883b2939e460 | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /build/android/pylib/constants/__init__.py | 80ad2c1a8b45589070e60fb4c57c2f19f2485186 | [
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 7,735 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import glob
import logging
import os
import subprocess
import devil.android.sdk.keyevent
from devil.android.constants import chrome
from devil.android.sdk import version_codes
from devil.constants import exit_codes
keyevent = devil.android.sdk.keyevent
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir)))
PACKAGE_INFO = dict(chrome.PACKAGE_INFO)
PACKAGE_INFO.update({
'legacy_browser': chrome.PackageInfo(
'com.google.android.browser',
'com.android.browser.BrowserActivity',
None,
None),
'chromecast_shell': chrome.PackageInfo(
'com.google.android.apps.mediashell',
'com.google.android.apps.mediashell.MediaShellActivity',
'/data/local/tmp/castshell-command-line',
None),
'android_webview_shell': chrome.PackageInfo(
'org.chromium.android_webview.shell',
'org.chromium.android_webview.shell.AwShellActivity',
'/data/local/tmp/android-webview-command-line',
None),
'gtest': chrome.PackageInfo(
'org.chromium.native_test',
'org.chromium.native_test.NativeUnitTestActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None),
'components_browsertests': chrome.PackageInfo(
'org.chromium.components_browsertests_apk',
('org.chromium.components_browsertests_apk' +
'.ComponentsBrowserTestsActivity'),
'/data/local/tmp/chrome-native-tests-command-line',
None),
'content_browsertests': chrome.PackageInfo(
'org.chromium.content_browsertests_apk',
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None),
'chromedriver_webview_shell': chrome.PackageInfo(
'org.chromium.chromedriver_webview_shell',
'org.chromium.chromedriver_webview_shell.Main',
None,
None),
})
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
'/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
ANDROID_SDK_VERSION = version_codes.MARSHMALLOW
ANDROID_SDK_BUILD_TOOLS_VERSION = '24.0.2'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'ndk')
PROGUARD_SCRIPT_PATH = os.path.join(
ANDROID_SDK_ROOT, 'tools', 'proguard', 'bin', 'proguard.sh')
PROGUARD_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'proguard')
BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR', 'out'),
'bad_devices.json')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
# TODO(jbudorick): Remove once unused.
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
# TODO(jbudorick): Rework this into testing/buildbot/
PYTHON_UNIT_TEST_SUITES = {
'pylib_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'devil.android.device_utils_test',
'devil.android.md5sum_test',
'devil.utils.cmd_helper_test',
'pylib.results.json_results_test',
'pylib.utils.proguard_test',
]
},
'gyp_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
'test_modules': [
'java_cpp_enum_tests',
'java_google_api_keys_tests',
]
},
}
LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
'perf', 'python']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
try:
return os.environ['BUILDTYPE']
except KeyError:
raise EnvironmentError(
'The BUILDTYPE environment variable has not been set')
def SetBuildType(build_type):
os.environ['BUILDTYPE'] = build_type
def SetBuildDirectory(build_directory):
os.environ['CHROMIUM_OUT_DIR'] = build_directory
def SetOutputDirectory(output_directory):
os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
"""Returns the out directory where the output binaries are built.
Args:
build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
globally set build type environment variable BUILDTYPE.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
GetBuildType() if build_type is None else build_type))
def CheckOutputDirectory():
"""Checks that CHROMIUM_OUT_DIR or CHROMIUM_OUTPUT_DIR is set.
If neither are set, but the current working directory is a build directory,
then CHROMIUM_OUTPUT_DIR is set to the current working directory.
Raises:
Exception: If no output directory is detected.
"""
output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
out_dir = os.environ.get('CHROMIUM_OUT_DIR')
if not output_dir and not out_dir:
# If CWD is an output directory, then assume it's the desired one.
if os.path.exists('build.ninja'):
output_dir = os.getcwd()
SetOutputDirectory(output_dir)
elif os.environ.get('CHROME_HEADLESS'):
# When running on bots, see if the output directory is obvious.
dirs = glob.glob(os.path.join(DIR_SOURCE_ROOT, 'out', '*', 'build.ninja'))
if len(dirs) == 1:
SetOutputDirectory(dirs[0])
else:
raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
'has been set. CHROME_HEADLESS detected, but multiple '
'out dirs exist: %r' % dirs)
else:
raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
'has been set')
# TODO(jbudorick): Convert existing callers to AdbWrapper.GetAdbPath() and
# remove this.
def GetAdbPath():
from devil.android.sdk import adb_wrapper
return adb_wrapper.AdbWrapper.GetAdbPath()
# Exit codes
ERROR_EXIT_CODE = exit_codes.ERROR
INFRA_EXIT_CODE = exit_codes.INFRA
WARNING_EXIT_CODE = exit_codes.WARNING
| [
"[email protected]"
] | |
3ce273ff7c46fa17acd4b7832d4e488d178af564 | c5278643c1df2a0c1a83156f764ba7b2ebcc8cb8 | /hc/api/tests/test_pause.py | e6c3d4142b8b6977190901b14f8e040aea1f06a6 | [
"BSD-3-Clause"
] | permissive | protostartup/healthchecks | a5757584c4506282d488aff207676f54241c975c | 25e48f1b9fff10866e4d6a0f875912527a5160bc | refs/heads/master | 2020-04-11T15:24:42.495688 | 2018-12-14T16:58:35 | 2018-12-14T16:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from hc.api.models import Check
from hc.test import BaseTestCase
class PauseTestCase(BaseTestCase):
def test_it_works(self):
check = Check(user=self.alice, status="up")
check.save()
url = "/api/v1/checks/%s/pause" % check.code
r = self.client.post(url, "", content_type="application/json",
HTTP_X_API_KEY="X" * 32)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["Access-Control-Allow-Origin"], "*")
check.refresh_from_db()
self.assertEqual(check.status, "paused")
def test_it_handles_options(self):
check = Check(user=self.alice, status="up")
check.save()
r = self.client.options("/api/v1/checks/%s/pause" % check.code)
self.assertEqual(r.status_code, 204)
self.assertIn("POST", r["Access-Control-Allow-Methods"])
def test_it_only_allows_post(self):
url = "/api/v1/checks/1659718b-21ad-4ed1-8740-43afc6c41524/pause"
r = self.client.get(url, HTTP_X_API_KEY="X" * 32)
self.assertEqual(r.status_code, 405)
def test_it_validates_ownership(self):
check = Check(user=self.bob, status="up")
check.save()
url = "/api/v1/checks/%s/pause" % check.code
r = self.client.post(url, "", content_type="application/json",
HTTP_X_API_KEY="X" * 32)
self.assertEqual(r.status_code, 403)
def test_it_validates_uuid(self):
url = "/api/v1/checks/not-uuid/pause"
r = self.client.post(url, "", content_type="application/json",
HTTP_X_API_KEY="X" * 32)
self.assertEqual(r.status_code, 404)
def test_it_handles_missing_check(self):
url = "/api/v1/checks/07c2f548-9850-4b27-af5d-6c9dc157ec02/pause"
r = self.client.post(url, "", content_type="application/json",
HTTP_X_API_KEY="X" * 32)
self.assertEqual(r.status_code, 404)
| [
"[email protected]"
] | |
13ef81d0170c88304adc0fe294d740fb507ae0ef | 7755efce8e5ec81943ceb491590fae29eaad798a | /Codecademy Lesson 3 Control Flow/L3.5_Boolean_Operators_And.py | 8413b9f09629808f478ec877dbcf37d340a3cfe0 | [] | no_license | jashidsany/Learning-Python | 01b3f2f207dfdf6d31f9ca9f5abd38aab710ba1d | 2c9926fd1b159441cbe8e9e30f0804d99b936573 | refs/heads/main | 2023-02-20T03:59:15.123009 | 2021-01-25T01:12:19 | 2021-01-25T01:12:19 | 319,205,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | statement_one = (2 + 2 + 2 >= 6) and (-1 * -1 < 0)
print(statement_one)
statement_two = (4 * 2 <= 8) and (7 - 1 == 6)
print(statement_two)
def graduation_reqs(gpa, credits):
if gpa >= 2.0 and credits >= 120:
return "You meet the requirements to graduate!"
| [
"[email protected]"
] | |
c8a947e165c9e763ad4bdf98bb89237d7e4bf66e | 021cf3a95271c46647c45427ca6f4e951a1eacb0 | /2020/B/b.py | 3a2882390d01d691f8a0dd5c7c016017fb86c480 | [] | no_license | calvinchankf/GoogleKickStart | 9c5fe0a2537c4efba79aa9352284a0b1e6e65445 | dbcd259b756785ee2864915c22c5ee6abe270581 | refs/heads/master | 2023-02-12T14:05:59.787258 | 2023-02-11T16:19:50 | 2023-02-11T16:19:50 | 182,479,858 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | """
math
Small Pass
Big Pass
"""
def f(nums, D):
rightMost = D
for i in range(len(nums)-1, -1, -1):
rightMost = nums[i] * (rightMost // nums[i])
return rightMost
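# Working backwards, flooring the running bound to a multiple of nums[i] keeps
# step i feasible, so f() returns the largest start value <= D that satisfies
# every step.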
# a = [3, 7, 2]
# print(f(a, 10))
# a = [11, 10, 5, 50]
# print(f(a, 100))
# a = [1,1]
# print(f(a, 1))
# input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Code Jam problems.
t = int(raw_input()) # read a line with a single integer
for i in range(1, t + 1):
N, D = [int(s) for s in raw_input().split(" ")]
arr = [int(s) for s in raw_input().split(" ")]
res = f(arr, D)
print("Case #{}: {}".format(i, res)) | [
"[email protected]"
] | |
d3854df5c0cfad528f8a66d04ee219f3398b3031 | f572e0a4b843ed3fd2cd8edec2ad3aab7a0019d3 | /ows/wcs/v20/exceptions.py | 8b2d542a5baad97f4eec81ecbf70515ef3aeeb76 | [
"MIT"
] | permissive | EOxServer/pyows | 9039c8ed7358c98d736e2b8fd9f47be944f0b0a1 | e09310f992d6e69088940e9b5dbd7302f697344b | refs/heads/master | 2022-10-09T23:27:43.884159 | 2022-10-04T10:03:25 | 2022-10-04T10:03:25 | 218,005,699 | 1 | 1 | null | 2022-01-04T13:36:06 | 2019-10-28T09:01:51 | Python | UTF-8 | Python | false | false | 3,719 | py | # ------------------------------------------------------------------------------
#
# Project: pyows <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
class InvalidSubsettingException(Exception):
"""
This exception indicates an invalid WCS 2.0 subsetting parameter was
submitted.
"""
code = "InvalidSubsetting"
locator = "subset"
class InvalidSubsettingCrsException(Exception):
"""
This exception indicates an invalid WCS 2.0 subsettingCrs parameter was
submitted.
"""
code = "SubsettingCrs-NotSupported"
locator = "subsettingCrs"
class InvalidOutputCrsException(Exception):
"""
This exception indicates an invalid WCS 2.0 outputCrs parameter was
submitted.
"""
code = "OutputCrs-NotSupported"
locator = "outputCrs"
class InvalidScaleFactorException(Exception):
""" Error in ScaleFactor and ScaleAxis operations
"""
code = "InvalidScaleFactor"
def __init__(self, scalefactor):
super().__init__(
"Scalefactor '%s' is not valid" % scalefactor
)
self.locator = scalefactor
class InvalidScaleExtentException(Exception):
""" Error in ScaleExtent operations
"""
code = "InvalidExtent"
def __init__(self, low, high):
super().__init__(
"ScaleExtent '%s:%s' is not valid" % (low, high)
)
self.locator = high
class NoSuchCoverageException(Exception):
""" This exception indicates that the requested coverage(s) do not
exist.
"""
code = "NoSuchCoverage"
# def __str__(self):
# return "No such Coverage%s with ID: %s" % (
# "" if len(self.items) == 1 else "s",
# ", ".join(map(lambda i: "'%s'" % i, self.items))
# )
class NoSuchDatasetSeriesOrCoverageException(Exception):
""" This exception indicates that the requested coverage(s) or dataset
series do not exist.
"""
code = "NoSuchDatasetSeriesOrCoverage"
# def __str__(self):
# return "No such Coverage%s or Dataset Series with ID: %s" % (
# " " if len(self.items) == 1 else "s",
# ", ".join(map(lambda i: "'%s'" % i, self.items))
# )
class InterpolationMethodNotSupportedException(Exception):
"""
This exception indicates a not supported interpolation method.
"""
code = "InterpolationMethodNotSupported"
locator = "interpolation"
| [
"[email protected]"
] | |
fb90ef3f04640975faca0418775ddddf8837d8db | 187c27b5d1255f3f08ec87a4bb51cc4056d8e2da | /agents/views.py | 27ef869b7b3b066d67c3ea077e6f104ce339b5d1 | [] | no_license | LKingJ23/Django-Hotel-Site | 1f589154d9475731d015c823e83815292b962e11 | 49b1bef7425ff63bf8ec3e178629ccc2bab45c1f | refs/heads/master | 2020-06-14T20:22:17.310187 | 2019-07-03T19:41:30 | 2019-07-03T19:41:30 | 195,115,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from django.shortcuts import render
from .models import Agent
# Create your views here.
def agents_list(request):
agent_list = Agent.objects.all()
template = 'agents/agents.html'
context = {
'agent_list': agent_list
}
return render(request, template, context) | [
"[email protected]"
] | |
bc29b15eabbd22e4af5e1f1da7d7846fe6fa794d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_387/ch136_2020_04_01_12_06_52_438302.py | 5a78febe80669c25bdb2163ef7afb2a5b120d898 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | import random as rnd
def dicas(soma):
    """Ask the player for three numbers and say whether the dice sum is among them."""
    num1 = int(input('First number: '))
    num2 = int(input('Second number: '))
    num3 = int(input('Third number: '))
    nums = [num1, num2, num3]
    if soma in nums:
        return "It is among the 3"
    else:
        return "It is not among the 3"
# Roll three dice; the player must guess their sum.
dado1 = rnd.randint(1, 6)
dado2 = rnd.randint(1, 6)
dado3 = rnd.randint(1, 6)
soma = dado1 + dado2 + dado3
dinheiro = 10
fase_dicas = True
# Hint phase: each hint costs 1 coin.
while fase_dicas:
    print(f'You have {dinheiro} coins')
    if dinheiro <= 0:
        break
    quer_dica = input('Do you want a hint? (each hint costs 1 coin) (yes/no)')
    if quer_dica == 'yes':
        print(dicas(soma))
        dinheiro -= 1
    elif quer_dica == 'no':
        fase_dicas = False
# Guessing phase: a correct guess multiplies the coins by 6; a wrong one costs 1.
while not fase_dicas:
    print(f'You have {dinheiro} coins')
    if dinheiro <= 0:
        break
    else:
        chute = int(input('Guess a number: '))
        if chute == soma:
            dinheiro *= 6
            break
        else:
            dinheiro -= 1
if dinheiro > 0:
    print(f"You won the game with {dinheiro} coins!")
else:
    print("You lost!") | [
"[email protected]"
] | |
d4e0bae3805afbac2f13e7fb4aaa6af639b860f7 | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/mochitest/pywebsocket3/mod_pywebsocket/util.py | 351ca9fac84852ce0760931431fd2b91c8ebd766 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 10,317 | py |
"""WebSocket utilities."""
from __future__ import absolute_import
import array
import errno
import logging
import os
import re
import six
from six.moves import map
from six.moves import range
import socket
import struct
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def prepend_message_to_exception(message, exc):
"""Prepend message to the exception."""
exc.args = (message + str(exc), )
return
def __translate_interp(interp, cygwin_path):
"""Translate interp program path for Win32 python to run cygwin program
(e.g. perl). Note that it doesn't support path that contains space,
which is typically true for Unix, where #!-script is written.
For Win32 python, cygwin_path is a directory of cygwin binaries.
Args:
interp: interp command line
cygwin_path: directory name of cygwin binary, or None
Returns:
translated interp command line.
"""
if not cygwin_path:
return interp
m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
if m:
cmd = os.path.join(cygwin_path, m.group(1))
return cmd + m.group(2)
return interp
def get_script_interp(script_path, cygwin_path=None):
r"""Get #!-interpreter command line from the script.
It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
it could run "/usr/bin/perl -wT hello.pl".
When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
"/usr/bin/perl" to "<cygwin_path>\perl.exe".
Args:
script_path: pathname of the script
cygwin_path: directory name of cygwin binary, or None
Returns:
#!-interpreter command line, or None if it is not #!-script.
"""
fp = open(script_path)
line = fp.readline()
fp.close()
m = re.match('^#!(.*)', line)
if m:
return __translate_interp(m.group(1), cygwin_path)
return None
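# Illustrative example (hypothetical paths, not executed here): with
# cygwin_path set to r'C:\cygwin\bin', a script whose first line is
# '#!/usr/bin/perl -wT' makes get_script_interp() return
# r'C:\cygwin\bin\perl -wT'.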
def wrap_popen3_for_win(cygwin_path):
"""Wrap popen3 to support #!-script on Windows.
Args:
cygwin_path: path for cygwin binary if command path is needed to be
translated. None if no translation required.
"""
__orig_popen3 = os.popen3
def __wrap_popen3(cmd, mode='t', bufsize=-1):
cmdline = cmd.split(' ')
interp = get_script_interp(cmdline[0], cygwin_path)
if interp:
cmd = interp + ' ' + cmd
return __orig_popen3(cmd, mode, bufsize)
os.popen3 = __wrap_popen3
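# Hedged usage sketch (Windows-only scenario, hypothetical paths): after
# wrap_popen3_for_win(r'C:\cygwin\bin'), a call such as
# os.popen3('hello.pl arg') reads the '#!/usr/bin/perl' line from hello.pl
# and actually runs r'C:\cygwin\bin\perl hello.pl arg'.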
def hexify(s):
    """Return a space-separated hex dump of the byte string s."""
    return ' '.join(['%02x' % x for x in six.iterbytes(s)])
def get_class_logger(o):
"""Return the logging class information."""
return logging.getLogger('%s.%s' %
(o.__class__.__module__, o.__class__.__name__))
def pack_byte(b):
"""Pack an integer to network-ordered byte"""
return struct.pack('!B', b)
class NoopMasker(object):
"""A NoOp masking object.
This has the same interface as RepeatedXorMasker but just returns
the string passed in without making any change.
"""
def __init__(self):
"""NoOp."""
pass
def mask(self, s):
"""NoOp."""
return s
class RepeatedXorMasker(object):
"""A masking object that applies XOR on the string.
Applies XOR on the byte string given to mask method with the masking bytes
given to the constructor repeatedly. This object remembers the position
in the masking bytes the last mask method call ended and resumes from
that point on the next mask method call.
"""
def __init__(self, masking_key):
self._masking_key = masking_key
self._masking_key_index = 0
def _mask_using_swig(self, s):
"""Perform the mask via SWIG."""
masked_data = fast_masking.mask(s, self._masking_key,
self._masking_key_index)
self._masking_key_index = ((self._masking_key_index + len(s)) %
len(self._masking_key))
return masked_data
def _mask_using_array(self, s):
"""Perform the mask via python."""
if isinstance(s, six.text_type):
raise Exception(
'Masking Operation should not process unicode strings')
result = bytearray(s)
masking_key = [c for c in six.iterbytes(self._masking_key)]
masking_key_size = len(masking_key)
masking_key_index = self._masking_key_index
for i in range(len(result)):
result[i] ^= masking_key[masking_key_index]
masking_key_index = (masking_key_index + 1) % masking_key_size
self._masking_key_index = masking_key_index
return bytes(result)
if 'fast_masking' in globals():
mask = _mask_using_swig
else:
mask = _mask_using_array
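# A minimal usage sketch for RepeatedXorMasker (illustrative values). XOR is
# an involution, so masking the masked bytes with a fresh masker and the same
# key restores the original payload:
#
#   masker = RepeatedXorMasker(b'\x01\x02\x03\x04')
#   masked = masker.mask(b'payload')
#   assert RepeatedXorMasker(b'\x01\x02\x03\x04').mask(masked) == b'payload'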
class _Deflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
        # zlib rejects a raw-deflate window size of 8 bits, so clamp to 9.
        window_bits = max(window_bits, 9)
self._compress = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -window_bits)
def compress(self, bytes):
compressed_bytes = self._compress.compress(bytes)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_flush(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_finish(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_FINISH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
class _Inflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._window_bits = window_bits
self._unconsumed = b''
self.reset()
def decompress(self, size):
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = b''
while True:
data += self._decompress.decompress(self._unconsumed,
max(0, size - len(data)))
self._unconsumed = self._decompress.unconsumed_tail
            if self._decompress.unused_data:
                # A final DEFLATE block ended inside the buffered input; the
                # remaining bytes start a fresh stream, so reset the
                # decompressor before consuming them.
                self._unconsumed = self._decompress.unused_data
                self.reset()
if size >= 0 and len(data) == size:
break
else:
continue
else:
break
if data:
self._logger.debug('Decompressed %r', data)
return data
def append(self, data):
self._logger.debug('Appended %r', data)
self._unconsumed += data
def reset(self):
self._logger.debug('Reset')
self._decompress = zlib.decompressobj(-self._window_bits)
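# A hedged round-trip sketch for the two helpers above (illustrative input):
#
#   inflater = _Inflater(zlib.MAX_WBITS)
#   inflater.append(_Deflater(zlib.MAX_WBITS).compress_and_flush(b'abc'))
#   inflater.decompress(-1)  # -> b'abc'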
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits, no_context_takeover):
self._deflater = None
if window_bits is None:
window_bits = zlib.MAX_WBITS
self._window_bits = window_bits
self._no_context_takeover = no_context_takeover
def filter(self, bytes, end=True, bfinal=False):
if self._deflater is None:
self._deflater = _Deflater(self._window_bits)
if bfinal:
result = self._deflater.compress_and_finish(bytes)
result = result + pack_byte(0)
self._deflater = None
return result
result = self._deflater.compress_and_flush(bytes)
        if end:
            # Strip the trailing 0x00 0x00 0xff 0xff appended by
            # Z_SYNC_FLUSH, per RFC 1979 section 2.1.
            result = result[:-4]
if self._no_context_takeover and end:
self._deflater = None
return result
class _RFC1979Inflater(object):
"""A decompressor class a la RFC1979.
A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits=zlib.MAX_WBITS):
self._inflater = _Inflater(window_bits)
def filter(self, bytes):
self._inflater.append(bytes + b'\x00\x00\xff\xff')
return self._inflater.decompress(-1)
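# A hedged end-to-end sketch of the RFC 1979 pair (illustrative input): the
# deflater strips the 0x00 0x00 0xff 0xff sync-flush marker and the inflater
# re-appends it before inflating, so the round trip is lossless:
#
#   deflater = _RFC1979Deflater(None, False)
#   inflater = _RFC1979Inflater()
#   assert inflater.filter(deflater.filter(b'hello')) == b'hello'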
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
_RECV_SIZE = 4096
def __init__(self, socket):
self._socket = socket
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater(zlib.MAX_WBITS)
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return b''
self._inflater.append(read_data)
def sendall(self, bytes):
self.send(bytes)
def send(self, bytes):
self._socket.sendall(self._deflater.compress_and_flush(bytes))
return len(bytes)
| [
"[email protected]"
] | |
761d376738dff5065eb205f52483975c3895404c | 11e3ca0f8332835bd28ed69b20729079d071289c | /devel/lib/python2.7/dist-packages/custom_sys_msgs/msg/_TrackPathActionResult.py | 53fa8b36dc54d0f018d69916ce9299010e3403e2 | [] | no_license | huan-holmes/roswrok | 7d90cdb97f3b8af849968de0e498df64448454bd | 3d7b15cd53a5f14e40538c1b8fc330e96ae8d379 | refs/heads/master | 2022-12-08T13:12:31.850804 | 2020-09-08T01:47:38 | 2020-09-08T01:47:38 | 293,672,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,026 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from custom_sys_msgs/TrackPathActionResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import custom_sys_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class TrackPathActionResult(genpy.Message):
_md5sum = "1eb06eeff08fa7ea874431638cb52332"
_type = "custom_sys_msgs/TrackPathActionResult"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
TrackPathResult result
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: custom_sys_msgs/TrackPathResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# no result currently
"""
__slots__ = ['header','status','result']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','custom_sys_msgs/TrackPathResult']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TrackPathActionResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = custom_sys_msgs.msg.TrackPathResult()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.result = custom_sys_msgs.msg.TrackPathResult()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = custom_sys_msgs.msg.TrackPathResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = custom_sys_msgs.msg.TrackPathResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| [
"[email protected]"
] | |
1b748902903c60969cc84f3c2ff0c43c78765de4 | 03e1bef3d11a7b925b43b18f002c74ffd82d170c | /caffe2/python/layers_test.py | 77918792a46a814f8fff3ea91477245fdf0efeee | [
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | ConeyFly/caffe2 | 7f921816e9585ec2a49889d2500be50b4e0c9c8b | bcf8f149b3bf05611c7f7e1c769b75f5bc49cdc7 | refs/heads/master | 2021-01-01T04:04:23.326615 | 2017-08-19T01:19:21 | 2017-08-19T01:19:21 | 97,118,593 | 0 | 0 | null | 2017-07-13T12:08:39 | 2017-07-13T12:08:39 | null | UTF-8 | Python | false | false | 32,695 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
schema,
workspace,
)
from caffe2.python.layers.layers import (
InstantiationContext,
)
from caffe2.python.layers.tags import Tags
from caffe2.python.layer_test_util import (
LayersTestCase,
OpSpec,
)
from caffe2.python.layers.layers import (
set_request_only,
is_request_only_scalar,
)
class TestLayers(LayersTestCase):
def _test_net(self, net, ops_list):
"""
Helper function to assert the net contains some set of operations and
then to run the net.
Inputs:
net -- the network to test and run
ops_list -- the list of operation specifications to check for
in the net
"""
ops_output = self.assertNetContainOps(net, ops_list)
workspace.RunNetOnce(net)
return ops_output
def testFCWithoutBias(self):
output_dims = 2
fc_without_bias = self.model.FCWithoutBias(
self.model.input_feature_schema.float_features, output_dims)
self.model.output_schema = fc_without_bias
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
fc_without_bias
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
]
)
mat_mul_spec = OpSpec(
"MatMul",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
],
fc_without_bias.field_blobs()
)
self.assertNetContainOps(train_net, [mat_mul_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [mat_mul_spec])
def testSamplingTrain(self):
output_dims = 1000
indices = self.new_record(schema.Scalar((np.int32, (10,))))
sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))
sampled_fc = self.model.SamplingTrain(
schema.Struct(
('input', self.model.input_feature_schema.float_features),
('indices', indices),
('sampling_prob', sampling_prob),
),
"FC",
output_dims,
)
self.model.output_schema = sampled_fc
# Check that we don't add prediction layer into the model
self.assertEqual(1, len(self.model.layers))
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
sampled_fc
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("UniformFill", None, None),
]
)
sampled_fc_layer = self.model.layers[0]
gather_w_spec = OpSpec(
"Gather",
[
init_ops[0].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[0]
]
)
gather_b_spec = OpSpec(
"Gather",
[
init_ops[1].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[1]
]
)
train_fc_spec = OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
] + sampled_fc_layer._prediction_layer.train_param_blobs,
sampled_fc.field_blobs()
)
log_spec = OpSpec("Log", [sampling_prob()], [None])
sub_spec = OpSpec(
"Sub",
[sampled_fc.field_blobs()[0], None],
sampled_fc.field_blobs()
)
train_ops = self.assertNetContainOps(
train_net,
[gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])
self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[
OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
init_ops[1].output[0],
],
sampled_fc.field_blobs()
)
]
)
def testBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchMSELoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
))
loss = self.model.BatchMSELoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSigmoidCrossEntropyLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (32,)))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSoftmaxLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
def testBatchSoftmaxLossWeight(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
@given(
X=hu.arrays(dims=[2, 5]),
)
def testBatchNormalization(self, X):
input_record = self.new_record(schema.Scalar((np.float32, (5,))))
schema.FeedRecord(input_record, [X])
bn_output = self.model.BatchNormalization(input_record)
self.assertEqual(schema.Scalar((np.float32, (5,))), bn_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
input_blob = input_record.field_blobs()[0]
output_blob = bn_output.field_blobs()[0]
expand_dims_spec = OpSpec(
"ExpandDims",
[input_blob],
[input_blob],
)
train_bn_spec = OpSpec(
"SpatialBN",
[input_blob, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob, init_ops[2].output[0], init_ops[3].output[0], None, None],
{'is_test': 0, 'order': 'NCHW', 'momentum': 0.9},
)
test_bn_spec = OpSpec(
"SpatialBN",
[input_blob, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob],
{'is_test': 1, 'order': 'NCHW', 'momentum': 0.9},
)
squeeze_spec = OpSpec(
"Squeeze",
[output_blob],
[output_blob],
)
self.assertNetContainOps(
train_net,
[expand_dims_spec, train_bn_spec, squeeze_spec]
)
eval_net = self.get_eval_net()
self.assertNetContainOps(
eval_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=1, max_value=10),
)
def testLastNWindowCollector(self, X, num_to_collect):
input_record = self.new_record(schema.Scalar(np.float32))
schema.FeedRecord(input_record, [X])
last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(last_n)
start = max(0, 5 - num_to_collect)
npt.assert_array_equal(X[start:], output_record())
def testUniformSampling(self):
input_record = self.new_record(schema.Scalar(np.int32))
input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
schema.FeedRecord(input_record, [input_array])
num_samples = 20
num_elements = 100
uniform_sampling_output = self.model.UniformSampling(
input_record, num_samples, num_elements)
self.model.loss = uniform_sampling_output
self.run_train_net()
samples = workspace.FetchBlob(uniform_sampling_output.samples())
sampling_prob = workspace.FetchBlob(
uniform_sampling_output.sampling_prob())
self.assertEqual(num_samples, len(samples))
np.testing.assert_array_equal(input_array, samples[:len(input_array)])
np.testing.assert_almost_equal(
np.array([float(num_samples) / num_elements] * num_samples,
dtype=np.float32),
sampling_prob
)
def testGatherRecord(self):
indices = np.array([1, 3, 4], dtype=np.int32)
dense = np.array(list(range(20)), dtype=np.float32).reshape(10, 2)
lengths = np.array(list(range(10)), dtype=np.int32)
items = np.array(list(range(lengths.sum())), dtype=np.int64)
items_lengths = np.array(list(range(lengths.sum())), dtype=np.int32)
items_items = np.array(list(range(items_lengths.sum())), dtype=np.int64)
record = self.new_record(schema.Struct(
('dense', schema.Scalar(np.float32)),
('sparse', schema.Struct(
('list', schema.List(np.int64)),
('list_of_list', schema.List(schema.List(np.int64))),
)),
('empty_struct', schema.Struct())
))
indices_record = self.new_record(schema.Scalar(np.int32))
input_record = schema.Struct(
('indices', indices_record),
('record', record),
)
schema.FeedRecord(
input_record,
[indices, dense, lengths, items, lengths, items_lengths,
items_items])
gathered_record = self.model.GatherRecord(input_record)
self.assertTrue(schema.equal_schemas(gathered_record, record))
self.run_train_net_forward_only()
gathered_dense = workspace.FetchBlob(gathered_record.dense())
np.testing.assert_array_equal(
np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
gathered_lengths = workspace.FetchBlob(
gathered_record.sparse.list.lengths())
np.testing.assert_array_equal(
np.concatenate([lengths[i:i + 1] for i in indices]),
gathered_lengths)
gathered_items = workspace.FetchBlob(
gathered_record.sparse.list.items())
offsets = lengths.cumsum() - lengths
np.testing.assert_array_equal(
np.concatenate([
items[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]), gathered_items)
gathered_items_lengths = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.lengths())
np.testing.assert_array_equal(
np.concatenate([
items_lengths[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]),
gathered_items_lengths
)
        # For each outer row, compute where its nested items begin and how
        # many nested items it owns, to build the expected gathered output.
        nested_offsets = []
        nested_lengths = []
        nested_offset = 0
        j = 0
for l in lengths:
nested_offsets.append(nested_offset)
nested_length = 0
for _i in range(l):
nested_offset += items_lengths[j]
nested_length += items_lengths[j]
j += 1
nested_lengths.append(nested_length)
gathered_items_items = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.items())
np.testing.assert_array_equal(
np.concatenate([
items_items[nested_offsets[i]:
nested_offsets[i] + nested_lengths[i]]
for i in indices
]),
gathered_items_items
)
def testMapToRange(self):
input_record = self.new_record(schema.Scalar(np.int32))
indices_blob = self.model.MapToRange(input_record,
max_index=100).indices
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
schema.FeedRecord(
input_record,
[np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
indices
)
eval_net = self.get_eval_net()
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
indices
)
predict_net = self.get_predict_net()
schema.FeedRecord(
input_record,
[np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(predict_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
indices
)
def testSelectRecordByContext(self):
float_features = self.model.input_feature_schema.float_features
float_array = np.array([1.0, 2.0], dtype=np.float32)
schema.FeedRecord(float_features, [float_array])
with Tags(Tags.EXCLUDE_FROM_PREDICTION):
log_float_features = self.model.Log(float_features, 1)
joined = self.model.SelectRecordByContext(
schema.Struct(
(InstantiationContext.PREDICTION, float_features),
(InstantiationContext.TRAINING, log_float_features),
# TODO: TRAIN_ONLY layers are also generated in eval
(InstantiationContext.EVAL, log_float_features),
)
)
# model.output_schema has to a struct
self.model.output_schema = schema.Struct((
'joined', joined
))
predict_net = layer_model_instantiator.generate_predict_net(self.model)
workspace.RunNetOnce(predict_net)
predict_output = schema.FetchRecord(predict_net.output_record())
npt.assert_array_equal(float_array,
predict_output['joined']())
eval_net = layer_model_instantiator.generate_eval_net(self.model)
workspace.RunNetOnce(eval_net)
eval_output = schema.FetchRecord(eval_net.output_record())
npt.assert_array_equal(np.log(float_array),
eval_output['joined']())
_, train_net = (
layer_model_instantiator.generate_training_nets_forward_only(
self.model
)
)
workspace.RunNetOnce(train_net)
train_output = schema.FetchRecord(train_net.output_record())
npt.assert_array_equal(np.log(float_array),
train_output['joined']())
def testFunctionalLayer(self):
def normalize(net, in_record, out_record):
mean = net.ReduceFrontMean(in_record(), 1)
net.Sub(
[in_record(), mean],
out_record(),
broadcast=1)
normalized = self.model.Functional(
self.model.input_feature_schema.float_features, 1,
normalize, name="normalizer")
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, 32))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelper(self):
mean = self.model.ReduceFrontMean(
self.model.input_feature_schema.float_features, 1)
normalized = self.model.Sub(
schema.Tuple(
self.model.input_feature_schema.float_features, mean),
1, broadcast=1)
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, (32,)))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelperAutoInference(self):
softsign = self.model.Softsign(
schema.Tuple(self.model.input_feature_schema.float_features),
1)
assert softsign.field_type().base == np.float32
assert softsign.field_type().shape == (32,)
self.model.output_schema = self.model.FC(softsign, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 2
assert ops[0].type == "Softsign"
assert ops[1].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[0].output) == 1
assert ops[0].output[0] in ops[1].input
def testFunctionalLayerHelperAutoInferenceScalar(self):
loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual(tuple(), loss.field_types()[0].shape)
def testFunctionalLayerInputCoercion(self):
one = self.model.global_constants['ONE']
two = self.model.Add([one, one], 1)
self.model.loss = two
self.run_train_net()
data = workspace.FetchBlob(two.field_blobs()[0])
np.testing.assert_array_equal([2.0], data)
def testFunctionalLayerWithOutputNames(self):
k = 3
topk = self.model.TopK(
self.model.input_feature_schema,
output_names_or_num=['values', 'indices'],
k=k,
)
self.assertEqual(2, len(topk.field_types()))
self.assertEqual(np.float32, topk.field_types()[0].base)
self.assertEqual((k,), topk.field_types()[0].shape)
self.assertEqual(np.int32, topk.field_types()[1].base)
self.assertEqual((k,), topk.field_types()[1].shape)
self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())
def testFunctionalLayerWithOutputDtypes(self):
loss = self.model.AveragedLoss(
self.model.input_feature_schema,
1,
output_dtypes=(np.float32, (1,)),
)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual((1,), loss.field_types()[0].shape)
def testPropagateRequestOnly(self):
# test case when output is request only
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (32, )))),
('input2', schema.Scalar((np.float32, (64, )))),
('input3', schema.Scalar((np.float32, (16, )))),
))
set_request_only(input_record)
concat_output = self.model.Concat(input_record)
self.assertEqual(is_request_only_scalar(concat_output), True)
# test case when output is not request only
input_record2 = self.new_record(schema.Struct(
('input4', schema.Scalar((np.float32, (100, ))))
)) + input_record
concat_output2 = self.model.Concat(input_record2)
self.assertEqual(is_request_only_scalar(concat_output2), False)
def testSetRequestOnly(self):
input_record = schema.Scalar(np.int64)
schema.attach_metadata_to_scalars(
input_record,
schema.Metadata(
categorical_limit=100000000,
expected_value=99,
feature_specs=schema.FeatureSpec(
feature_ids=[1, 100, 1001]
)
)
)
set_request_only(input_record)
self.assertEqual(input_record.metadata.categorical_limit, 100000000)
self.assertEqual(input_record.metadata.expected_value, 99)
self.assertEqual(
input_record.metadata.feature_specs.feature_ids,
[1, 100, 1001]
)
@given(
X=hu.arrays(dims=[5, 5]), # Shape of X is irrelevant
)
def testDropout(self, X):
input_record = self.new_record(schema.Scalar((np.float32, (1,))))
schema.FeedRecord(input_record, [X])
d_output = self.model.Dropout(input_record)
self.assertEqual(schema.Scalar((np.float32, (1,))), d_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
input_blob = input_record.field_blobs()[0]
output_blob = d_output.field_blobs()[0]
train_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 0, 'ratio': 0.5}
)
test_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 1, 'ratio': 0.5}
)
self.assertNetContainOps(
train_net,
[train_d_spec]
)
eval_net = self.get_eval_net()
self.assertNetContainOps(
eval_net,
[test_d_spec]
)
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[test_d_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
bandwidth=st.floats(min_value=0.1, max_value=5),
)
def testRandomFourierFeatures(self, batch_size, input_dims, output_dims, bandwidth):
X = np.random.random((batch_size, input_dims)).astype(np.float32)
scale = np.sqrt(2.0 / output_dims)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
rff_output = self.model.RandomFourierFeatures(input_record,
output_dims,
bandwidth)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
rff_output
)
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
init_ops = self._test_net(train_init_net, init_ops_list)
W = workspace.FetchBlob(self.model.layers[0].w)
b = workspace.FetchBlob(self.model.layers[0].b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, init_ops[0].output[0],
init_ops[1].output[0]], None)
cosine_spec = OpSpec("Cos", None, None)
scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
{'scale': scale})
ops_list = [
fc_spec,
cosine_spec,
scale_spec
]
# Train net assertions
self._test_net(train_net, ops_list)
self._rff_hypothesis_test(rff_output(), X, W, b, scale)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
self._rff_hypothesis_test(rff_output(), X, W, b, scale)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
self._rff_hypothesis_test(rff_output(), X, W, b, scale)
def _rff_hypothesis_test(self, rff_output, X, W, b, scale):
"""
Runs hypothesis test for Semi Random Features layer.
Inputs:
rff_output -- output of net after running random fourier features layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
scale -- value by which to scale the output vector
"""
output = workspace.FetchBlob(rff_output)
output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
npt.assert_allclose(output, output_ref, rtol=1e-4)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
s=st.integers(min_value=0, max_value=3),
scale=st.floats(min_value=0.1, max_value=5)
)
def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s, scale):
X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
ac_output = self.model.ArcCosineFeatureMap(input_record,
output_dims,
s=s,
scale=scale)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
ac_output
)
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops = self._test_net(train_init_net, init_ops_list)
workspace.RunNetOnce(self.model.param_init_net)
W = workspace.FetchBlob(self.model.layers[0].random_w)
b = workspace.FetchBlob(self.model.layers[0].random_b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, init_ops[0].output[0],
init_ops[1].output[0]], None)
gt_spec = OpSpec("GT", None, None, {'broadcast': 1})
cast_spec = OpSpec("Cast", None, ac_output.field_blobs())
relu_spec = OpSpec("Relu", None, None)
relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
mul_spec = OpSpec("Mul", None, ac_output.field_blobs())
if s == 0:
ops_list = [
fc_spec,
gt_spec,
cast_spec,
]
elif s == 1:
ops_list = [
fc_spec,
relu_spec_output,
]
else:
ops_list = [
fc_spec,
relu_spec,
pow_spec,
mul_spec,
]
# Train net assertions
self._test_net(train_net, ops_list)
self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
def _arc_cosine_hypothesis_test(self, ac_output, X, W, b, s):
"""
Runs hypothesis test for Arc Cosine layer.
Inputs:
ac_output -- output of net after running arc cosine layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
s -- degree parameter
"""
# Get output from net
net_output = workspace.FetchBlob(ac_output)
# Computing output directly
x_rand = np.matmul(X, np.transpose(W)) + b
x_pow = np.power(x_rand, s)
h_rand_features = np.piecewise(x_rand, [x_rand <= 0, x_rand > 0], [0, 1])
output_ref = np.multiply(x_pow, h_rand_features)
# Comparing net output and computed output
npt.assert_allclose(net_output, output_ref, rtol=1e-04)
| [
"[email protected]"
] | |
75f729bc861f9e09e111d25926334f32e7145824 | 099deeb2c308bdc00a2c423743e4b2aacdac866c | /week1/searching_lyrics/youngyun.py | ba410784e8f79a376a74247b82107a7c61ea8364 | [] | no_license | Joonsun-Hwang/coding-test-study-lamda | 76fed2f18a3220f6731775984425dff49b4379eb | 0632ec9dd60024203ed10ebeab07aa7da4782806 | refs/heads/main | 2023-05-01T21:31:48.174944 | 2021-05-05T09:48:23 | 2021-05-05T09:48:23 | 329,205,708 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | # Overly long code with poor time complexity: matching characters one by one can degenerate to O(n^3) in some cases
def solution(words, queries):
answer = []
print(words)
for query in queries:
match = 0
for word in words:
flag = True
j = 0
i = 0
            if len(word) != len(query): # no point comparing if the lengths differ in the first place
continue
else:
                if query[0] == "?": # the query starts with '?'
                    while query[i] == "?": # how far do the leading '?'s run?
i += 1
for idx in range(i, len(query)):
if query[idx] != word[idx]:
flag = False
if flag == True:
match += 1
                else: # the query starts with a letter
while query[i] != "?":
i += 1
while word[j] == query[j]:
j += 1
if i == j:
match += 1
answer.append(match)
return answer
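# A simpler per-pair check expressing the same idea (a hedged sketch, not the
# trie-based approach this problem ultimately calls for; `matches` is a
# hypothetical helper):
#
# def matches(word, query):
#     return len(word) == len(query) and all(
#         q == "?" or q == w for w, q in zip(word, query))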
| [
"[email protected]"
] | |
e51ebc17d32c53b6a368d0019d094b269df2bf23 | e44d77c4d22a9cd0aa0a536d6d4e522359eedc81 | /trialscompendium/trials/api/urls.py | eb740a50c49131d2158a35228430de7d7ed74501 | [
"MIT"
] | permissive | nkoech/trialscompendium | cfd3f65a88a32e3f71bc27cba6a4ae777e319af4 | 9cd2c28a22957b84d97d87eb6b7b9b1b8616bacb | refs/heads/master | 2021-01-01T04:54:18.162744 | 2018-04-17T15:40:05 | 2018-04-17T15:40:05 | 97,270,861 | 2 | 2 | MIT | 2018-02-15T09:48:23 | 2017-07-14T20:36:19 | Python | UTF-8 | Python | false | false | 1,414 | py | from django.conf.urls import url
from .views import (
plot_views,
trial_yield_views,
treatment_views,
)
# Treatment URLs
urlpatterns = [
url(
r'^treatment/$',
treatment_views['TreatmentListAPIView'].as_view(),
name='treatment_list'
),
url(
r'^treatment/create/$',
treatment_views['TreatmentCreateAPIView'].as_view(),
name='treatment_create'
),
url(
r'^treatment/(?P<pk>[\w-]+)/$',
treatment_views['TreatmentDetailAPIView'].as_view(),
name='treatment_detail'
),
]
# Trial Yield URLs
urlpatterns += [
url(
r'^yield/$',
trial_yield_views['TrialYieldListAPIView'].as_view(),
name='trial_yield_list'
),
url(
r'^yield/create/$',
trial_yield_views['TrialYieldCreateAPIView'].as_view(),
name='trial_yield_create'
),
url(
r'^yield/(?P<pk>[\w-]+)/$',
trial_yield_views['TrialYieldDetailAPIView'].as_view(),
name='trial_yield_detail'
),
]
# Plot URLs
urlpatterns += [
url(
r'^$',
plot_views['PlotListAPIView'].as_view(),
name='plot_list'
),
url(
r'^create/$',
plot_views['PlotCreateAPIView'].as_view(),
name='plot_create'
),
url(
r'^(?P<slug>[\w-]+)/$',
plot_views['PlotDetailAPIView'].as_view(),
name='plot_detail'
),
]
| [
"[email protected]"
] | |
aec98a6c7f1944baa6fd27a6ecf9f0ca61fb45e8 | cfa632132cd29a0b58e7f45b441ea4f62b0f5eba | /flytekit/models/admin/workflow.py | 91e82252b40cf9f189066fa756e314dbf9e671ad | [
"Apache-2.0"
] | permissive | chixcode/flytekit | 5b4f2e687e82a0d6527411afcdaf0929a94adb13 | f901aee721847c6264d44079d4fa31a75b8876e1 | refs/heads/master | 2020-08-24T00:06:02.808187 | 2019-10-14T18:34:19 | 2019-10-14T18:34:19 | 216,729,272 | 1 | 0 | Apache-2.0 | 2019-10-22T05:22:01 | 2019-10-22T05:22:00 | null | UTF-8 | Python | false | false | 3,275 | py | from __future__ import absolute_import
from flytekit.models import common as _common
from flytekit.models.core import compiler as _compiler_models, identifier as _identifier, workflow as _workflow_models
from flyteidl.admin import workflow_pb2 as _admin_workflow
class WorkflowSpec(_common.FlyteIdlEntity):
def __init__(self, template):
"""
This object fully encapsulates the specification of a workflow
:param flytekit.models.core.workflow.WorkflowTemplate template:
"""
self._template = template
@property
def template(self):
"""
:rtype: flytekit.models.core.workflow.WorkflowTemplate.WorkflowTemplate
"""
return self._template
def to_flyte_idl(self):
"""
:rtype: flyteidl.admin.workflow_pb2.WorkflowSpec
"""
return _admin_workflow.WorkflowSpec(
template=self._template.to_flyte_idl()
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param pb2_object: flyteidl.admin.workflow_pb2.WorkflowSpec
:rtype: WorkflowSpec
"""
        return cls(template=_workflow_models.WorkflowTemplate.from_flyte_idl(pb2_object.template))
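# A minimal round-trip sketch (assumes `template` is a
# flytekit.models.core.workflow.WorkflowTemplate and that FlyteIdlEntity
# equality compares the serialized protos):
#
#   spec = WorkflowSpec(template)
#   assert WorkflowSpec.from_flyte_idl(spec.to_flyte_idl()) == spec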
class Workflow(_common.FlyteIdlEntity):
def __init__(
self,
id,
closure
):
"""
:param flytekit.models.core.identifier.Identifier id:
:param WorkflowClosure closure:
"""
self._id = id
self._closure = closure
@property
def id(self):
"""
:rtype: flytekit.models.core.identifier.Identifier
"""
return self._id
@property
def closure(self):
"""
:rtype: WorkflowClosure
"""
return self._closure
def to_flyte_idl(self):
"""
:rtype: flyteidl.admin.workflow_pb2.Workflow
"""
return _admin_workflow.Workflow(
id=self.id.to_flyte_idl(),
closure=self.closure.to_flyte_idl()
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.admin.workflow_pb2.Workflow pb2_object:
:return: Workflow
"""
return cls(
id=_identifier.Identifier.from_flyte_idl(pb2_object.id),
closure=WorkflowClosure.from_flyte_idl(pb2_object.closure)
)
class WorkflowClosure(_common.FlyteIdlEntity):
def __init__(self, compiled_workflow):
"""
:param flytekit.models.core.compiler.CompiledWorkflowClosure compiled_workflow:
"""
self._compiled_workflow = compiled_workflow
@property
def compiled_workflow(self):
"""
:rtype: flytekit.models.core.compiler.CompiledWorkflowClosure
"""
return self._compiled_workflow
def to_flyte_idl(self):
"""
:rtype: flyteidl.admin.workflow_pb2.WorkflowClosure
"""
return _admin_workflow.WorkflowClosure(
compiled_workflow=self.compiled_workflow.to_flyte_idl()
)
@classmethod
def from_flyte_idl(cls, p):
"""
:param flyteidl.admin.workflow_pb2.WorkflowClosure p:
:rtype: WorkflowClosure
"""
return cls(
compiled_workflow=_compiler_models.CompiledWorkflowClosure.from_flyte_idl(p.compiled_workflow)
)
| [
"[email protected]"
] | |
b3afe55fffeda671f0363abb51fa4c1e66106c94 | 79ed3f72555aad8548634f523f775f34cfe166e7 | /catch/datasets/hag1.py | fdae6eb661d5e4cfaa9398d0e382898e7a03c753 | [
"MIT"
] | permissive | John-Bioinfo/catch | a2ab188ed598767e7759f74227f24af2b284b379 | fe63b86bc41396c1da0b449ac440c6ae9e52b2c5 | refs/heads/master | 2020-03-18T09:29:10.315733 | 2018-04-17T18:36:47 | 2018-04-17T18:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | """Dataset with 'Human associated gemyvongvirus 1' sequences.
A dataset with 2 'Human associated gemyvongvirus 1' genomes.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
__author__ = 'Hayden Metsky <[email protected]>'
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/hag1.fasta", relative=True)
sys.modules[__name__] = ds
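# Replacing the sys.modules entry means that importing this module yields the
# GenomesDatasetSingleChrom instance itself rather than a plain module object.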
| [
"[email protected]"
] | |
1a9adb679879e2543ec9b2ab25ece1e16b986fb3 | d569476dd95496339c34b231621ff1f5dfd7fe49 | /PyTest/SampleWebsite/tests/Pages/FeedbackForm.py | 9cd2c00d4dbc8431b3b190dfb5b56ffb0c4b290f | [] | no_license | monteua/Tests | 10f21f9bae027ce1763c73e2ea7edaf436140eae | 553e5f644466683046ea180422727ccb37967b98 | refs/heads/master | 2021-01-23T10:28:49.654273 | 2018-05-09T09:11:30 | 2018-05-09T09:11:30 | 93,061,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
#page URL
base_url = "CLASSIFIED"
# page locators
page_logo = "logo"
company_moto = "//*[@class='col-xs-8 align-center']//p"
company_name = "//*[@class='col-xs-8 align-center']//h1"
mailing_list = "//*[@class='pointer']//span"
mailing_list_popup = "modal-content"
mailing_list_headline = "myModalLabel"
class FeedbackForm(object):
def __init__(self, driver):
self.driver = driver
def open_page(self):
self.driver.get(base_url)
# checking the position of the company logo
def check_logo(self):
logo = self.driver.find_element(By.CLASS_NAME, page_logo)
logo_position = [logo.value_of_css_property("text-align"), logo.value_of_css_property("padding-left")]
return logo_position
# checking the css attributes of company moto
def check_moto(self):
moto = self.driver.find_element(By.XPATH, company_moto)
font_size = moto.value_of_css_property("font-size")
font_name = moto.value_of_css_property("font-family").split(",")[0]
font_style = moto.value_of_css_property("font-style")
text_centered = moto.value_of_css_property("text-align")
return [font_size, font_name, font_style, text_centered]
# checking the css attributes of company name
def check_company_name(self):
name = self.driver.find_element(By.XPATH, company_name)
font_size = name.value_of_css_property("font-size")
font_name = name.value_of_css_property("font-family").split(",")[0]
text_centered = name.value_of_css_property("text-align")
return [font_size, font_name, text_centered]
# checking the css attributes of mailing list button
def check_attr_of_mailing_list(self):
mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
btn_text = mailing_list_btn.text
font_size = mailing_list_btn.value_of_css_property("font-size")
font_name = mailing_list_btn.value_of_css_property("font-family").split(",")[0]
        # move the mouse pointer to see if text turns yellow
start_color = mailing_list_btn.value_of_css_property("color")
ActionChains(self.driver).move_to_element(mailing_list_btn).perform()
color_changed = mailing_list_btn.value_of_css_property("color")
return [btn_text, font_size, font_name, start_color, color_changed]
# checking if the Mailing List button opens pop-up
def check_mailing_list_popup(self):
mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
mailing_list_btn.click()
try:
pop_up = self.driver.find_element(By.CLASS_NAME, mailing_list_popup)
            if pop_up.is_displayed():
return True
else:
return False
except NoSuchElementException:
return False
# checking if the headline in mailing list matches desired one
def check_mailing_list_headline(self):
mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
mailing_list_btn.click()
return self.driver.find_element(By.ID, mailing_list_headline)
    # checking the attributes of the mailing list headline
def check_attr_of_mailing_list_headline(self):
mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
mailing_list_btn.click()
headline = self.driver.find_element(By.ID, mailing_list_headline)
size = headline.value_of_css_property("font-size")
font_family = headline.value_of_css_property("font-family").split(",")[0]
is_centered = headline.value_of_css_property("text-align")
if is_centered == "center":
return [size, font_family, True]
return [size, font_family, False]
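# Illustrative usage sketch (not part of the original page object). It assumes
# a local Chrome webdriver; base_url above is redacted ("CLASSIFIED"), so this
# stays commented out:
# from selenium import webdriver
# driver = webdriver.Chrome()
# page = FeedbackForm(driver)
# page.open_page()
# assert page.check_mailing_list_popup()
# driver.quit()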
| [
"[email protected]"
] | |
0f4aaf1913e904c2aa767b8653cd38ce34ff30bf | 18eef6419da5721139df45b92c7557dbfa654347 | /apps/users/migrations/0004_auto_20180529_2258.py | 40546c416a804fa94b82a01a3ea68690b306a82b | [] | no_license | taoing/pymx | a36989f805c760b535954a0056dcbd8180b32ea3 | 3f24b8d417f1c1896cab73c623ab9774807f520d | refs/heads/master | 2020-03-18T20:58:37.435126 | 2018-06-09T15:16:49 | 2018-06-09T15:16:49 | 135,250,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | # Generated by Django 2.0.3 on 2018-05-29 22:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20180529_1658'),
]
operations = [
migrations.AlterField(
model_name='user',
name='address',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='user',
name='nickname',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
| [
"="
] | = |
762bbd897e6f2e90c32606aa1aa9c01022761f67 | 27a4eaa9b7052d3d65dd6c63952ac3055c73fbf0 | /ztst/tweet_100th.py | 1d0d4b8246f3b8f2602b011ce8729e61c5b6ef7b | [] | no_license | tobby2002/vec4ir | 84f4695dc0a58f6cca6aff3c970167893a5d8c68 | 8acd1e91cd9eb0a8625bdf18cc3c37d2b7cc2a44 | refs/heads/master | 2021-06-19T20:42:15.913189 | 2021-03-21T10:31:35 | 2021-03-21T10:31:35 | 190,150,202 | 0 | 0 | null | 2019-06-04T07:19:09 | 2019-06-04T07:19:09 | null | UTF-8 | Python | false | false | 1,135 | py | from sklearn.feature_extraction.text import TfidfVectorizer
words = ['the cat sat on the mat cat', 'the fat rat sat on a mat', 'the bat and a rat sat on a mat']
tfidf_vectorizer = TfidfVectorizer(min_df=1, use_idf=True)
tfidf_matrix = tfidf_vectorizer.fit_transform(words)
terms_name = tfidf_vectorizer.get_feature_names()
to_array = tfidf_matrix.toarray()
for i in to_array:
    print(list(zip(terms_name, i)))
    print('i:', i)
# [(u'and', 0.0), (u'bat', 0.0), (u'cat', 0.78800079617844954), (u'fat', 0.0), (u'mat', 0.23270298212286766), (u'on', 0.23270298212286766), (u'rat', 0.0), (u'sat', 0.23270298212286766), (u'the', 0.46540596424573533)]
# [(u'and', 0.0), (u'bat', 0.0), (u'cat', 0.0), (u'fat', 0.57989687146162439), (u'mat', 0.34249643393071422), (u'on', 0.34249643393071422), (u'rat', 0.44102651785124652), (u'sat', 0.34249643393071422), (u'the', 0.34249643393071422)]
# [(u'and', 0.50165133177159349), (u'bat', 0.50165133177159349), (u'cat', 0.0), (u'fat', 0.0), (u'mat', 0.29628335772067432), (u'on', 0.29628335772067432), (u'rat', 0.38151876810273028), (u'sat', 0.29628335772067432), (u'the', 0.29628335772067432)]
| [
"tobby2002@gmailcom"
] | tobby2002@gmailcom |
7f4f6dc4aceff10b6bbf67daff605692bd017395 | cd08fe6948d6b743419cc5904764374dfaa7a0e2 | /apps/accounts/migrations/0001_initial.py | a140f0102164820ce482bef0d4416ca27406e347 | [] | no_license | divisible-by-hero/central-station | 2af493097be75a3c0299d9a10e76fafa8896cb66 | f6bc8a9cb9a502ffc49164187ebdcde2b51619c5 | refs/heads/develop | 2021-01-20T02:20:39.027229 | 2014-01-29T20:11:12 | 2014-01-29T20:11:12 | 955,878 | 5 | 1 | null | 2014-01-29T20:11:12 | 2010-10-02T03:08:57 | Python | UTF-8 | Python | false | false | 11,106 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Account'
db.create_table('accounts_account', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company_name', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('accounts', ['Account'])
# Adding model 'Team'
db.create_table('accounts_team', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('deleted_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Account'])),
))
db.send_create_signal('accounts', ['Team'])
# Adding model 'UserProfile'
db.create_table('accounts_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('deleted_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('accounts', ['UserProfile'])
# Adding M2M table for field teams on 'UserProfile'
db.create_table('accounts_userprofile_teams', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userprofile', models.ForeignKey(orm['accounts.userprofile'], null=False)),
('team', models.ForeignKey(orm['accounts.team'], null=False))
))
db.create_unique('accounts_userprofile_teams', ['userprofile_id', 'team_id'])
# Adding model 'Role'
db.create_table('accounts_role', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('deleted_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal('accounts', ['Role'])
# Adding model 'RoleAssigned'
db.create_table('accounts_roleassigned', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('deleted_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Team'])),
('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Role'])),
))
db.send_create_signal('accounts', ['RoleAssigned'])
def backwards(self, orm):
# Deleting model 'Account'
db.delete_table('accounts_account')
# Deleting model 'Team'
db.delete_table('accounts_team')
# Deleting model 'UserProfile'
db.delete_table('accounts_userprofile')
# Removing M2M table for field teams on 'UserProfile'
db.delete_table('accounts_userprofile_teams')
# Deleting model 'Role'
db.delete_table('accounts_role')
# Deleting model 'RoleAssigned'
db.delete_table('accounts_roleassigned')
models = {
'accounts.account': {
'Meta': {'object_name': 'Account'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'accounts.role': {
'Meta': {'object_name': 'Role'},
'created_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'accounts.roleassigned': {
'Meta': {'object_name': 'RoleAssigned'},
'created_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Role']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'accounts.team': {
'Meta': {'object_name': 'Team'},
'created_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"})
},
'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Team']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | [
"[email protected]"
] | |
f0ac9ae13d2122d7c283b67e7d26a9210686fa38 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/fiftyPercent/rank_4q5e_G.py | afe9e17d1c84d179371c4fcf47787fee1df0ea11 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,390 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '4q5e.csv'
identifier = 'G'
coefFrac = 0.5
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fiftyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fiftyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep the given fraction of coefficients, ordered by decreasing absolute magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
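# 14028 appears to be the total number of descriptor coefficients; the slice
# below keeps the top coefFrac fraction of them by absolute magnitude.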
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
    # Subtract each row of the dataframe from row i, then drop the trivial row[[i]] - row[[i]] self-comparison. Some input files have an erroneous 'class' column, which is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the standard scaler fit on the training data.
df2 = scaler.transform(df2)
    # Predict the class of each comparison descriptor and sum the classes to obtain a score. A higher score corresponds to a more native-like complex.
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
396e5b33aa1c7bb304b0785bb82b1573c352d6c1 | 5598c24e484177d6a5b0772013247e3118c6dcf1 | /spatialmath/base/__init__.py | 8546e1251ff85aa70bc62e59f7205d645f82cc31 | [
"MIT"
] | permissive | krenshaw2018/spatialmath-python | 587136040267984cd6a431eaae3e89c8d740a7f9 | 95629b378422d58a4c77c62ebf3b189ef7a42824 | refs/heads/master | 2023-03-28T21:14:03.992328 | 2021-04-06T10:36:53 | 2021-04-06T10:36:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | # Part of Spatial Math Toolbox for Python
# Copyright (c) 2000 Peter Corke
# MIT Licence, see details in top-level file: LICENCE
from spatialmath.base.argcheck import * # lgtm [py/polluting-import]
from spatialmath.base.quaternions import * # lgtm [py/polluting-import]
from spatialmath.base.transforms2d import * # lgtm [py/polluting-import]
from spatialmath.base.transforms3d import * # lgtm [py/polluting-import]
from spatialmath.base.transformsNd import * # lgtm [py/polluting-import]
from spatialmath.base.vectors import * # lgtm [py/polluting-import]
from spatialmath.base.symbolic import * # lgtm [py/polluting-import]
from spatialmath.base.animate import * # lgtm [py/polluting-import]
from spatialmath.base.graphics import * # lgtm [py/polluting-import]
from spatialmath.base.numeric import * # lgtm [py/polluting-import]
__all__ = [
# spatialmath.base.argcheck
'assertmatrix',
'ismatrix',
'getvector',
'assertvector',
'isvector',
'isscalar',
'getunit',
'isnumberlist',
'isvectorlist',
# spatialmath.base.quaternions
'pure',
'qnorm',
'unit',
'isunit',
'isequal',
'q2v',
'v2q',
'qqmul',
'inner',
'qvmul',
'vvmul',
'qpow',
'conj',
'q2r',
'r2q',
'slerp',
'rand',
'matrix',
'dot',
'dotb',
'angle',
'qprint',
# spatialmath.base.transforms2d
'rot2',
'trot2',
'transl2',
'ishom2',
'isrot2',
'trlog2',
'trexp2',
'tr2jac2',
'trinterp2',
'trprint2',
'trplot2',
'tranimate2',
'xyt2tr',
'tr2xyt',
'trinv2',
# spatialmath.base.transforms3d
'rotx',
'roty',
'rotz',
'trotx',
'troty',
'trotz',
'transl',
'ishom',
'isrot',
'rpy2r',
'rpy2tr',
'eul2r',
'eul2tr',
'angvec2r',
'angvec2tr',
'oa2r',
'oa2tr',
'tr2angvec',
'tr2eul',
'tr2rpy',
'trlog',
'trexp',
'trnorm',
'trinterp',
'delta2tr',
'trinv',
'tr2delta',
'tr2jac',
'rpy2jac',
'eul2jac',
'exp2jac',
'rot2jac',
'trprint',
'trplot',
'tranimate',
# spatialmath.base.transformsNd
't2r',
'r2t',
'tr2rt',
'rt2tr',
'Ab2M',
'isR',
'isskew',
'isskewa',
'iseye',
'skew',
'vex',
'skewa',
'vexa',
'h2e',
'e2h',
'homtrans',
'rodrigues',
# spatialmath.base.vectors
'colvec',
'unitvec',
'unitvec_norm',
'norm',
'normsq',
'isunitvec',
'iszerovec',
'isunittwist',
'isunittwist2',
'unittwist',
'unittwist_norm',
'unittwist2',
'angdiff',
'removesmall',
'cross',
'iszero',
# spatialmath.base.animate
'Animate',
'Animate2',
    # spatialmath.base.graphics
'plotvol2',
'plotvol3',
'plot_point',
'plot_text',
'plot_box',
'circle',
'ellipse',
'sphere',
'ellipsoid',
'plot_circle',
'plot_ellipse',
'plot_sphere',
'plot_ellipsoid',
'isnotebook',
    # spatialmath.base.numeric
'numjac',
]
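# Usage sketch (illustrative): the imports above flatten the subpackages into
# one namespace, so clients can write e.g.
#   from spatialmath.base import rotx, trotx
#   R = rotx(0.3)   # 3x3 rotation about the x-axis, angle in radians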
| [
"[email protected]"
] | |
482a8b9237a2c8bf0e2469c9062327966a46c836 | ccc407c900ac36c57f2716adcdd28f38108d62ef | /models.py | a2cb7960d1b4b75f3ca645404af912b5cd09e2ce | [] | no_license | uditduhan23/YT_FastAPI_Celery_Redis_Flower_Introduction | d1b3f2f361ae0dc11be39eded78ccde45724b45e | 124b35e3af08330cd89f3658906ec54b2f623b46 | refs/heads/main | 2023-06-16T23:30:36.456816 | 2021-07-16T14:42:16 | 2021-07-16T14:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.sql import func
from db_conf import Base
class Post(Base):
__tablename__ = "post"
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
author = Column(String)
content = Column(String)
time_created = Column(DateTime(timezone=True), server_default=func.now())
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True, index=True)
username = Column(String, unique=True)
password = Column(String(255))
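# Table-creation sketch (illustrative; assumes db_conf also exposes a
# SQLAlchemy engine, which is not imported above):
#   from db_conf import engine
#   Base.metadata.create_all(bind=engine)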
| [
"[email protected]"
] | |
9eb3b5e4e4cdd96a839e5c24b404869c18d5e9ee | 480e33f95eec2e471c563d4c0661784c92396368 | /GeneratorInterface/GenFilters/test/test_isotrack_cfg.py | af5d058a84dc0c93779710cc9bfda12f465b9313 | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 1,710 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.Generator.QCDForPF_14TeV_TuneCUETP8M1_cfi")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
generator = cms.PSet(
initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
)
)
# The following lines reduce the clutter of repeated printouts
# of the same exception message.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.categories.append('PythiaFilter')
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1000))
process.source = cms.Source("EmptySource")
process.load("GeneratorInterface.GenFilters.PythiaFilterIsolatedTrack_cfi")
process.GEN = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('QCD14TeVIsoTrack.root'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN')
)
)
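# Narrow the seed-track pseudorapidity window for this test (2.0 to 3.0).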
process.isotrack_filter.minSeedEta = 2.0
process.isotrack_filter.maxSeedEta = 3.0
process.p = cms.Path(process.generator * process.isotrack_filter)
process.outpath = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.p, process.outpath)
| [
"[email protected]"
] | |
d46c7e58abc3648273a301b1ff3f9c4f7e2b1214 | 2c0e8fa208f59fb3d23c7257cb9cff426fa16d85 | /ga4gh/backend.py | 2fdefbd883323811fd1ec9d7abe5a4cb4bec5766 | [
"Apache-2.0"
] | permissive | melaniedc/server | db24c37bf0e778630d0910a1101eeb908647b02e | 9fe974e421d2e4d3510e7928053edbbce47fd4bb | refs/heads/master | 2020-02-26T16:09:18.162175 | 2015-01-30T14:52:46 | 2015-01-30T14:52:46 | 26,870,317 | 0 | 0 | null | 2014-12-05T20:26:25 | 2014-11-19T16:28:39 | Python | UTF-8 | Python | false | false | 6,006 | py | """
Module responsible for handling protocol requests and returning
responses.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import ga4gh.protocol as protocol
class Backend(object):
"""
The GA4GH backend. This class provides methods for all of the GA4GH
protocol end points.
"""
def __init__(self, dataDir, variantSetClass):
self._dataDir = dataDir
self._variantSetIdMap = {}
# All directories in datadir are assumed to correspond to VariantSets.
for variantSetId in os.listdir(self._dataDir):
relativePath = os.path.join(self._dataDir, variantSetId)
if os.path.isdir(relativePath):
self._variantSetIdMap[variantSetId] = variantSetClass(
variantSetId, relativePath)
self._variantSetIds = sorted(self._variantSetIdMap.keys())
def parsePageToken(self, pageToken, numValues):
"""
Parses the specified pageToken and returns a list of the specified
number of values. Page tokens are assumed to consist of a fixed
        number of integers separated by colons. If the page token does
        not conform to this specification, raise an InvalidPageToken
exception.
"""
tokens = pageToken.split(":")
# TODO define exceptions.InvalidPageToken and raise here.
if len(tokens) != numValues:
raise Exception("Invalid number of values in page token")
# TODO catch a ValueError here when bad integers are passed and
# convert this into the appropriate InvalidPageToken exception.
values = map(int, tokens)
return values
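    # Example (illustrative): a variant-search page token such as "3:1052"
    # parses to [3, 1052], i.e. resume at variant set index 3, position 1052.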
def runSearchRequest(
self, requestStr, requestClass, responseClass, pageListName,
objectGenerator):
"""
Runs the specified request. The request is a string containing
a JSON representation of an instance of the specified requestClass
in which the page list variable has the specified pageListName.
We return a string representation of an instance of the specified
responseClass in JSON format. Objects are filled into the page list
using the specified object generator, which must return
(object, nextPageToken) pairs, and be able to resume iteration from
any point using the nextPageToken attribute of the request object.
"""
# TODO change this to fromJSONDict and validate
request = requestClass.fromJSONString(requestStr)
pageList = []
nextPageToken = None
for obj, nextPageToken in objectGenerator(request):
pageList.append(obj)
if len(pageList) >= request.pageSize:
break
response = responseClass()
response.nextPageToken = nextPageToken
setattr(response, pageListName, pageList)
return response.toJSONString()
def searchVariantSets(self, request):
"""
Returns a GASearchVariantSetsResponse for the specified
GASearchVariantSetsRequest object.
"""
return self.runSearchRequest(
request, protocol.GASearchVariantSetsRequest,
protocol.GASearchVariantSetsResponse, "variantSets",
self.variantSetsGenerator)
def searchVariants(self, request):
"""
Returns a GASearchVariantsResponse for the specified
GASearchVariantsRequest object.
"""
return self.runSearchRequest(
request, protocol.GASearchVariantsRequest,
protocol.GASearchVariantsResponse, "variants",
self.variantsGenerator)
def variantSetsGenerator(self, request):
"""
Returns a generator over the (variantSet, nextPageToken) pairs defined
        by the specified request.
"""
currentIndex = 0
if request.pageToken is not None:
currentIndex, = self.parsePageToken(request.pageToken, 1)
while currentIndex < len(self._variantSetIds):
variantSet = protocol.GAVariantSet()
variantSet.id = self._variantSetIds[currentIndex]
variantSet.datasetId = "NotImplemented"
variantSet.metadata = self._variantSetIdMap[
variantSet.id].getMetadata()
currentIndex += 1
nextPageToken = None
if currentIndex < len(self._variantSetIds):
nextPageToken = str(currentIndex)
yield variantSet, nextPageToken
def variantsGenerator(self, request):
"""
Returns a generator over the (variant, nextPageToken) pairs defined by
the specified request.
"""
variantSetIds = request.variantSetIds
startVariantSetIndex = 0
startPosition = request.start
if request.pageToken is not None:
startVariantSetIndex, startPosition = self.parsePageToken(
request.pageToken, 2)
for variantSetIndex in range(startVariantSetIndex, len(variantSetIds)):
variantSetId = variantSetIds[variantSetIndex]
if variantSetId in self._variantSetIdMap:
variantSet = self._variantSetIdMap[variantSetId]
iterator = variantSet.getVariants(
request.referenceName, startPosition, request.end,
request.variantName, request.callSetIds)
for variant in iterator:
nextPageToken = "{0}:{1}".format(
variantSetIndex, variant.start + 1)
yield variant, nextPageToken
class MockBackend(Backend):
"""
A mock Backend class for testing.
"""
def __init__(self, dataDir=None):
# TODO make a superclass of backend that does this
# automatically without needing to know about the internal
# details of the backend.
self._dataDir = None
self._variantSetIdMap = {}
self._variantSetIds = []
| [
"[email protected]"
] | |
d06f9c02053dbaa64d0db28cc52b7690a08a2160 | adc6d8ee596e4710c3241332758bb6990bdd8914 | /subData/ejemplos ROI/ROI_V3.py | 48e1600ee1a3a5b4fbd7593a1e7397eb9abe764a | [] | no_license | NatalyTinoco/Trabajo-de-grado_Artefactos | cf9491c47a8a23ce5bab7c52498093a61319f834 | 5cc4e009f94c871c7ed0d820eb113398ac66ec2f | refs/heads/master | 2022-03-20T00:51:48.420253 | 2019-11-24T19:10:40 | 2019-11-24T19:10:40 | 197,964,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 22:32:38 2019
@author: Nataly
"""
import cv2
import pylab as plt
from matplotlib import pyplot as plt
import numpy as np
from pylab import *
from readimg import read_img  # read images ### img = read_img(imgfile) ##
from skimage.morphology import disk
from skimage.filters import threshold_otsu
from skimage.filters import threshold_li
from skimage.filters import threshold_minimum
from skimage.filters import threshold_triangle
from skimage.filters import try_all_threshold
from skimage.filters import threshold_otsu, threshold_local
from PIL import ImageEnhance
from PIL import Image
import glob
from skimage.segmentation import flood, flood_fill
for imgfile in glob.glob("*.jpg"):
ima=read_img(imgfile)
""" # Primera forma #"""
imR, imG, II=cv2.split(ima)
""" #Segunda forma #"""
#imA=cv2.cvtColor(ima,cv2.COLOR_RGB2HSV)
#I,I,II=cv2.split(imA)
""" # Tercera forma #"""
#imA=cv2.cvtColor(ima,cv2.COLOR_RGB2XYZ)
#I,I,II=cv2.split(imA)
#fig, ax = try_all_threshold(II, figsize=(10, 8), verbose=False)
#plt.show()
#block_size = 51
#thresh = threshold_local(II, block_size, offset=10)
thresh = threshold_li(II)
#thresh=threshold_minimum(II)
#thresh=threshold_otsu(II)
#thresh=threshold_triangle(II)
#ret3,thresh= cv2.threshold(II,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
binary = II > thresh
#"""
binary = (binary*255).astype(np.uint8)
#plt.imshow(binary, cmap=plt.cm.gray)
#plt.show()
#"""
    ### Morphological transformations
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
opening = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
#dilation = cv2.dilate(opening,kernel,iterations = 1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (37, 37))
close=cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
#plt.imshow(close, cmap=plt.cm.gray)
#plt.show()
dire='./segROI/#3/B/'+imgfile
#img=cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
cv2.imwrite(dire,close)
print(imgfile)
k = cv2.waitKey(1000)
    # destroy the window
cv2.destroyAllWindows()
#"""
| [
"[email protected]"
] | |
5b01a0dbeb188b5186ce2c9757ac5da7a4312aa3 | 8e00a42f935ee15ed0dd27241f30fd2a909891c2 | /config.py | 481a95097620da6d15f1258a08e9dbf541d129a4 | [] | no_license | MrBrunotte/Microblog | c17965cc34b42332b95748adae75bd0abdd17996 | b1f33a843d717685481c22ab203c740a589177f2 | refs/heads/master | 2020-06-30T10:44:22.527410 | 2019-08-11T15:00:40 | 2019-08-11T15:00:40 | 200,802,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['[email protected]']
LANGUAGES = ['en', 'es', 'se']
POSTS_PER_PAGE = 25
| [
"[email protected]"
] | |
e2cc047b7cd5216552d5667bf63496afc02e548b | e74463d223acfe6b849177177cb409060e7a44d1 | /Data Structures and Algorithms/01 Algorithmic Toolbox/Week 4 - Divide-and-Conquer/poly_mult.py | 3d6d01b5f543a0a0108a339b3a2bfa564fdb5742 | [] | no_license | AlexEngelhardt-old/courses | 24f4acf6de22f6707568024c5ee4a2fde412e461 | 739be99265b0aca1c58abe6f107b4c49de055b9d | refs/heads/master | 2023-05-05T22:25:50.327739 | 2020-12-09T14:57:46 | 2020-12-09T14:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | def lfill(arr, length):
"""Prepends [0] elements to arr so that its total length is at least length."""
return [0] * (length - len(arr)) + arr
def poly_mult_naive(A, B):
"""Naive O(n^2) implementation of polynomial multiplication.
A and B are coefficient arrays:
Example
-------
A = 3x^2 + 2x + 5
B = 5x^2 + x + 2
A * B = 15x^4 + 13x^3 + 33x^2 + 9x + 10
    >>> poly_mult_naive([3, 2, 5], [5, 1, 2])
[15, 13, 33, 9, 10]
    >>> poly_mult_naive([3, 2, 5], [4])
[12, 8, 20]
"""
n = max(len(A), len(B))
A = lfill(A, n)
B = lfill(B, n)
res = [0] * (2*n - 1)
for i in range(n):
for j in range(n):
res[i+j] += A[i] * B[j]
# If there are leading zeroes, remove them again:
while(res[0] == 0):
del res[0]
return res
def poly_mult_better(A, B):
"""Divide-and-conquer implementation for polynomial multiplication.
Still a bit naive though.
Idea: Split up A into D1 and D0, each of degree n/2.
    e.g. A = [4, 3, 2, 1], then D1 = [4, 3] and D0 = [2, 1]
Just split them up, no computation necessary.
Split B into E1 and E0 in the same way.
Then AB = D1*E1 * x^n + (D1*E0 + D0*E1) * x^(n/2) + D0*E0
Runtime: T(n) = 4 * T(n/2) + kn
Total runtime: O(n^2)
"""
# Meh, I'll skip this
pass
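# A hedged sketch (not the author's code) of the four-multiplication
# divide-and-conquer scheme described in the docstring above; poly_add,
# add_shifted and poly_mult_dc_sketch are illustrative names introduced here.
def poly_add(A, B):
    """Coefficient-wise sum of two highest-degree-first coefficient arrays."""
    n = max(len(A), len(B))
    return [a + b for a, b in zip(lfill(A, n), lfill(B, n))]
def add_shifted(result, src, shift):
    """Add polynomial src times x**shift into result, in place."""
    for i, c in enumerate(src):
        result[len(result) - len(src) - shift + i] += c
def poly_mult_dc_sketch(A, B):
    """Divide-and-conquer with four recursive products; still O(n^2) overall."""
    n = max(len(A), len(B))
    A, B = lfill(A, n), lfill(B, n)
    if n == 1:
        return [A[0] * B[0]]
    m = n // 2                     # length of the low halves
    D1, D0 = A[:-m], A[-m:]        # A = D1 * x**m + D0
    E1, E0 = B[:-m], B[-m:]        # B = E1 * x**m + E0
    result = [0] * (2 * n - 1)
    add_shifted(result, poly_mult_dc_sketch(D1, E1), 2 * m)
    add_shifted(result, poly_add(poly_mult_dc_sketch(D1, E0),
                                 poly_mult_dc_sketch(D0, E1)), m)
    add_shifted(result, poly_mult_dc_sketch(D0, E0), 0)
    while len(result) > 1 and result[0] == 0:
        del result[0]
    return result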
def poly_mult_fast(A, B):
"""By Karatsuba"""
# meh.
return [1, 2, 3]
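# A hedged sketch of Karatsuba's three-multiplication scheme, reusing the
# poly_add/add_shifted helpers introduced above; illustrative only, not the
# implementation the author intended for poly_mult_fast.
def poly_mult_karatsuba_sketch(A, B):
    n = max(len(A), len(B))
    A, B = lfill(A, n), lfill(B, n)
    if n == 1:
        return [A[0] * B[0]]
    m = n // 2
    Ah, Al = A[:-m], A[-m:]
    Bh, Bl = B[:-m], B[-m:]
    hh = poly_mult_karatsuba_sketch(Ah, Bh)
    ll = poly_mult_karatsuba_sketch(Al, Bl)
    mid = poly_mult_karatsuba_sketch(poly_add(Ah, Al), poly_add(Bh, Bl))
    mid = poly_add(mid, [-c for c in poly_add(hh, ll)])  # subtract hh + ll
    result = [0] * (2 * n - 1)
    add_shifted(result, hh, 2 * m)
    add_shifted(result, mid, m)
    add_shifted(result, ll, 0)
    while len(result) > 1 and result[0] == 0:
        del result[0]
    return result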
if __name__ == "__main__":
print(poly_mult_naive([3, 2, 5], [5, 1, 2]))
print(poly_mult_fast([3, 2, 5], [5, 1, 2]))
print('---')
print(poly_mult_naive([3, 2, 5], [4]))
print(poly_mult_fast([3, 2, 5], [4])) | [
"[email protected]"
] | |
3c21c874290414cd6192f699632f2e64c674bd1f | 46e1db8df96707d3a2effbdea6d06e3622c7ecd7 | /tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py | 834877769e887b902f79ddb94aaedbc9cdc313f3 | [
"Apache-2.0"
] | permissive | adammalpass/tensorflow | eeb8973fcd220b55118207703919aa9df71506f4 | b340000aeeb2eae9b48354e42177fb03b30860db | refs/heads/master | 2021-01-22T08:33:52.824616 | 2017-02-20T09:04:33 | 2017-02-20T09:04:33 | 81,909,244 | 1 | 0 | null | 2017-02-14T05:37:57 | 2017-02-14T05:37:56 | null | UTF-8 | Python | false | false | 16,462 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testDiagBroadcastBothBatchAndEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [1], event_shape: []
identity_multiplier = np.array([5.])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 5, 0],
[0, 4 + 5]],
[[5 + 5, 0],
[0, 6 + 5]]]),
dist.scale.to_dense().eval())
def testDiagBroadcastBothBatchAndEvent2(self):
# This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
# broadcasts batch_shape's from both the `scale_diag` and
# `scale_identity_multiplier` args.
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3, 1], event_shape: []
identity_multiplier = np.array([[5.], [4], [3]])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllEqual(
[3, 3, 2, 2],
dist.scale.to_dense().get_shape())
def testDiagBroadcastOnlyEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 4, 0],
[0, 4 + 4]],
[[5 + 3, 0],
[0, 6 + 3]]]), # shape: [3, 2, 2]
dist.scale.to_dense().eval())
def testDiagBroadcastMultiplierAndLoc(self):
# batch_shape: [], event_shape: [3]
loc = np.array([1., 0, -1])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[5, 0, 0],
[0, 5, 0],
[0, 0, 5]],
[[4, 0, 0],
[0, 4, 0],
[0, 0, 4]],
[[3, 0, 0],
[0, 3, 0],
[0, 0, 3]]]),
dist.scale.to_dense().eval())
def testMean(self):
mu = [-1.0, 1.0]
diag_large = [1.0, 5.0]
v = [[2.0], [3.0]]
diag_small = [3.0]
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testSample(self):
# TODO(jvdillon): This test should be the basis of a new test fixture which
# is applied to every distribution. When we make this fixture, we'll also
# separate the analytical- and sample-based tests as well as for each
# function tested. For now, we group things so we can recycle one batch of
# samples (thus saving resources).
mu = np.array([-1., 1, 0.5], dtype=np.float32)
diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
diag_small = np.array([-1.1, 1.2], dtype=np.float32)
v = np.array([[0.7, 0.8],
[0.9, 1],
[0.5, 0.6]], dtype=np.float32) # shape: [k, r] = [3, 2]
true_mean = mu
true_scale = np.diag(diag_large) + np.matmul(np.matmul(
v, np.diag(diag_small)), v.T)
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
true_det_covariance = np.linalg.det(true_covariance)
true_log_det_covariance = np.log(true_det_covariance)
with self.test_session() as sess:
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_identity = ds.MultivariateNormalDiag(
loc=np.array([1., 2, 0.25], dtype=np.float32),
validate_args=True)
mvn_scaled = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_identity_multiplier=2.2,
validate_args=True)
mvn_diag = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
validate_args=True)
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([1., 2, -1], dtype=np.float32),
scale_tril=np.array([[6., 0, 0],
[2, 5, 0],
[1, 3, 4]], dtype=np.float32) / 10.,
validate_args=True)
scale = dist.scale.to_dense()
n = int(30e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
sample_kl_identity = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity = ds.kl(dist, mvn_identity)
sample_kl_scaled = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled = ds.kl(dist, mvn_scaled)
sample_kl_diag = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag = ds.kl(dist, mvn_diag)
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl(dist, mvn_chol)
n = int(10e3)
baseline = ds.MultivariateNormalDiag(
loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
validate_args=True)
samps = baseline.sample(n, seed=0)
sample_kl_identity_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity_diag_baseline = ds.kl(baseline, mvn_identity)
sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled_diag_baseline = ds.kl(baseline, mvn_scaled)
sample_kl_diag_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag_diag_baseline = ds.kl(baseline, mvn_diag)
sample_kl_chol_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol_diag_baseline = ds.kl(baseline, mvn_chol)
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
analytical_log_det_covariance_,
analytical_det_covariance_,
scale_,
sample_kl_identity_, analytical_kl_identity_,
sample_kl_scaled_, analytical_kl_scaled_,
sample_kl_diag_, analytical_kl_diag_,
sample_kl_chol_, analytical_kl_chol_,
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
dist.log_det_covariance(),
dist.det_covariance(),
scale,
sample_kl_identity, analytical_kl_identity,
sample_kl_scaled, analytical_kl_scaled,
sample_kl_diag, analytical_kl_diag,
sample_kl_chol, analytical_kl_chol,
sample_kl_identity_diag_baseline,
analytical_kl_identity_diag_baseline,
sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
sample_det_covariance_ = np.linalg.det(sample_covariance_)
sample_log_det_covariance_ = np.log(sample_det_covariance_)
print("true_mean:\n{} ".format(true_mean))
print("sample_mean:\n{}".format(sample_mean_))
print("analytical_mean:\n{}".format(analytical_mean_))
print("true_covariance:\n{}".format(true_covariance))
print("sample_covariance:\n{}".format(sample_covariance_))
print("analytical_covariance:\n{}".format(
analytical_covariance_))
print("true_variance:\n{}".format(true_variance))
print("sample_variance:\n{}".format(sample_variance_))
print("analytical_variance:\n{}".format(analytical_variance_))
print("true_stddev:\n{}".format(true_stddev))
print("sample_stddev:\n{}".format(sample_stddev_))
print("analytical_stddev:\n{}".format(analytical_stddev_))
print("true_log_det_covariance:\n{}".format(
true_log_det_covariance))
print("sample_log_det_covariance:\n{}".format(
sample_log_det_covariance_))
print("analytical_log_det_covariance:\n{}".format(
analytical_log_det_covariance_))
print("true_det_covariance:\n{}".format(
true_det_covariance))
print("sample_det_covariance:\n{}".format(
sample_det_covariance_))
print("analytical_det_covariance:\n{}".format(
analytical_det_covariance_))
print("true_scale:\n{}".format(true_scale))
print("scale:\n{}".format(scale_))
print("kl_identity: analytical:{} sample:{}".format(
analytical_kl_identity_, sample_kl_identity_))
print("kl_scaled: analytical:{} sample:{}".format(
analytical_kl_scaled_, sample_kl_scaled_))
print("kl_diag: analytical:{} sample:{}".format(
analytical_kl_diag_, sample_kl_diag_))
print("kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
print("kl_identity_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_identity_diag_baseline_,
sample_kl_identity_diag_baseline_))
print("kl_scaled_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_scaled_diag_baseline_,
sample_kl_scaled_diag_baseline_))
print("kl_diag_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_diag_diag_baseline_,
sample_kl_diag_diag_baseline_))
print("kl_chol_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_chol_diag_baseline_,
sample_kl_chol_diag_baseline_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.02)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.02)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_log_det_covariance, sample_log_det_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_log_det_covariance,
analytical_log_det_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_det_covariance, sample_det_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_det_covariance, analytical_det_covariance_,
atol=0., rtol=1e-5)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_scaled_diag_baseline_,
analytical_kl_scaled_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_diag_diag_baseline_,
analytical_kl_diag_diag_baseline_,
atol=0., rtol=0.04)
self.assertAllClose(
sample_kl_chol_diag_baseline_,
analytical_kl_chol_diag_baseline_,
atol=0., rtol=0.02)
def testImplicitLargeDiag(self):
mu = np.array([[1., 2, 3],
[11, 22, 33]]) # shape: [b, k] = [2, 3]
u = np.array([[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1, 0.25],
[1.5, 1.25]]]) # shape: [b, k, r] = [2, 3, 2]
m = np.array([[0.1, 0.2],
[0.4, 0.5]]) # shape: [b, r] = [2, 2]
scale = np.stack([
np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
np.transpose(u[0])),
np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
np.transpose(u[1])),
])
cov = np.stack([np.matmul(scale[0], scale[0].T),
np.matmul(scale[1], scale[1].T)])
print("expected_cov:\n{}".format(cov))
with self.test_session():
mvn = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=u,
scale_perturb_diag=m)
self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
58afd8e194a66d93081d602c1b5562695a2aacc7 | 7e41d70ee3bf07dc3043afef020cde173d5fb0bc | /airflow_client/client/model/resource.py | 4db2b1d4fdbe4e8423ff2bc14774439fc6e6dd11 | [
"Apache-2.0"
] | permissive | apache/airflow-client-python | fb11789076bfed191d730c459c84273781d50246 | 38d55888f7533253857baa878322007f4581fc21 | refs/heads/main | 2023-09-05T18:23:37.049610 | 2023-08-23T13:10:27 | 2023-08-23T13:10:27 | 275,569,232 | 251 | 44 | Apache-2.0 | 2023-08-23T07:49:13 | 2020-06-28T11:20:41 | Python | UTF-8 | Python | false | false | 19,801 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow API (Stable)
# Overview

To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects.
This section provides an overview of the API design, methods, and supported use cases.

Most of the endpoints accept `JSON` as input and return `JSON` responses.
This means that you must usually add the following headers to your request:

```
Content-type: application/json
Accept: application/json
```

## Resources

The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its
endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase.
Example: `dagRuns`.

Resource names are used as part of endpoint URLs, as well as in API parameters and responses.

## CRUD Operations

The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources.
You can review the standards for these operations and their standard parameters below.

Some endpoints have special behavior as exceptions.

### Create

To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata
in the request body.
The response returns a `201 Created` response code upon success, with the resource's metadata, including
its internal `id`, in the response body.

### Read

The HTTP `GET` request can be used to read a resource or to list a number of resources.

A resource's `id` can be submitted in the request parameters to read a specific resource.
The response usually returns a `200 OK` response code upon success, with the resource's metadata in the
response body.

If a `GET` request does not include a specific resource `id`, it is treated as a list request.
The response usually returns a `200 OK` response code upon success, with an object containing a list of
resources' metadata in the response body.

When reading resources, some common query parameters are usually available, e.g.:

```
v1/connections?limit=25&offset=25
```

|Query Parameter|Type|Description|
|---------------|----|-----------|
|limit|integer|Maximum number of objects to fetch. Usually 25 by default|
|offset|integer|Offset after which to start returning objects. For use with the limit query parameter.|

### Update

Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request,
with the fields to modify in the request body.
The response usually returns a `200 OK` response code upon success, with information about the modified
resource in the response body.

### Delete

Deleting a resource requires the resource `id` and is typically executed via an HTTP `DELETE` request.
The response usually returns a `204 No Content` response code upon success.

## Conventions

- Resource names are plural and expressed in camelCase.
- Names are consistent between URL parameter name and field name.
- Field names are in snake_case.

```json
{
    \"description\": \"string\",
    \"name\": \"string\",
    \"occupied_slots\": 0,
    \"open_slots\": 0,
    \"queued_slots\": 0,
    \"running_slots\": 0,
    \"scheduled_slots\": 0,
    \"slots\": 0
}
```

### Update Mask

Update mask is available as a query parameter in patch endpoints. It is used to notify the API which
fields you want to update. Using `update_mask` makes it easier to update objects by helping the server
know which fields to update in an object instead of updating all fields.
The update request ignores any fields that aren't specified in the field mask, leaving them with their
current values.

Example:

```
resource = request.get('/resource/my-id').json()
resource['my_field'] = 'new-value'
request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource))
```

## Versioning and Endpoint Lifecycle

- API versioning is not synchronized to specific releases of Apache Airflow.
- APIs are designed to be backward compatible.
- Any changes to the API will first go through a deprecation phase.

# Trying the API

You can use a third-party client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/),
[Postman](https://www.postman.com/) or [the Insomnia rest client](https://insomnia.rest/) to test
the Apache Airflow API.

Note that you will need to pass credentials data.

For example, here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used:

```bash
curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\
-H 'Content-Type: application/json' \\
--user \"username:password\" \\
-d '{
    \"is_paused\": true
}'
```

Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/),
it is possible to import the API specifications directly:

1. Download the API specification by clicking the **Download** button at the top of this document
2. Import the JSON specification in the graphical tool of your choice.
  - In *Postman*, you can click the **import** button at the top
  - With *Insomnia*, you can just drag-and-drop the file on the UI

Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on
the **Code** button.

## Enabling CORS

[Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS)
is a browser security feature that restricts HTTP requests that are initiated
from scripts running in the browser.

For details on enabling/configuring CORS, see
[Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html).

# Authentication

To be able to meet the requirements of many organizations, Airflow supports many authentication methods,
and it is even possible to add your own method.

If you want to check which auth backend is currently set, you can use the
`airflow config get-value api auth_backends` command as in the example below.

```bash
$ airflow config get-value api auth_backends
airflow.api.auth.backend.basic_auth
```

The default is to deny all requests.

For details on configuring the authentication, see
[API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html).

# Errors

We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807),
also known as Problem Details for HTTP APIs. As with our normal API responses,
your client must be prepared to gracefully handle additional members of the response.

## Unauthenticated

This indicates that the request has not been applied because it lacks valid authentication
credentials for the target resource. Please check that you have valid credentials.

## PermissionDenied

This response means that the server understood the request but refuses to authorize it because
it lacks sufficient rights to the resource. It happens when you do not have the necessary permission
to execute the action you performed. You need to get the appropriate permissions in order to resolve
this error.

## BadRequest

This response means that the server cannot or will not process the request due to something that is
perceived to be a client error (e.g., malformed request syntax, invalid request message framing, or
deceptive request routing). To resolve this, please ensure that your syntax is correct.

## NotFound

This client error response indicates that the server cannot find the requested resource.

## MethodNotAllowed

Indicates that the request method is known by the server but is not supported by the target resource.

## NotAcceptable

The target resource does not have a current representation that would be acceptable to the user agent,
according to the proactive negotiation header fields received in the request, and the server is
unwilling to supply a default representation.

## AlreadyExists

The request could not be completed due to a conflict with the current state of the target resource,
e.g. the resource it tries to create already exists.

## Unknown

This means that the server encountered an unexpected condition that prevented it from fulfilling
the request.

# noqa: E501
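A hedged client-side sketch (hypothetical host and credentials; only the `requests` calls are
assumed, everything else follows the `limit`/`offset` conventions described above) of paging
through a list endpoint:

```python
import requests

session = requests.Session()
session.auth = ("username", "password")  # hypothetical credentials
offset, page_size = 0, 25
connections = []
while True:
    resp = session.get(
        "https://example.com/api/v1/connections",  # hypothetical host
        params={"limit": page_size, "offset": offset},
    )
    resp.raise_for_status()
    batch = resp.json()["connections"]
    if not batch:
        break
    connections.extend(batch)
    offset += page_size
```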
The version of the OpenAPI document: 2.7.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from airflow_client.client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from airflow_client.client.exceptions import ApiAttributeError
class Resource(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Resource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): The name of the resource. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Resource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): The name of the resource. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
# --- File: diventi/accounts/migrations/0118_auto_20190430_1544.py (repo: flavoi/diventi, license: Apache-2.0) ---
# Generated by Django 2.1.7 on 2019-04-30 13:44
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0117_auto_20190430_1543'),
]
operations = [
migrations.AlterModelManagers(
name='diventiuser',
managers=[
('objects', diventi.accounts.models.DiventiUserManager()),
],
),
]
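# A hedged usage sketch (assumes a fully configured Django project with the `accounts`
# app installed; normally this migration is applied by `python manage.py migrate accounts`):
if __name__ == "__main__":
    import django
    django.setup()  # requires DJANGO_SETTINGS_MODULE to be set
    from django.core.management import call_command
    call_command("migrate", "accounts", "0118")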
# --- File: sdk/python/pulumi_azure_native/kusto/v20210101/event_grid_data_connection.py (repo: morrell/pulumi-azure-native, licenses: Apache-2.0, BSD-3-Clause) ---
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['EventGridDataConnectionArgs', 'EventGridDataConnection']
@pulumi.input_type
class EventGridDataConnectionArgs:
def __init__(__self__, *,
cluster_name: pulumi.Input[str],
consumer_group: pulumi.Input[str],
database_name: pulumi.Input[str],
event_hub_resource_id: pulumi.Input[str],
kind: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
storage_account_resource_id: pulumi.Input[str],
blob_storage_event_type: Optional[pulumi.Input[Union[str, 'BlobStorageEventType']]] = None,
data_connection_name: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'EventGridDataFormat']]] = None,
ignore_first_record: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
mapping_rule_name: Optional[pulumi.Input[str]] = None,
table_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EventGridDataConnection resource.
:param pulumi.Input[str] cluster_name: The name of the Kusto cluster.
:param pulumi.Input[str] consumer_group: The event hub consumer group.
:param pulumi.Input[str] database_name: The name of the database in the Kusto cluster.
:param pulumi.Input[str] event_hub_resource_id: The resource ID where the event grid is configured to send events.
:param pulumi.Input[str] kind: Kind of the endpoint for the data connection
Expected value is 'EventGrid'.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster.
:param pulumi.Input[str] storage_account_resource_id: The resource ID of the storage account where the data resides.
:param pulumi.Input[Union[str, 'BlobStorageEventType']] blob_storage_event_type: The name of blob storage event type to process.
:param pulumi.Input[str] data_connection_name: The name of the data connection.
:param pulumi.Input[Union[str, 'EventGridDataFormat']] data_format: The data format of the message. Optionally the data format can be added to each message.
:param pulumi.Input[bool] ignore_first_record: A Boolean value that, if set to true, indicates that ingestion should ignore the first record of every file
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] mapping_rule_name: The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
:param pulumi.Input[str] table_name: The table where the data should be ingested. Optionally the table information can be added to each message.
"""
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "consumer_group", consumer_group)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "event_hub_resource_id", event_hub_resource_id)
pulumi.set(__self__, "kind", 'EventGrid')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_account_resource_id", storage_account_resource_id)
if blob_storage_event_type is not None:
pulumi.set(__self__, "blob_storage_event_type", blob_storage_event_type)
if data_connection_name is not None:
pulumi.set(__self__, "data_connection_name", data_connection_name)
if data_format is not None:
pulumi.set(__self__, "data_format", data_format)
if ignore_first_record is not None:
pulumi.set(__self__, "ignore_first_record", ignore_first_record)
if location is not None:
pulumi.set(__self__, "location", location)
if mapping_rule_name is not None:
pulumi.set(__self__, "mapping_rule_name", mapping_rule_name)
if table_name is not None:
pulumi.set(__self__, "table_name", table_name)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the Kusto cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="consumerGroup")
def consumer_group(self) -> pulumi.Input[str]:
"""
The event hub consumer group.
"""
return pulumi.get(self, "consumer_group")
@consumer_group.setter
def consumer_group(self, value: pulumi.Input[str]):
pulumi.set(self, "consumer_group", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
"""
The name of the database in the Kusto cluster.
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter(name="eventHubResourceId")
def event_hub_resource_id(self) -> pulumi.Input[str]:
"""
The resource ID where the event grid is configured to send events.
"""
return pulumi.get(self, "event_hub_resource_id")
@event_hub_resource_id.setter
def event_hub_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "event_hub_resource_id", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Kind of the endpoint for the data connection
Expected value is 'EventGrid'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group containing the Kusto cluster.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> pulumi.Input[str]:
"""
The resource ID of the storage account where the data resides.
"""
return pulumi.get(self, "storage_account_resource_id")
@storage_account_resource_id.setter
def storage_account_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_account_resource_id", value)
@property
@pulumi.getter(name="blobStorageEventType")
def blob_storage_event_type(self) -> Optional[pulumi.Input[Union[str, 'BlobStorageEventType']]]:
"""
The name of blob storage event type to process.
"""
return pulumi.get(self, "blob_storage_event_type")
@blob_storage_event_type.setter
def blob_storage_event_type(self, value: Optional[pulumi.Input[Union[str, 'BlobStorageEventType']]]):
pulumi.set(self, "blob_storage_event_type", value)
@property
@pulumi.getter(name="dataConnectionName")
def data_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the data connection.
"""
return pulumi.get(self, "data_connection_name")
@data_connection_name.setter
def data_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_connection_name", value)
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> Optional[pulumi.Input[Union[str, 'EventGridDataFormat']]]:
"""
The data format of the message. Optionally the data format can be added to each message.
"""
return pulumi.get(self, "data_format")
@data_format.setter
def data_format(self, value: Optional[pulumi.Input[Union[str, 'EventGridDataFormat']]]):
pulumi.set(self, "data_format", value)
@property
@pulumi.getter(name="ignoreFirstRecord")
def ignore_first_record(self) -> Optional[pulumi.Input[bool]]:
"""
A Boolean value that, if set to true, indicates that ingestion should ignore the first record of every file
"""
return pulumi.get(self, "ignore_first_record")
@ignore_first_record.setter
def ignore_first_record(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ignore_first_record", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="mappingRuleName")
def mapping_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
"""
return pulumi.get(self, "mapping_rule_name")
@mapping_rule_name.setter
def mapping_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mapping_rule_name", value)
@property
@pulumi.getter(name="tableName")
def table_name(self) -> Optional[pulumi.Input[str]]:
"""
The table where the data should be ingested. Optionally the table information can be added to each message.
"""
return pulumi.get(self, "table_name")
@table_name.setter
def table_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "table_name", value)
class EventGridDataConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
blob_storage_event_type: Optional[pulumi.Input[Union[str, 'BlobStorageEventType']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
data_connection_name: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'EventGridDataFormat']]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_hub_resource_id: Optional[pulumi.Input[str]] = None,
ignore_first_record: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mapping_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_resource_id: Optional[pulumi.Input[str]] = None,
table_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Class representing an Event Grid data connection.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'BlobStorageEventType']] blob_storage_event_type: The name of blob storage event type to process.
:param pulumi.Input[str] cluster_name: The name of the Kusto cluster.
:param pulumi.Input[str] consumer_group: The event hub consumer group.
:param pulumi.Input[str] data_connection_name: The name of the data connection.
:param pulumi.Input[Union[str, 'EventGridDataFormat']] data_format: The data format of the message. Optionally the data format can be added to each message.
:param pulumi.Input[str] database_name: The name of the database in the Kusto cluster.
:param pulumi.Input[str] event_hub_resource_id: The resource ID where the event grid is configured to send events.
:param pulumi.Input[bool] ignore_first_record: A Boolean value that, if set to true, indicates that ingestion should ignore the first record of every file
:param pulumi.Input[str] kind: Kind of the endpoint for the data connection
Expected value is 'EventGrid'.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] mapping_rule_name: The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster.
:param pulumi.Input[str] storage_account_resource_id: The resource ID of the storage account where the data resides.
:param pulumi.Input[str] table_name: The table where the data should be ingested. Optionally the table information can be added to each message.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EventGridDataConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Class representing an Event Grid data connection.
:param str resource_name: The name of the resource.
:param EventGridDataConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EventGridDataConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
blob_storage_event_type: Optional[pulumi.Input[Union[str, 'BlobStorageEventType']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
data_connection_name: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'EventGridDataFormat']]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_hub_resource_id: Optional[pulumi.Input[str]] = None,
ignore_first_record: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mapping_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_resource_id: Optional[pulumi.Input[str]] = None,
table_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EventGridDataConnectionArgs.__new__(EventGridDataConnectionArgs)
__props__.__dict__["blob_storage_event_type"] = blob_storage_event_type
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
if consumer_group is None and not opts.urn:
raise TypeError("Missing required property 'consumer_group'")
__props__.__dict__["consumer_group"] = consumer_group
__props__.__dict__["data_connection_name"] = data_connection_name
__props__.__dict__["data_format"] = data_format
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__.__dict__["database_name"] = database_name
if event_hub_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'event_hub_resource_id'")
__props__.__dict__["event_hub_resource_id"] = event_hub_resource_id
__props__.__dict__["ignore_first_record"] = ignore_first_record
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'EventGrid'
__props__.__dict__["location"] = location
__props__.__dict__["mapping_rule_name"] = mapping_rule_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if storage_account_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_resource_id'")
__props__.__dict__["storage_account_resource_id"] = storage_account_resource_id
__props__.__dict__["table_name"] = table_name
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:kusto/v20210101:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190121:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20190121:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190515:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20190515:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190907:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20190907:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20191109:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20191109:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200215:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20200215:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200614:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20200614:EventGridDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200918:EventGridDataConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20200918:EventGridDataConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EventGridDataConnection, __self__).__init__(
'azure-native:kusto/v20210101:EventGridDataConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EventGridDataConnection':
"""
Get an existing EventGridDataConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EventGridDataConnectionArgs.__new__(EventGridDataConnectionArgs)
__props__.__dict__["blob_storage_event_type"] = None
__props__.__dict__["consumer_group"] = None
__props__.__dict__["data_format"] = None
__props__.__dict__["event_hub_resource_id"] = None
__props__.__dict__["ignore_first_record"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["mapping_rule_name"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["storage_account_resource_id"] = None
__props__.__dict__["table_name"] = None
__props__.__dict__["type"] = None
return EventGridDataConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="blobStorageEventType")
def blob_storage_event_type(self) -> pulumi.Output[Optional[str]]:
"""
The name of blob storage event type to process.
"""
return pulumi.get(self, "blob_storage_event_type")
@property
@pulumi.getter(name="consumerGroup")
def consumer_group(self) -> pulumi.Output[str]:
"""
The event hub consumer group.
"""
return pulumi.get(self, "consumer_group")
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> pulumi.Output[Optional[str]]:
"""
The data format of the message. Optionally the data format can be added to each message.
"""
return pulumi.get(self, "data_format")
@property
@pulumi.getter(name="eventHubResourceId")
def event_hub_resource_id(self) -> pulumi.Output[str]:
"""
The resource ID where the event grid is configured to send events.
"""
return pulumi.get(self, "event_hub_resource_id")
@property
@pulumi.getter(name="ignoreFirstRecord")
def ignore_first_record(self) -> pulumi.Output[Optional[bool]]:
"""
A Boolean value that, if set to true, indicates that ingestion should ignore the first record of every file
"""
return pulumi.get(self, "ignore_first_record")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of the endpoint for the data connection
Expected value is 'EventGrid'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="mappingRuleName")
def mapping_rule_name(self) -> pulumi.Output[Optional[str]]:
"""
The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
"""
return pulumi.get(self, "mapping_rule_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> pulumi.Output[str]:
"""
The resource ID of the storage account where the data resides.
"""
return pulumi.get(self, "storage_account_resource_id")
@property
@pulumi.getter(name="tableName")
def table_name(self) -> pulumi.Output[Optional[str]]:
"""
The table where the data should be ingested. Optionally the table information can be added to each message.
"""
return pulumi.get(self, "table_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
# --- File: venv/Lib/site-packages/cobra/modelimpl/fv/rtsvcbdtobdatt.py (repo: bkhoward/aciDOM, no license) ---
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtSvcBDToBDAtt(Mo):
"""
A target relation to a bridge domain. This is an internal object.
"""
meta = TargetRelationMeta("cobra.model.fv.RtSvcBDToBDAtt", "cobra.model.fv.SvcBD")
meta.moClassName = "fvRtSvcBDToBDAtt"
meta.rnFormat = "rtSvcBDToBDAtt"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Bridge Domain"
meta.writeAccessMask = 0x1001
meta.readAccessMask = 0x6000400421843101
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fv.BD")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtSvcBDToBDAtt', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12197, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1888
prop.defaultValueStr = "fvSvcBD"
prop._addConstant("fvSvcBD", None, 1888)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("BDToNwIf", "Bridge Domain to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
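# A hedged lookup sketch (hypothetical APIC host and credentials; uses the standard
# cobra.mit session and directory APIs):
if __name__ == "__main__":
    from cobra.mit.access import MoDirectory
    from cobra.mit.session import LoginSession
    session = LoginSession("https://apic.example.com", "admin", "password")
    mo_dir = MoDirectory(session)
    mo_dir.login()
    for rel in mo_dir.lookupByClass("fvRtSvcBDToBDAtt"):
        print(rel.dn, rel.tDn)  # each relation points at its target bridge domain
    mo_dir.logout()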
# --- File: post_to_gminer/client_send.py (repo: bbcf/gdv, no license) ---
# Modules #
import httplib2
from urllib.parse import urlencode  # Python 3; the original Python 2 code used `urllib.urlencode`
# All parameters #
args = {
'data' : '''{"operation_type":"desc_stat","characteristic":"number_of_features","compare_parents":[],"per_chromosome":["per_chromosome"],"filter":[{"name":"Ribi genes","path":"/scratch/genomic/tracks/ribosome_genesis.sql"}],"ntracks":[{"name":"S. cer refseq genes","path":"/scratch/genomic/tracks/all_yeast_genes.sql"},{"name":"RP genes","path":"/scratch/genomic/tracks/ribosome_proteins.sql"}]}''',
'output_location' : '/tmp/gMiner',
'callback_url' : 'http://localhost:9999/',
'job_id' : '1'
}
# Make the request #
connection = httplib2.Http()
body = urlencode(args)
headers = {'content-type':'application/x-www-form-urlencoded'}
address = "http://localhost:7522/"
# Send it #
response, content = connection.request(address, "POST", body=body, headers=headers)
print "Server status: ", response.status
print "Server reason: ", response.reason
print "Server content:", content
#-----------------------------------------#
# This code was written by Lucas Sinclair #
# [email protected] #
#-----------------------------------------#
# --- File: 6day/2. parameter2.py (repo: itwebMJ/pythonStudy, no license) ---
def f1(*x):  # variadic arguments, received as a tuple
    print('function start')
for i in x:
print(i)
    print('function end')
def add(*num):
s = 0
for i in num:
s+=i
return s
def main():
f1()
f1('aaa', 'bbb')
f1('ccc','ddd','eee','fff')
s=add(1,2,3)
print('add(1,2,3):',s)
s=add(1,2,3,4,5)
print('add(1,2,3,4,5):', s)
main()
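# A short extension sketch: an existing sequence can be unpacked into a variadic
# call with the * operator, which is equivalent to listing the items separately.
nums = [1, 2, 3, 4, 5]
print('add(*nums):', add(*nums))  # same as add(1, 2, 3, 4, 5)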
# --- File: env/lib/python3.6/site-packages/nipype/interfaces/minc/tests/test_auto_Dump.py (repo: Raniac/NEURO-LEARN, license: Apache-2.0) ---
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..minc import Dump
def test_Dump_inputs():
input_map = dict(
annotations_brief=dict(
argstr='-b %s',
xor=('annotations_brief', 'annotations_full'),
),
annotations_full=dict(
argstr='-f %s',
xor=('annotations_brief', 'annotations_full'),
),
args=dict(argstr='%s', ),
coordinate_data=dict(
argstr='-c',
xor=('coordinate_data', 'header_data'),
),
environ=dict(
nohash=True,
usedefault=True,
),
header_data=dict(
argstr='-h',
xor=('coordinate_data', 'header_data'),
),
input_file=dict(
argstr='%s',
mandatory=True,
position=-2,
),
line_length=dict(argstr='-l %d', ),
netcdf_name=dict(argstr='-n %s', ),
out_file=dict(
argstr='> %s',
genfile=True,
position=-1,
),
output_file=dict(
hash_files=False,
keep_extension=False,
name_source=['input_file'],
name_template='%s_dump.txt',
position=-1,
),
precision=dict(argstr='%s', ),
variables=dict(
argstr='-v %s',
sep=',',
),
)
inputs = Dump.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Dump_outputs():
output_map = dict(output_file=dict(), )
outputs = Dump.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| [
"[email protected]"
] | |
fa43a838c4978b258a9605d31ab39eb249c8b487 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /thing_and_time/year/different_group_or_way/own_hand/bad_woman/old_week.py | 9639b61edb0de71dc9e7ec473e80b69899594523 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py |
#! /usr/bin/env python
def great_week(str_arg):
year_and_day(str_arg)
print('way_and_little_place')
def year_and_day(str_arg):
print(str_arg)
if __name__ == '__main__':
great_week('want_first_time')
| [
"[email protected]"
] | |
ea5aa6c50f98a175bed92349bae0a7dfb2f69265 | 13b70fefe74a4df57c80207a9f5fddb2c2474f1d | /Ui/Ui_FormConfig.py | f8737de39a52a6332ceafb06dfe1692b405643d9 | [] | no_license | golden7602/zion | 4f8ae947fd754d64de44bb16d9bd2bd7f8c819a5 | 70a5ba13eb1b504f94fdaceba7cc6d0564618c00 | refs/heads/master | 2021-07-01T13:46:03.769916 | 2020-12-01T11:15:01 | 2020-12-01T11:15:01 | 196,413,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,713 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:\Zion\zion\Ui\FormConfig.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(971, 676)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
Dialog.setFont(font)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setContentsMargins(3, 3, -1, 3)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setObjectName("tabWidget")
self.tab1 = QtWidgets.QWidget()
self.tab1.setObjectName("tab1")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tab1)
self.verticalLayout_2.setContentsMargins(3, 3, 3, 3)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.tab1)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.Note_PrintingOrder = QtWidgets.QTextEdit(self.tab1)
self.Note_PrintingOrder.setObjectName("Note_PrintingOrder")
self.verticalLayout_2.addWidget(self.Note_PrintingOrder)
self.label_2 = QtWidgets.QLabel(self.tab1)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.Bank_Account = QtWidgets.QTextEdit(self.tab1)
self.Bank_Account.setObjectName("Bank_Account")
self.verticalLayout_2.addWidget(self.Bank_Account)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.tabWidget.addTab(self.tab1, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_8 = QtWidgets.QLabel(self.tab)
self.label_8.setObjectName("label_8")
self.verticalLayout_5.addWidget(self.label_8)
self.BillCopys_Order = QtWidgets.QTextEdit(self.tab)
self.BillCopys_Order.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_Order.setObjectName("BillCopys_Order")
self.verticalLayout_5.addWidget(self.BillCopys_Order)
self.label_9 = QtWidgets.QLabel(self.tab)
self.label_9.setObjectName("label_9")
self.verticalLayout_5.addWidget(self.label_9)
self.BillCopys_PrintingOrder = QtWidgets.QTextEdit(self.tab)
self.BillCopys_PrintingOrder.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_PrintingOrder.setObjectName("BillCopys_PrintingOrder")
self.verticalLayout_5.addWidget(self.BillCopys_PrintingOrder)
self.label_10 = QtWidgets.QLabel(self.tab)
self.label_10.setObjectName("label_10")
self.verticalLayout_5.addWidget(self.label_10)
self.BillCopys_OutboundOrder = QtWidgets.QTextEdit(self.tab)
self.BillCopys_OutboundOrder.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_OutboundOrder.setObjectName("BillCopys_OutboundOrder")
self.verticalLayout_5.addWidget(self.BillCopys_OutboundOrder)
self.label_11 = QtWidgets.QLabel(self.tab)
self.label_11.setObjectName("label_11")
self.verticalLayout_5.addWidget(self.label_11)
self.BillCopys_WarehouseRreceipt = QtWidgets.QTextEdit(self.tab)
self.BillCopys_WarehouseRreceipt.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_WarehouseRreceipt.setObjectName("BillCopys_WarehouseRreceipt")
self.verticalLayout_5.addWidget(self.BillCopys_WarehouseRreceipt)
self.label_12 = QtWidgets.QLabel(self.tab)
self.label_12.setObjectName("label_12")
self.verticalLayout_5.addWidget(self.label_12)
self.BillCopys_QuotationOrder = QtWidgets.QTextEdit(self.tab)
self.BillCopys_QuotationOrder.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_QuotationOrder.setObjectName("BillCopys_QuotationOrder")
self.verticalLayout_5.addWidget(self.BillCopys_QuotationOrder)
self.label_13 = QtWidgets.QLabel(self.tab)
self.label_13.setObjectName("label_13")
self.verticalLayout_5.addWidget(self.label_13)
self.BillCopys_QuotationPrintingOrder = QtWidgets.QTextEdit(self.tab)
self.BillCopys_QuotationPrintingOrder.setMaximumSize(QtCore.QSize(16777215, 40))
self.BillCopys_QuotationPrintingOrder.setObjectName("BillCopys_QuotationPrintingOrder")
self.verticalLayout_5.addWidget(self.BillCopys_QuotationPrintingOrder)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem)
self.verticalLayout_6.addLayout(self.verticalLayout_5)
self.tabWidget.addTab(self.tab, "")
self.tab2 = QtWidgets.QWidget()
self.tab2.setObjectName("tab2")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab2)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.frame = QtWidgets.QFrame(self.tab2)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_4 = QtWidgets.QLabel(self.frame)
self.label_4.setObjectName("label_4")
self.horizontalLayout_3.addWidget(self.label_4)
spacerItem1 = QtWidgets.QSpacerItem(35, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.Null_prompt_bac_color = QtWidgets.QWidget(self.frame)
self.Null_prompt_bac_color.setMinimumSize(QtCore.QSize(100, 0))
self.Null_prompt_bac_color.setStyleSheet("background-color: rgb(255, 0, 255);\n"
"border: 1px inset ;\n"
"border-color: rgb(194, 194, 194);\n"
"")
self.Null_prompt_bac_color.setObjectName("Null_prompt_bac_color")
self.horizontalLayout_3.addWidget(self.Null_prompt_bac_color)
self.colorpicker = QtWidgets.QPushButton(self.frame)
self.colorpicker.setMinimumSize(QtCore.QSize(25, 0))
self.colorpicker.setMaximumSize(QtCore.QSize(25, 16777215))
self.colorpicker.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../res/ico/color_picker.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.colorpicker.setIcon(icon)
self.colorpicker.setObjectName("colorpicker")
self.horizontalLayout_3.addWidget(self.colorpicker)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_5 = QtWidgets.QLabel(self.frame)
self.label_5.setObjectName("label_5")
self.horizontalLayout_6.addWidget(self.label_5)
spacerItem3 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem3)
self.PrintHighlightBackgroundColor = QtWidgets.QWidget(self.frame)
self.PrintHighlightBackgroundColor.setMinimumSize(QtCore.QSize(100, 0))
self.PrintHighlightBackgroundColor.setStyleSheet("background-color: rgb(255, 0, 255);\n"
"border: 1px inset ;\n"
"border-color: rgb(194, 194, 194);\n"
"")
self.PrintHighlightBackgroundColor.setObjectName("PrintHighlightBackgroundColor")
self.horizontalLayout_6.addWidget(self.PrintHighlightBackgroundColor)
self.colorpicker_2 = QtWidgets.QPushButton(self.frame)
self.colorpicker_2.setMinimumSize(QtCore.QSize(25, 0))
self.colorpicker_2.setMaximumSize(QtCore.QSize(25, 16777215))
self.colorpicker_2.setText("")
self.colorpicker_2.setIcon(icon)
self.colorpicker_2.setObjectName("colorpicker_2")
self.horizontalLayout_6.addWidget(self.colorpicker_2)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem4)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtWidgets.QLabel(self.frame)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.widget = QtWidgets.QWidget(self.frame)
self.widget.setObjectName("widget")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.radioButton_AutoEllipsis = QtWidgets.QRadioButton(self.widget)
self.radioButton_AutoEllipsis.setObjectName("radioButton_AutoEllipsis")
self.horizontalLayout_5.addWidget(self.radioButton_AutoEllipsis)
self.radioButton_AutoShrinkFonts = QtWidgets.QRadioButton(self.widget)
self.radioButton_AutoShrinkFonts.setObjectName("radioButton_AutoShrinkFonts")
self.horizontalLayout_5.addWidget(self.radioButton_AutoShrinkFonts)
self.horizontalLayout_4.addWidget(self.widget)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem6)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_6 = QtWidgets.QLabel(self.frame)
self.label_6.setObjectName("label_6")
self.horizontalLayout_10.addWidget(self.label_6)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem7)
self.widget_3 = QtWidgets.QWidget(self.frame)
self.widget_3.setObjectName("widget_3")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.widget_3)
self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.radioButton_AutoRefreshWhenDataChange_Open = QtWidgets.QRadioButton(self.widget_3)
self.radioButton_AutoRefreshWhenDataChange_Open.setObjectName("radioButton_AutoRefreshWhenDataChange_Open")
self.horizontalLayout_7.addWidget(self.radioButton_AutoRefreshWhenDataChange_Open)
self.radioButton_AutoRefreshWhenDataChange_Close = QtWidgets.QRadioButton(self.widget_3)
self.radioButton_AutoRefreshWhenDataChange_Close.setObjectName("radioButton_AutoRefreshWhenDataChange_Close")
self.horizontalLayout_7.addWidget(self.radioButton_AutoRefreshWhenDataChange_Close)
self.horizontalLayout_10.addWidget(self.widget_3)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem8)
self.verticalLayout_3.addLayout(self.horizontalLayout_10)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_7 = QtWidgets.QLabel(self.frame)
self.label_7.setObjectName("label_7")
self.horizontalLayout_9.addWidget(self.label_7)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem9)
self.widget_2 = QtWidgets.QWidget(self.frame)
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.widget_2)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.radioButton_BubbleTipsWhenDataChange_Open = QtWidgets.QRadioButton(self.widget_2)
self.radioButton_BubbleTipsWhenDataChange_Open.setObjectName("radioButton_BubbleTipsWhenDataChange_Open")
self.horizontalLayout_8.addWidget(self.radioButton_BubbleTipsWhenDataChange_Open)
self.radioButton_BubbleTipsWhenDataChange_Close = QtWidgets.QRadioButton(self.widget_2)
self.radioButton_BubbleTipsWhenDataChange_Close.setObjectName("radioButton_BubbleTipsWhenDataChange_Close")
self.horizontalLayout_8.addWidget(self.radioButton_BubbleTipsWhenDataChange_Close)
self.horizontalLayout_9.addWidget(self.widget_2)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem10)
self.verticalLayout_3.addLayout(self.horizontalLayout_9)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.label_14 = QtWidgets.QLabel(self.frame)
self.label_14.setObjectName("label_14")
self.horizontalLayout_11.addWidget(self.label_14)
self.TaxRegCerPath = QtWidgets.QLineEdit(self.frame)
self.TaxRegCerPath.setReadOnly(False)
self.TaxRegCerPath.setObjectName("TaxRegCerPath")
self.horizontalLayout_11.addWidget(self.TaxRegCerPath)
self.taxRegPathSelect = QtWidgets.QPushButton(self.frame)
self.taxRegPathSelect.setText("")
self.taxRegPathSelect.setObjectName("taxRegPathSelect")
self.horizontalLayout_11.addWidget(self.taxRegPathSelect)
self.verticalLayout_3.addLayout(self.horizontalLayout_11)
spacerItem11 = QtWidgets.QSpacerItem(20, 429, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem11)
self.verticalLayout_4.addWidget(self.frame)
self.tabWidget.addTab(self.tab2, "")
self.verticalLayout.addWidget(self.tabWidget)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem12)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout.addWidget(self.buttonBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(2)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Config "))
self.label.setText(_translate("Dialog", "PrintOrder单据备注内容 PrintOrder Note:"))
        self.label_2.setText(_translate("Dialog", "单据账户信息Conta Bancaria:"))  # Chinese/Portuguese: "Bill account info / Bank account:"
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab1), _translate("Dialog", "BillNote"))
self.label_8.setText(_translate("Dialog", "Order:"))
self.label_9.setText(_translate("Dialog", "PrintingOrder:"))
self.label_10.setText(_translate("Dialog", "OutboundOrder:"))
self.label_11.setText(_translate("Dialog", "WarehouseRreceipt:"))
self.label_12.setText(_translate("Dialog", "QuotationOrder"))
self.label_13.setText(_translate("Dialog", "QuotationPrintingOrder"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "BillCopys"))
self.label_4.setText(_translate("Dialog", "空值提示文背景色Null Prompt Background Color:"))
self.label_5.setText(_translate("Dialog", "打印突出显示背景色Print Highlight Background Color:"))
self.label_3.setText(_translate("Dialog", "文本宽度超边界时打印方式 Printing Policy When Text Width is Extra Width:"))
self.radioButton_AutoEllipsis.setText(_translate("Dialog", "自动省略Auto ellipsis"))
self.radioButton_AutoShrinkFonts.setText(_translate("Dialog", "自动缩小字体Auto shrink fonts"))
self.label_6.setText(_translate("Dialog", "其他用户修改数据时自动刷新数据 Automatically refresh data when other users modify data:"))
self.radioButton_AutoRefreshWhenDataChange_Open.setText(_translate("Dialog", "打开"))
self.radioButton_AutoRefreshWhenDataChange_Close.setText(_translate("Dialog", "关闭"))
self.label_7.setText(_translate("Dialog", "其他用户修改数据时气泡提示 Bubble Tips When Other Users Modify Data:"))
self.radioButton_BubbleTipsWhenDataChange_Open.setText(_translate("Dialog", "打开"))
self.radioButton_BubbleTipsWhenDataChange_Close.setText(_translate("Dialog", "关闭"))
self.label_14.setText(_translate("Dialog", "客户税务登记证存放路径:Customer Tax registration certificate Path:"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab2), _translate("Dialog", "other"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
917dadb212ec658472de836a5b21d4d5c5744946 | 1cc17b2eb1c885389126299602dbaa3bbd1e6dd7 | /liaoxuefeng_python/innerbuilt/do_wearth_parse.py | cc2b69122069756e6f36f542b652328b8a61a467 | [] | no_license | shulu/python_note | e611093ff2af321fbc889167424574b214052b44 | 93b101a1723d2d47b435a25e81e447f0d5d95022 | refs/heads/master | 2021-01-02T22:47:53.717930 | 2019-03-26T08:55:48 | 2019-03-26T08:55:48 | 99,391,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,138 | py | # -*- coding: utf-8 -*-
from xml.parsers.expat import ParserCreate
import json
data = {}
textlist = []
class WeatherSaxHandler(object):
def start_element(self, name, attr):
        if name not in data:
data[name] = []
data[name].append({'attr' : attr})
    def char_data(self, text):
        # expat may deliver character data in several chunks, so buffer them
        textlist.append(text)
    def end_element(self, name):
        global textlist
        text = ''.join(textlist)  # avoid shadowing the built-in name `str`
        data[name][-1]['text'] = text
        textlist = []
def parse_weather(xml):
    global data, textlist
    # reset the module-level accumulators so repeated calls don't mix results
    data = {}
    textlist = []
    handler = WeatherSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml)
location = data['yweather:location']
forecast = data['yweather:forecast']
return {
'city': location[0]['attr']['city'],
'country': location[0]['attr']['country'],
'today': {
'text': forecast[0]['attr']['text'],
'low': forecast[0]['attr']['low'],
'high': forecast[0]['attr']['high'],
},
'tomorrow': {
'text': forecast[1]['attr']['text'],
'low': forecast[1]['attr']['low'],
'high': forecast[1]['attr']['high'],
},
}
# the parsed data could also be written to a JSON file:
# with open('weather_data.json', 'w') as f:
# json.dump(data, f)
if __name__ == '__main__':
from do_weather_xml import getWeatherXML
xml = getWeatherXML()
d = parse_weather(xml)
print(str(d))
    xml_data = r'''<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0" xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">
<channel>
<title>Yahoo! Weather - Beijing, CN</title>
<lastBuildDate>Wed, 27 May 2015 11:00 am CST</lastBuildDate>
<yweather:location city="Beijing" region="" country="China"/>
<yweather:units temperature="C" distance="km" pressure="mb" speed="km/h"/>
<yweather:wind chill="28" direction="180" speed="14.48" />
<yweather:atmosphere humidity="53" visibility="2.61" pressure="1006.1" rising="0" />
<yweather:astronomy sunrise="4:51 am" sunset="7:32 pm"/>
<item>
<geo:lat>39.91</geo:lat>
<geo:long>116.39</geo:long>
<pubDate>Wed, 27 May 2015 11:00 am CST</pubDate>
<yweather:condition text="Haze" code="21" temp="28" date="Wed, 27 May 2015 11:00 am CST" />
<yweather:forecast day="Wed" date="27 May 2015" low="20" high="33" text="Partly Cloudy" code="30" />
<yweather:forecast day="Thu" date="28 May 2015" low="21" high="34" text="Sunny" code="32" />
<yweather:forecast day="Fri" date="29 May 2015" low="18" high="25" text="AM Showers" code="39" />
<yweather:forecast day="Sat" date="30 May 2015" low="18" high="32" text="Sunny" code="32" />
<yweather:forecast day="Sun" date="31 May 2015" low="20" high="37" text="Sunny" code="32" />
</item>
</channel>
</rss>
'''
    print(parse_weather(xml_data)) | [
"[email protected]"
] | |
d84e266712ab016b57c02f337ed36cb13b123d9a | 75e16fc6883e3e314b21ccf337beb0320bbcae50 | /train.py | 2f18cc8ed8e16739d9b6c577004d390b0b4c6768 | [] | no_license | mihirp1998/convHypernetComp | a0a9404ab6abf29a56733ea88d45a10f397d551d | 6ed0632af965b81ac38cf3ed7bd9215adadb1902 | refs/heads/master | 2021-08-18T11:11:32.464130 | 2020-04-24T23:44:15 | 2020-04-24T23:44:15 | 169,819,367 | 0 | 0 | null | 2019-02-09T18:08:45 | 2019-02-09T01:07:13 | Jupyter Notebook | UTF-8 | Python | false | false | 7,212 | py | import time
import os
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as LS
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data
from torchvision import transforms
from torch.nn.parameter import Parameter
#from unet import UNet,Feedforward
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch-size', '-N', type=int, default=128, help='batch size')
parser.add_argument(
'--train', '-f', required=True, type=str, help='folder of training images')
parser.add_argument(
'--max-epochs', '-e', type=int, default=20000, help='max epochs')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
# parser.add_argument('--cuda', '-g', action='store_true', help='enables cuda')
parser.add_argument(
'--iterations', type=int, default=16, help='unroll iterations')
parser.add_argument('--checkpoint', type=int, help='unroll iterations')
parser.add_argument('--update', type=int, help='unroll update')
args = parser.parse_args()
import new_dataset as dataset
train_set = dataset.ImageFolder(root=args.train,train=False,file_name ="outValid15_100Vids.p")
train_loader = data.DataLoader(
dataset=train_set, batch_size=args.batch_size, shuffle=True, num_workers=1)
print('total images: {}; total batches: {}'.format(
len(train_set), len(train_loader)))
import network
hypernet = network.HyperNetwork(train_set.vid_count).cuda()
encoder = network.EncoderCell().cuda()
binarizer = network.Binarizer().cuda()
decoder = network.DecoderCell().cuda()
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print("hypernet ",count_parameters(hypernet))
print("encoder ",count_parameters(encoder))
print("decoder ",count_parameters(decoder))
print("binarizer ",count_parameters(binarizer))
solver = optim.Adam(
[
{
'params': hypernet.parameters()
}
],
lr=args.lr)
def resume(epoch=None):
if epoch is None:
s = 'iter'
epoch = 0
else:
s = 'epoch'
print("Loaded")
hypernet.load_state_dict(
torch.load('checkpoint100_100vids_wn/hypernet_{}_{:08d}.pth'.format(s, epoch)))
def save(index, epoch=True):
if not os.path.exists('checkpoint100_100vids_wn'):
os.mkdir('checkpoint100_100vids_wn')
if epoch:
s = 'epoch'
else:
s = 'iter'
torch.save(hypernet.state_dict(), 'checkpoint100_100vids_wn/hypernet_{}_{:08d}.pth'.format(s, index))
#
resume()
scheduler = LS.MultiStepLR(solver, milestones=[50, 100, 200, 300, 400], gamma=0.5)
last_epoch = 100
if args.checkpoint:
resume(args.checkpoint)
#last_epoch = 0
scheduler.last_epoch = last_epoch - 1
vepoch=0
index =0
solver.zero_grad()
loss_mini_batch = 0
all_losses = []
for epoch in range(last_epoch + 1, args.max_epochs + 1):
#scheduler.step()
for batch, (data,id_num,name) in enumerate(train_loader):
batch_t0 = time.time()
data = data[0]
batch_size, input_channels, height, width = data.size()
encoder_h_1 = (Variable(
torch.zeros(batch_size, 256, height // 4, width // 4)),
Variable(
torch.zeros(batch_size, 256, height // 4, width // 4)))
encoder_h_2 = (Variable(
torch.zeros(batch_size, 512, height // 8, width // 8)),
Variable(
torch.zeros(batch_size, 512, height // 8, width // 8)))
encoder_h_3 = (Variable(
torch.zeros(batch_size, 512, height // 16, width // 16)),
Variable(
torch.zeros(batch_size, 512, height // 16, width // 16)))
decoder_h_1 = (Variable(
torch.zeros(batch_size, 512, height // 16, width // 16)),
Variable(
torch.zeros(batch_size, 512, height // 16, width // 16)))
decoder_h_2 = (Variable(
torch.zeros(batch_size, 512, height // 8, width // 8)),
Variable(
torch.zeros(batch_size, 512, height // 8, width // 8)))
decoder_h_3 = (Variable(
torch.zeros(batch_size, 256, height // 4, width // 4)),
Variable(
torch.zeros(batch_size, 256, height // 4, width // 4)))
decoder_h_4 = (Variable(
torch.zeros(batch_size, 128, height // 2, width // 2)),
Variable(
torch.zeros(batch_size, 128, height // 2, width // 2)))
encoder_h_1 = (encoder_h_1[0].cuda(), encoder_h_1[1].cuda())
encoder_h_2 = (encoder_h_2[0].cuda(), encoder_h_2[1].cuda())
encoder_h_3 = (encoder_h_3[0].cuda(), encoder_h_3[1].cuda())
decoder_h_1 = (decoder_h_1[0].cuda(), decoder_h_1[1].cuda())
decoder_h_2 = (decoder_h_2[0].cuda(), decoder_h_2[1].cuda())
decoder_h_3 = (decoder_h_3[0].cuda(), decoder_h_3[1].cuda())
decoder_h_4 = (decoder_h_4[0].cuda(), decoder_h_4[1].cuda())
patches = Variable(data.cuda())
#solver.zero_grad()
losses = []
res = patches - 0.5
id_num = Variable(id_num.cuda())
wenc,wdec,wbin = hypernet(id_num,batch_size)
bp_t0 = time.time()
for i in range(args.iterations):
encoded, encoder_h_1, encoder_h_2, encoder_h_3 = encoder(
res,wenc,encoder_h_1, encoder_h_2, encoder_h_3,batch_size)
codes = binarizer(encoded,wbin,batch_size)
output, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4 = decoder(
codes,wdec, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4,batch_size)
res = res - output
losses.append(res.abs().mean())
all_losses.append(losses)
bp_t1 = time.time()
loss = sum(losses) / args.iterations
        # gradient accumulation: spread each optimizer update over `args.update` mini-batches
        loss = loss / args.update
loss.backward()
loss_mini_batch += loss.data[0]
if (index +1) % args.update == 0:
# Do a SGD step once every iter_size iterations
solver.step()
solver.zero_grad()
# print("Iter: %02d, Loss: %4.4f" % (i, loss_mini_batch/10))
batch_t1 = time.time()
print('[TRAIN] Epoch[{}]({}/{}); Loss: {:.6f}; Backpropagation: {:.4f} sec; Batch: {:.4f} sec'.format(epoch, batch + 1,len(train_loader), loss_mini_batch, bp_t1 - bp_t0, batch_t1 -batch_t0))
print(('{:.4f} ' * args.iterations +'\n').format(* [l.data[0] for l in np.array(all_losses).mean(axis=0)]))
loss_mini_batch = 0
all_losses = []
index = (epoch - 1) * len(train_loader) + batch
if index % 700 == 0 and index != 0:
vepoch+=1
#save(vepoch)
#print("scheduled")
#scheduler.step()
# if index % 2000 == 0 and index != 0:
# vepoch+=1
# scheduler.step()
if index % 2000 == 0 and index != 0:
save(0, False)
if epoch % 100 == 0:
save(epoch)
| [
"[email protected]"
] | |
04ca532abc1f721a651008e03b2fc480c37452cf | 9cd25c62e501741bbf4f982058ac60b8cdf815dc | /_unittests/ut_testing/test_template_dl_torch.py | 2ee7a06eb55e118202d4f461b0f1bb82357dabb5 | [
"MIT"
] | permissive | sdpython/lightmlrestapi | c60c2960b271e59750ebfe8fafc9c70304f92cbc | def172965eb197d8ab7f812c3f5f5ce129593cef | refs/heads/master | 2022-07-09T06:56:31.458790 | 2022-05-19T23:46:52 | 2022-05-19T23:46:52 | 110,975,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | """
@brief test tree node (time=12s)
"""
import os
import unittest
import numpy
from PIL import Image
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from lightmlrestapi.testing.template_dl_torch import restapi_version, restapi_load, restapi_predict
def get_torch():
try:
import torch # pylint: disable=C0415
return torch
except ImportError:
return None
class TestTemplateDlTorch(ExtTestCase):
@unittest.skipIf(get_torch() is None, reason="no torch")
    def test_template_dl_torch(self):
self.assertEqual(restapi_version(), "0.1.1238")
temp = get_temp_folder(__file__, "temp_template_dl_torch")
import torchvision.models as models # pylint: disable=E0401,C0415,R0402
import torch # pylint: disable=E0401,C0415
model = models.squeezenet1_0(pretrained=True)
model_name = os.path.join(temp, "model.torch")
torch.save(model, model_name)
img_input = os.path.join(temp, "..", "data", "wiki_modified2.png")
img_input = numpy.array(Image.open(img_input))
mo = restapi_load({'model': model_name})
pred = restapi_predict(mo, img_input)
self.assertIsInstance(pred, numpy.ndarray)
self.assertEqual(pred.shape, (1, 1000))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
2951978989a085b93e5ed9357c2bf2d6381a16a0 | 19a2378a7fc2aef762b0e3a70669208818feeaa9 | /src/transformers/models/upernet/modeling_upernet.py | 6143c57e92d598b2db52e36ef60bc3a18dff8085 | [
"Apache-2.0"
] | permissive | pytorch-tpu/transformers | 494ee005c6d156161171f2a8e60f25603189408f | 6112b1c6442aaf7affd2b0676a1cd4eee30c45cf | refs/heads/master | 2023-09-03T19:34:30.326852 | 2023-07-19T20:57:40 | 2023-07-19T20:57:40 | 220,075,881 | 7 | 2 | Apache-2.0 | 2023-09-14T17:58:25 | 2019-11-06T19:40:45 | Python | UTF-8 | Python | false | false | 17,414 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch UperNet model. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation."""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
padding: Union[int, Tuple[int, int], str] = 0,
bias: bool = False,
dilation: Union[int, Tuple[int, int]] = 1,
) -> None:
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias,
dilation=dilation,
)
self.batch_norm = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU()
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = self.conv(input)
output = self.batch_norm(output)
output = self.activation(output)
return output
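# e.g. (illustrative shapes only) UperNetConvModule(3, 8, kernel_size=3, padding=1)
# maps a (N, 3, H, W) tensor to (N, 8, H, W) via conv -> batch norm -> ReLU.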
class UperNetPyramidPoolingBlock(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale),
UperNetConvModule(in_channels, channels, kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (`Tuple[int]`):
Pooling scales used in Pooling Pyramid Module.
in_channels (`int`):
Input channels.
channels (`int`):
Channels after modules, before conv_seg.
align_corners (`bool`):
align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
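# e.g. with pool_scales=(1, 2, 3, 6) this returns four feature maps, each average-
# pooled to s x s, projected to `channels`, then resized back to x's spatial size.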
class UperNetHead(nn.Module):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://arxiv.org/abs/1807.10221).
"""
def __init__(self, config, in_channels):
super().__init__()
self.config = config
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = in_channels
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
# PSP Module
self.psp_modules = UperNetPyramidPoolingModule(
self.pool_scales,
self.in_channels[-1],
self.channels,
align_corners=self.align_corners,
)
self.bottleneck = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = UperNetConvModule(
len(self.in_channels) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
def init_weights(self):
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
)
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
class UperNetFCNHead(nn.Module):
"""
Fully Convolution Networks for Semantic Segmentation. This head is the implementation of
    [FCNNet](https://arxiv.org/abs/1411.4038).
Args:
config:
Configuration.
in_channels (int):
Number of input channels.
kernel_size (int):
The kernel size for convs in the head. Default: 3.
dilation (int):
The dilation rate for convs in the head. Default: 1.
"""
def __init__(
self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
) -> None:
super().__init__()
self.config = config
self.in_channels = config.auxiliary_in_channels
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
UperNetConvModule(
self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = UperNetConvModule(
self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
)
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
def init_weights(self):
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# just take the relevant feature maps
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states)
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
output = self.classifier(output)
return output
class UperNetPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = UperNetConfig
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights(self, module):
if isinstance(module, UperNetPreTrainedModel):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def init_weights(self):
"""Initialize the weights"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BackboneMixin):
module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SemanticSegmenterOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
>>> from PIL import Image
>>> from huggingface_hub import hf_hub_download
>>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> filepath = hf_hub_download(
... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset"
... )
>>> image = Image.open(filepath).convert("RGB")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits # shape (batch_size, num_labels, height, width)
>>> list(logits.shape)
[1, 150, 512, 512]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
features = outputs.feature_maps
logits = self.decode_head(features)
logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
auxiliary_logits = nn.functional.interpolate(
auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
)
loss = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
else:
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
main_loss = loss_fct(logits, labels)
auxiliary_loss = loss_fct(auxiliary_logits, labels)
loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"[email protected]"
] | |
508d6ae083c9e4dd5e88df2f98ff5ffe1ed2f997 | f1790e298bcbf7b26cacd3c27850f243c446b9eb | /courses/pythonBrasil/EstruturaDeDecisao/ex006.py | e023d983077c2e8cfaf09eb5788f0c2eeb6359b5 | [] | no_license | misa9999/python | 36001a1bf0eb842d00b010b02e05b01aa4dfac57 | 251c5226db1bfef4a8445b025f232a27a6924930 | refs/heads/master | 2023-03-04T16:25:48.610233 | 2021-02-22T21:37:51 | 2021-02-22T21:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # pede três números e informa o maior deles
# read the numbers
n1 = int(input('n1: '))
n2 = int(input('n2: '))
n3 = int(input('n3: '))
# find the largest number
maior = n1
if n2 > maior:
    maior = n2
if n3 > maior:
    maior = n3
if n1 == n2 == n3:
    print('Todos números são iguais')
print(f'{maior} é o maior número.')
| [
"yuukixasuna00@gmailcom"
] | yuukixasuna00@gmailcom |
ad7faa54a46d5965e09d67a6f9c2c498b2bbbec0 | fcd965c9333ee328ec51bc41f5bc0300cc06dc33 | /DailyCoding/invert_tree.py | 9c1f7f53cc075075fb4ac08a3358a5e3da55f484 | [] | no_license | henrylin2008/Coding_Problems | 699bb345481c14dc3faa8bab439776c7070a1cb0 | 281067e872f73a27f76ae10ab0f1564916bddd28 | refs/heads/master | 2023-01-11T11:55:47.936163 | 2022-12-24T07:50:17 | 2022-12-24T07:50:17 | 170,151,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # Daily Coding Problem #83
# Problem
# This problem was asked by Google.
#
# Invert a binary tree.
#
# For example, given the following tree:
#
#     a
#    / \
#   b   c
#  / \  /
# d   e f
# should become:
#
#     a
#    / \
#   c   b
#    \  / \
#     f e  d
# Solution
# Assuming we can invert the current node's left and right subtrees, all we
# then need to do is swap them: left becomes right and right becomes left.
# The base case is a None node, for which we simply return None. Leaf nodes
# work too, since swapping two None children does nothing.
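# The code below assumes a binary-tree node type; the problem statement does
# not define one, so here is a minimal (hypothetical) version:
class Node:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
# e.g. invert(Node('a', Node('b'), Node('c'))) swaps the 'b' and 'c' subtrees.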
def invert(node):
if not node:
return node
left = invert(node.left)
right = invert(node.right)
node.left, node.right = right, left
return node | [
"[email protected]"
] | |
59125fada11245cb2fd16d583346fea7808880f8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03803/s808013287.py | d06c71951fe118e1fa63c59d59e0f64d918f0c20 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | a, b = map(int, input().split())
a = a if a > 1 else 14
b = b if b > 1 else 14
print("Alice" if a > b else "Bob" if a < b else "Draw") | [
"[email protected]"
] | |
076e9da84a5ca277c5b3c7a7d91b7c7594ad151a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/2720.py | ad1a9ec15bca1a694901004c379c73f110bc065c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | def nopalin(li):
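    # counts i in [li[0], li[1]) such that both i and i**2 are palindromes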
count = 0
for i in range(li[0], li[1]):
if ispalin(str(i)):
if ispalin(str(i**2)):
count += 1
return count
def ispalin(a):
return a == a[::-1]
infile = open('C-small-attempt0.in', 'r')
outfile = open('ansfairsq_small.in', 'w')
no = int(infile.readline())
for i in range(no):
li = infile.readline().split()
li[0] = int(li[0])
a = int(li[0]**.5)
if a**2 == li[0]:
li[0] = a
else:
li[0] = a+1
li[1] = int(int(li[1])**.5)+1
ans = nopalin(li)
outfile.write(('Case #{0}: '+str(ans)+'\n').format(i+1))
infile.close()
outfile.close()
| [
"[email protected]"
] | |
a95fdccca63eabb8357efc31abfedb51d7ac8e83 | 17cc8bffed3fadb413506f1545c455d7b9406ed6 | /parts/zodiac/venusian/tests/fixtures/two/mod2.py | 538ca16324ce1614318fbae18501cdb30d38c29a | [] | no_license | stinett/zodiac | f7a4f788942930fa217e7e1c7d525b82a557258f | 22b247719694b0f5aa5135b3cb68c1e84aaf7629 | refs/heads/master | 2020-05-21T01:14:59.949571 | 2014-01-13T15:53:47 | 2014-01-13T15:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | /home/stine/myenv/zodiac/eggs/venusian-1.0a8-py2.7.egg/venusian/tests/fixtures/two/mod2.py | [
"stine@funkydesktop.(none)"
] | stine@funkydesktop.(none) |
7593e3d69947e1c79ab1ac96622bbf72ed0e4f02 | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/seasonpass/models/tier.py | 2f85703a05dbe98fe8aaa8b3cba6d42d71842b3a | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 5,204 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Seasonpass Service (1.19.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class Tier(Model):
"""Tier (Tier)
Properties:
id_: (id) OPTIONAL str
required_exp: (requiredExp) OPTIONAL int
rewards: (rewards) OPTIONAL Dict[str, List[str]]
"""
# region fields
id_: str # OPTIONAL
required_exp: int # OPTIONAL
rewards: Dict[str, List[str]] # OPTIONAL
# endregion fields
# region with_x methods
def with_id(self, value: str) -> Tier:
self.id_ = value
return self
def with_required_exp(self, value: int) -> Tier:
self.required_exp = value
return self
def with_rewards(self, value: Dict[str, List[str]]) -> Tier:
self.rewards = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "id_"):
result["id"] = str(self.id_)
elif include_empty:
result["id"] = ""
if hasattr(self, "required_exp"):
result["requiredExp"] = int(self.required_exp)
elif include_empty:
result["requiredExp"] = 0
if hasattr(self, "rewards"):
result["rewards"] = {
str(k0): [str(i1) for i1 in v0] for k0, v0 in self.rewards.items()
}
elif include_empty:
result["rewards"] = {}
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
id_: Optional[str] = None,
required_exp: Optional[int] = None,
rewards: Optional[Dict[str, List[str]]] = None,
**kwargs,
) -> Tier:
instance = cls()
if id_ is not None:
instance.id_ = id_
if required_exp is not None:
instance.required_exp = required_exp
if rewards is not None:
instance.rewards = rewards
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> Tier:
instance = cls()
if not dict_:
return instance
if "id" in dict_ and dict_["id"] is not None:
instance.id_ = str(dict_["id"])
elif include_empty:
instance.id_ = ""
if "requiredExp" in dict_ and dict_["requiredExp"] is not None:
instance.required_exp = int(dict_["requiredExp"])
elif include_empty:
instance.required_exp = 0
if "rewards" in dict_ and dict_["rewards"] is not None:
instance.rewards = {
str(k0): [str(i1) for i1 in v0] for k0, v0 in dict_["rewards"].items()
}
elif include_empty:
instance.rewards = {}
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, Tier]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[Tier]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[Tier, List[Tier], Dict[Any, Tier]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"id": "id_",
"requiredExp": "required_exp",
"rewards": "rewards",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"id": False,
"requiredExp": False,
"rewards": False,
}
# endregion static methods
| [
"[email protected]"
] | |
ee3b20fc697c3d1f3fbf397a840d3b576efd3513 | cf1736096051056434ee38407fd0034d07c49044 | /test/testing/test_collect.py | 1f2251bbb1659c5134bb79bda56a52b33275875d | [
"MIT"
] | permissive | paskma/py | 5a41a50381ef7c35bc77b261941d7a0eff8553a8 | 92c92afa7096a167eb70ba82a526b879fbd27afd | refs/heads/master | 2020-05-20T05:49:17.301451 | 2013-01-20T16:01:44 | 2013-01-20T16:01:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,789 | py | from __future__ import generators
import py
from setupdata import setupdatadir
from py.__.test.outcome import Skipped, Failed, Passed, Outcome
def setup_module(mod):
mod.datadir = setupdatadir()
mod.tmpdir = py.test.ensuretemp('test_collect')
def skipboxed():
if py.test.config.option.boxed:
py.test.skip("test does not work with boxed tests")
def test_failing_import_execfile():
dest = datadir / 'failingimport.py'
col = py.test.collect.Module(dest)
py.test.raises(ImportError, col.run)
py.test.raises(ImportError, col.run)
def test_collect_listnames_and_back():
col1 = py.test.collect.Directory(datadir.dirpath())
col2 = col1.join(datadir.basename)
col3 = col2.join('filetest.py')
l = col3.listnames()
assert len(l) == 3
x = col1._getitembynames(l[1:])
assert x.name == "filetest.py"
x = col1._getitembynames("/".join(l[1:]))
assert x.name == "filetest.py"
l2 = x.listnames()
assert len(l2) == 3
def test_finds_tests():
fn = datadir / 'filetest.py'
col = py.test.collect.Module(fn)
l = col.run()
assert len(l) == 2
assert l[0] == 'test_one'
assert l[1] == 'TestClass'
def test_found_certain_testfiles():
tmp = py.test.ensuretemp("found_certain_testfiles")
tmp.ensure('test_found.py')
tmp.ensure('found_test.py')
colitem = py.test.collect.Directory(tmp)
items = list(colitem._tryiter(py.test.collect.Module))
assert len(items) == 2
items = [item.name for item in items]
assert 'test_found.py' in items
assert 'found_test.py' in items
def test_ignored_certain_directories():
tmp = py.test.ensuretemp("ignore_certain_directories")
tmp.ensure("_darcs", 'test_notfound.py')
tmp.ensure("CVS", 'test_notfound.py')
tmp.ensure("{arch}", 'test_notfound.py')
tmp.ensure(".whatever", 'test_notfound.py')
tmp.ensure(".bzr", 'test_notfound.py')
tmp.ensure("normal", 'test_found.py')
tmp.ensure('test_found.py')
colitem = py.test.collect.Directory(tmp)
items = list(colitem._tryiter(py.test.collect.Module))
assert len(items) == 2
for item in items:
assert item.name == 'test_found.py'
def test_failing_import_directory():
class MyDirectory(py.test.collect.Directory):
def filefilter(self, p):
return p.check(fnmatch='testspecial*.py')
mydir = MyDirectory(datadir)
l = mydir.run()
assert len(l) == 1
item = mydir.join(l[0])
assert isinstance(item, py.test.collect.Module)
py.test.raises(ImportError, item.run)
def test_module_file_not_found():
fn = datadir.join('nada','no')
col = py.test.collect.Module(fn)
py.test.raises(py.error.ENOENT, col.run)
def test_syntax_error_in_module():
p = py.test.ensuretemp("syntaxerror1").join('syntax_error.py')
p.write("\nthis is really not python\n")
modpath = datadir.join('syntax_error.py')
col = py.test.collect.Module(modpath)
py.test.raises(SyntaxError, col.run)
def test_disabled_class():
col = py.test.collect.Module(datadir.join('disabled.py'))
l = col.run()
assert len(l) == 1
colitem = col.join(l[0])
assert isinstance(colitem, py.test.collect.Class)
assert not colitem.run()
def test_disabled_module():
col = py.test.collect.Module(datadir.join('disabled_module.py'))
l = col.run()
assert len(l) == 0
class Testsomeclass:
disabled = True
def test_something():
raise ValueError
#class TestWithCustomItem:
# class Item(py.test.collect.Item):
# flag = []
# def execute(self, target, *args):
# self.flag.append(42)
# target(*args)
#
# def test_hello(self):
# assert self.Item.flag == [42]
#
def test_generative_simple():
o = tmpdir.ensure('generativetest', dir=1)
tfile = o.join('test_generative.py')
tfile.write(py.code.Source("""
from __future__ import generators # python2.2!
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield func1, 17, 3*5
yield func1, 42, 6*7
class TestGenMethods:
def test_gen(self):
yield func1, 17, 3*5
yield func1, 42, 6*7
"""))
col = py.test.collect.Module(tfile)
l = col.run()
assert len(l) == 2
l = col.multijoin(l)
generator = l[0]
assert isinstance(generator, py.test.collect.Generator)
l2 = generator.run()
assert len(l2) == 2
l2 = generator.multijoin(l2)
assert isinstance(l2[0], py.test.collect.Function)
assert isinstance(l2[1], py.test.collect.Function)
assert l2[0].name == '[0]'
assert l2[1].name == '[1]'
assert l2[0].obj.func_name == 'func1'
classlist = l[1].run()
assert len(classlist) == 1
classlist = l[1].multijoin(classlist)
cls = classlist[0]
generator = cls.join(cls.run()[0])
assert isinstance(generator, py.test.collect.Generator)
l2 = generator.run()
assert len(l2) == 2
l2 = generator.multijoin(l2)
assert isinstance(l2[0], py.test.collect.Function)
assert isinstance(l2[1], py.test.collect.Function)
assert l2[0].name == '[0]'
assert l2[1].name == '[1]'
def test_custom_python_collection_from_conftest():
o = tmpdir.ensure('customconfigtest', dir=1)
o.ensure('conftest.py').write("""if 1:
import py
class MyFunction(py.test.collect.Function):
pass
class Directory(py.test.collect.Directory):
def filefilter(self, fspath):
return fspath.check(basestarts='check_', ext='.py')
class myfuncmixin:
Function = MyFunction
def funcnamefilter(self, name):
return name.startswith('check_')
class Module(myfuncmixin, py.test.collect.Module):
def classnamefilter(self, name):
return name.startswith('CustomTestClass')
class Instance(myfuncmixin, py.test.collect.Instance):
pass
""")
checkfile = o.ensure('somedir', 'check_something.py')
checkfile.write("""if 1:
def check_func():
assert 42 == 42
class CustomTestClass:
def check_method(self):
assert 23 == 23
""")
for x in (o, checkfile, checkfile.dirpath()):
config = py.test.config._reparse([x])
#print "checking that %s returns custom items" % (x,)
col = config._getcollector(x)
assert len(list(col._tryiter(py.test.collect.Item))) == 2
#assert items[1].__class__.__name__ == 'MyFunction'
# test that running a session works from the directories
old = o.chdir()
try:
config = py.test.config._reparse([])
out = py.std.cStringIO.StringIO()
session = config._getsessionclass()(config, out)
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 2
finally:
old.chdir()
# test that running the file directly works
config = py.test.config._reparse([str(checkfile)])
out = py.std.cStringIO.StringIO()
session = config._getsessionclass()(config, out)
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 2
def test_custom_NONpython_collection_from_conftest():
o = tmpdir.ensure('customconfigtest_nonpython', dir=1)
o.ensure('conftest.py').write("""if 1:
import py
class CustomItem(py.test.collect.Item):
def run(self):
pass
class Directory(py.test.collect.Directory):
def filefilter(self, fspath):
return fspath.check(basestarts='check_', ext='.txt')
def join(self, name):
if not name.endswith('.txt'):
return super(Directory, self).join(name)
p = self.fspath.join(name)
if p.check(file=1):
return CustomItem(p, parent=self)
""")
checkfile = o.ensure('somedir', 'moredir', 'check_something.txt')
for x in (o, checkfile, checkfile.dirpath()):
print "checking that %s returns custom items" % (x,)
config = py.test.config._reparse([x])
col = config._getcollector(x)
assert len(list(col._tryiter(py.test.collect.Item))) == 1
#assert items[1].__class__.__name__ == 'MyFunction'
# test that running a session works from the directories
old = o.chdir()
try:
config = py.test.config._reparse([])
out = py.std.cStringIO.StringIO()
session = config._getsessionclass()(config, out)
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 1
finally:
old.chdir()
# test that running the file directly works
config = py.test.config._reparse([str(checkfile)])
out = py.std.cStringIO.StringIO()
session = config._getsessionclass()(config, out)
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 1
def test_order_of_execution_generator_same_codeline():
o = tmpdir.ensure('genorder1', dir=1)
o.join("test_order1.py").write(py.code.Source("""
def test_generative_order_of_execution():
test_list = []
expected_list = range(6)
def list_append(item):
test_list.append(item)
def assert_order_of_execution():
print 'expected order', expected_list
print 'but got ', test_list
assert test_list == expected_list
for i in expected_list:
yield list_append, i
yield assert_order_of_execution
"""))
config = py.test.config._reparse([o])
session = config.initsession()
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 7
def test_order_of_execution_generator_different_codeline():
    o = tmpdir.ensure('genorder2', dir=1)
o.join("test_genorder2.py").write(py.code.Source("""
def test_generative_tests_different_codeline():
test_list = []
expected_list = range(3)
def list_append_2():
test_list.append(2)
def list_append_1():
test_list.append(1)
def list_append_0():
test_list.append(0)
def assert_order_of_execution():
print 'expected order', expected_list
print 'but got ', test_list
assert test_list == expected_list
yield list_append_0
yield list_append_1
yield list_append_2
yield assert_order_of_execution
"""))
config = py.test.config._reparse([o])
session = config.initsession()
session.main()
l = session.getitemoutcomepairs(Passed)
assert len(l) == 4
def test_documentation_virtual_collector_interaction():
rootdir = py.path.local(py.__file__).dirpath("doc")
# HACK
from py.__.doc import conftest as conf
old = conf.option.forcegen
try:
conf.option.forcegen = 1
col = py.test.collect.Directory(rootdir)
x = list(col._tryiter(yieldtype=py.test.collect.Function))
finally:
conf.option.forcegen = old
def test__tryiter_ignores_skips():
tmp = py.test.ensuretemp("_tryiterskip")
tmp.ensure("subdir", "conftest.py").write(py.code.Source("""
import py
class Directory(py.test.collect.Directory):
def run(self):
py.test.skip("intentional")
"""))
col = py.test.collect.Directory(tmp)
try:
list(col._tryiter())
except KeyboardInterrupt:
raise
except:
exc = py.code.ExceptionInfo()
py.test.fail("should not have raised: %s" %(exc,))
def test__tryiter_ignores_failing_collectors():
tmp = py.test.ensuretemp("_tryiterfailing")
tmp.ensure("subdir", "conftest.py").write(py.code.Source("""
bla bla bla
"""))
col = py.test.collect.Directory(tmp)
try:
list(col._tryiter())
except KeyboardInterrupt:
raise
except:
exc = py.code.ExceptionInfo()
py.test.fail("should not have raised: %s" %(exc,))
l = []
list(col._tryiter(reporterror=l.append))
assert len(l) == 2
excinfo, item = l[-1]
assert isinstance(excinfo, py.code.ExceptionInfo)
def test_tryiter_handles_keyboardinterrupt():
tmp = py.test.ensuretemp("tryiterkeyboard")
tmp.ensure("subdir", "conftest.py").write(py.code.Source("""
raise KeyboardInterrupt()
"""))
col = py.test.collect.Directory(tmp)
py.test.raises(KeyboardInterrupt, list, col._tryiter())
def test_check_random_inequality():
tmp = py.test.ensuretemp("ineq")
tmp.ensure("test_x.py").write(py.code.Source("""def test_one():
pass
"""))
col = py.test.collect.Directory(tmp)
fn = col._tryiter().next()
assert fn != 3
assert fn != col
assert fn != [1,2,3]
assert [1,2,3] != fn
assert col != fn
def test_check_generator_collect_problems():
tmp = py.test.ensuretemp("gener_coll")
tmp.ensure("test_one.py").write(py.code.Source("""
def setup_module(mod):
mod.x = [1,2,3]
def check(zzz):
assert zzz
def test_one():
for i in x:
yield check, i
"""))
tmp.ensure("__init__.py")
col = py.test.collect.Module(tmp.join("test_one.py"))
errors = []
l = list(col._tryiter(reporterror=errors.append))
assert len(errors) == 2
def test_check_collect_hashes():
tmp = py.test.ensuretemp("check_collect_hashes")
tmp.ensure("test_one.py").write(py.code.Source("""
def test_1():
pass
def test_2():
pass
"""))
tmp.ensure("test_two.py").write(py.code.Source("""
def test_1():
pass
def test_2():
pass
"""))
tmp.ensure("__init__.py")
col = py.test.collect.Directory(tmp)
l = list(col._tryiter())
assert len(l) == 4
for numi, i in enumerate(l):
for numj, j in enumerate(l):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_check_directory_ordered():
tmpdir = py.test.ensuretemp("test_check_directory_ordered")
fnames = []
for i in range(9, -1, -1):
x = tmpdir.ensure("xdir%d" %(i, ), dir=1)
fnames.append(x.basename)
for i in range(9, -1, -1):
x = tmpdir.ensure("test_file%d.py" % (i,))
fnames.append(x.basename)
fnames.sort()
tmpdir.ensure('adir', dir=1)
fnames.insert(10, 'adir')
col = py.test.collect.Directory(tmpdir)
names = col.run()
assert names == fnames
| [
"[email protected]"
] | |
8968a994c1fe106db75f16913d6a9a08c0008f85 | c2f0dbf82cb28670e83fd2f1da1a1741a59554c5 | /nonlinear/source/backup/deep_qrc.py | fe7c8e7b28306ec9c7d9e028eca9b0d718867beb | [
"MIT"
] | permissive | ZdM87/higher-order-quantum-reservoir | 526b8bf352ca6e07b19b28133b8a6e1af67db2ae | 7fc75d03bb5ff2398d245ed7c5d7939fad00ef96 | refs/heads/master | 2023-08-12T08:18:50.762744 | 2021-10-07T01:44:48 | 2021-10-07T01:44:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,629 | py | import sys
import numpy as np
import scipy as sp
import utils
import qrc
from scipy.special import softmax
def softmax_layer(states):
    states = np.array(states)
    return softmax(states)
def linear_combine(u, states, coeffs):
assert(len(coeffs) == len(states))
v = 1 - np.sum(coeffs)
assert(v <= 1 and v >= 0)
v = v * u
for i in range(len(coeffs)):
v += states[i] * coeffs[i]
return v
def softmax_linear_combine(u, states, coeffs):
    states = softmax_layer(states)
return linear_combine(u, states, coeffs)
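# i.e. the raw input u is convex-combined with a softmax-normalised readout of
# the previous layer's states; linear_combine asserts the coeffs sum to <= 1.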
class DeepQuantumReservoirComputing(object):
def __init_reservoir(self, qparams, nqrc, layer_strength):
I = [[1,0],[0,1]]
Z = [[1,0],[0,-1]]
X = [[0,1],[1,0]]
P0 = [[1,0],[0,0]]
P1 = [[0,0],[0,1]]
self.n_units = qparams.n_units
self.trotter_step = qparams.trotter_step
self.virtual_nodes = qparams.virtual_nodes
self.tau_delta = qparams.tau_delta
self.nqrc = nqrc
self.qubit_count = self.n_units
self.dim = 2**self.qubit_count
self.Zop = [1]*self.qubit_count
self.Xop = [1]*self.qubit_count
self.P0op = [1]
self.P1op = [1]
self.layer_strength = layer_strength
for cursor_index in range(self.qubit_count):
for qubit_index in range(self.qubit_count):
if cursor_index == qubit_index:
self.Xop[qubit_index] = np.kron(self.Xop[qubit_index],X)
self.Zop[qubit_index] = np.kron(self.Zop[qubit_index],Z)
else:
self.Xop[qubit_index] = np.kron(self.Xop[qubit_index],I)
self.Zop[qubit_index] = np.kron(self.Zop[qubit_index],I)
if cursor_index == 0:
self.P0op = np.kron(self.P0op, P0)
self.P1op = np.kron(self.P1op, P1)
else:
self.P0op = np.kron(self.P0op, I)
self.P1op = np.kron(self.P1op, I)
# initialize connection from layer i to layer (i+1)
connections = []
for i in range(nqrc - 1):
alpha = self.layer_strength
if alpha < 0 or alpha > 1:
alpha = np.random.rand()
cs = np.random.rand(self.n_units * self.virtual_nodes)
cs = alpha * cs / np.sum(cs)
connections.append(cs)
self.coeffs = connections
# initialize current states
self.current_states = [None] * nqrc
        # Initialize evolution operators
tmp_uops = []
tmp_rhos = []
for i in range(nqrc):
# initialize density matrix
rho = np.zeros( [self.dim, self.dim] )
rho[0,0]=1
if qparams.init_rho != 0:
                # initialize a random density matrix
                import gen_random  # deferred import of this project's helper module
                rho = gen_random.random_density_matrix(self.dim)
tmp_rhos.append(rho)
# generate hamiltonian
hamiltonian = np.zeros( (self.dim,self.dim) )
# include input qubit for computation
for qubit_index in range(self.qubit_count):
coef = (np.random.rand()-0.5) * 2 * qparams.max_energy
hamiltonian += coef * self.Zop[qubit_index]
for qubit_index1 in range(self.qubit_count):
for qubit_index2 in range(qubit_index1+1, self.qubit_count):
coef = (np.random.rand()-0.5) * 2 * qparams.max_energy
hamiltonian += coef * self.Xop[qubit_index1] @ self.Xop[qubit_index2]
ratio = float(self.tau_delta) / float(self.virtual_nodes)
Uop = sp.linalg.expm(1.j * hamiltonian * ratio)
tmp_uops.append(Uop)
self.init_rhos = tmp_rhos
        self.last_rhos = list(tmp_rhos)  # copy, so evolving the state never mutates init_rhos
self.Uops = tmp_uops
def __feed_forward(self, input_sequence, predict=True, use_lastrho=False):
input_dim, input_length = input_sequence.shape
assert(input_dim == self.nqrc)
dim = 2**self.qubit_count
predict_sequence = None
        local_rhos = list(self.last_rhos)  # work on a copy of the per-layer density matrices
nqrc = self.nqrc
state_list = []
for time_step in range(0, input_length):
for i in reversed(range(nqrc)):
Uop = self.Uops[i]
if use_lastrho == True :
#print('Use last density matrix')
rho = self.last_rhos[i]
else:
rho = self.init_rhos[i]
# Obtain value from the input
value = input_sequence[i, time_step]
# Obtain values from previous layer
if i > 0:
previous_layer_states = self.current_states[i-1]
if previous_layer_states is not None:
value = softmax_linear_combine(value, previous_layer_states, self.coeffs[i-1])
# Replace the density matrix
rho = self.P0op @ rho @ self.P0op + self.Xop[0] @ self.P1op @ rho @ self.P1op @ self.Xop[0]
# (1 + u Z)/2 = (1+u)/2 |0><0| + (1-u)/2 |1><1|
# for input in [-1, 1]
# rho = (1+value)/2 * rho + (1-value)/2 *self.Xop[0] @ rho @ self.Xop[0]
# for input in [0, 1]
rho = (1 - value) * rho + value * self.Xop[0] @ rho @ self.Xop[0]
current_state = []
for v in range(self.virtual_nodes):
# Time evolution of density matrix
rho = Uop @ rho @ Uop.T.conj()
for qubit_index in range(0, self.qubit_count):
expectation_value = np.real(np.trace(self.Zop[qubit_index] @ rho))
current_state.append(expectation_value)
# Size of current_state is Nqubits x Nvirtuals
self.current_states[i] = current_state
local_rhos[i] = rho
# only use state of the last qrc to train
state = np.array(self.current_states[nqrc-1])
state_list.append(state)
state_list = np.array(state_list)
self.last_rhos = local_rhos
if predict:
stacked_state = np.hstack( [state_list, np.ones([input_length, 1])])
print('stacked state {}; Wout {}'.format(stacked_state.shape, self.W_out.shape))
predict_sequence = stacked_state @ self.W_out
return predict_sequence, state_list
def __train(self, input_sequence, output_sequence, buffer, beta):
print('shape', input_sequence.shape, output_sequence.shape)
assert(input_sequence.shape[1] == output_sequence.shape[0])
Nout = output_sequence.shape[1]
self.W_out = np.random.rand(self.n_units * self.virtual_nodes + 1, Nout)
_, state_list = self.__feed_forward(input_sequence, predict=False)
state_list = np.array(state_list)
print('before washingout state list shape', state_list.shape)
state_list = state_list[buffer:, :]
print('after washingout state list shape', state_list.shape)
# discard the transitient state for training
V = np.reshape(state_list, [-1, self.n_units * self.virtual_nodes])
V = np.hstack( [state_list, np.ones([V.shape[0], 1]) ] )
print('output seq', output_sequence.shape)
discard_output = output_sequence[buffer:, :]
print('discard output seq', discard_output.shape)
#S = np.reshape(output_sequence_list, [-1])
S = np.reshape(discard_output, [discard_output.shape[0]*discard_output.shape[1], -1])
print('V S', V.shape, S.shape)
self.W_out = np.linalg.pinv(V, rcond = beta) @ S
print('bf Wout', self.W_out.shape)
def train_to_predict(self, input_sequence, output_sequence, buffer, qparams, nqrc, layer_strength):
self.__init_reservoir(qparams, nqrc, layer_strength)
self.__train(input_sequence, output_sequence, buffer, qparams.beta)
def predict(self, input_sequence, output_sequence, buffer, use_lastrho):
prediction_sequence, _ = self.__feed_forward(input_sequence, \
predict=True, use_lastrho=use_lastrho)
pred = prediction_sequence[buffer:, :]
out = output_sequence[buffer:, :]
loss = np.sum((pred - out)**2)/np.sum(pred**2)
return prediction_sequence, loss
def init_forward(self, qparams, input_seq, nqrc, layer_strength, init_rs):
if init_rs == True:
self.__init_reservoir(qparams, nqrc, layer_strength)
_, state_list = self.__feed_forward(input_seq, predict=False)
return state_list
def get_loss(qrcparams, buffer, train_input_seq, train_output_seq, \
val_input_seq, val_output_seq, nqrc, layer_strength):
model = DeepQuantumReservoirComputing()
train_input_seq = np.array(train_input_seq)
train_output_seq = np.array(train_output_seq)
model.train_to_predict(train_input_seq, train_output_seq, buffer, qrcparams, nqrc, layer_strength)
train_pred_seq, train_loss = model.predict(train_input_seq, train_output_seq, buffer=buffer, use_lastrho=False)
#print("train_loss={}, shape".format(train_loss), train_pred_seq_ls.shape)
# Test phase
val_input_seq = np.array(val_input_seq)
val_output_seq = np.array(val_output_seq)
val_pred_seq, val_loss = model.predict(val_input_seq, val_output_seq, buffer=0, use_lastrho=True)
#print("val_loss={}, shape".format(val_loss), val_pred_seq_ls.shape)
return train_pred_seq, train_loss, val_pred_seq, val_loss
| [
"[email protected]"
] | |
f36ba303596cbb747219a2c2e7f7ef047a3de25d | 555c6ae723f2466673b7e6aeea11e7071461bfb3 | /sakura/common/tools.py | 66706081a63c70c046e108faf53597dc919d3359 | [] | no_license | riteshms/sakura | 788ade92cd0f65fb891a737d0113b807bf955a33 | e8d2b4454c26ea80f6275a1bde293db38db73d30 | refs/heads/master | 2021-07-05T10:58:41.774892 | 2017-09-20T09:03:15 | 2017-09-22T14:53:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import sys, gevent
class StdoutProxy(object):
def __init__(self, stdout):
self.stdout = stdout
def write(self, s):
self.stdout.write(s)
self.stdout.flush()
def __getattr__(self, attr):
return getattr(self.stdout, attr)
def set_unbuffered_stdout():
sys.stdout = StdoutProxy(sys.stdout)
def wait_greenlets(*greenlets):
gevent.joinall(greenlets, count=1)
class SimpleAttrContainer:
def __init__(self, **kwargs):
for k, v in kwargs.items():
v = self.load_val(v)
setattr(self, k, v)
def load_val(self, v):
if isinstance(v, dict):
v = SimpleAttrContainer(**v)
elif isinstance(v, tuple):
v = tuple(self.load_val(v2) for v2 in v)
elif isinstance(v, list):
v = list(self.load_val(v2) for v2 in v)
return v
def _asdict(self):
return self.__dict__.copy()
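# Minimal usage sketch (hypothetical values, not from this codebase):
#   c = SimpleAttrContainer(name='job', opts={'retries': 3})
#   c.opts.retries   -> 3  (nested dicts become attribute containers)
#   c._asdict()      -> shallow copy of the underlying attribute dict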
| [
"[email protected]"
] | |
9ddee95515d2c6dfb8656282798aa121eb5100be | af62d7e773257ce7c6fb294590c1339e38c134e2 | /iampacks/cross/mercadopago/views.py | 9d8d80d127df578b3665c28b262e94353ad97708 | [] | no_license | sebasgoldberg/iampacks | 1e3355fc3a2818766bde0b0614883d1df8a1db76 | 887acb4ea83ff1c89a5b70383faa1f30a811d7e7 | refs/heads/master | 2021-01-10T04:09:57.226591 | 2019-02-14T15:02:16 | 2019-02-14T15:02:16 | 52,726,621 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | # coding=utf-8
# Create your views here.
| [
"[email protected]"
] | |
209b79c613856a3a9efc9586a621842d4e69098e | f28591fab50d9b7a539c66b5a81fc91d1bc2ce64 | /py3/def/uint16_bigendian_tobytes.py | 718ccd6205a399b3b6fa9f1c5e6402ea7d33c9fa | [] | no_license | tnzw/tnzw.github.io | b8a5fe1f8479736bbf2b3594d511a1282939a3b3 | 6d95968db793cebcfa77cb49eecd987f821350db | refs/heads/master | 2023-04-21T14:22:49.849859 | 2023-03-31T15:55:01 | 2023-03-31T15:55:01 | 176,712,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # uint16_bigendian_tobytes.py Version 1.0.0
# Copyright (c) 2020 Tristan Cavelier <[email protected]>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def uint16_bigendian_tobytes(uint16):
return bytes(((uint16 >> 8) & 0xFF, uint16 & 0xFF))
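# Sanity-check example: uint16_bigendian_tobytes(0x1234) == b'\x12\x34'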
| [
"[email protected]"
] | |
7b74cc681fdd50f712c42f1604cb790300fe0a4d | ef821468b081ef2a0b81bf08596a2c81e1c1ef1a | /PythonWebBasics_Django/petstagram/pets/urls.py | 212f52bff8daa5f925d309e180d924a318f1cc1b | [] | no_license | Ivaylo-Atanasov93/The-Learning-Process | 71db22cd79f6d961b9852f140f4285ef7820dd80 | 354844e2c686335345f6a54b3af86b78541ed3f3 | refs/heads/master | 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from django.urls import path
from pets.views import pets_list, show_pet_details, like_pet
urlpatterns = [
path('', pets_list, name='list pets'),
path('details/<int:pk>/', show_pet_details, name='pet details'),
path('like/<int:pk>/', like_pet, name='like pet'),
]
| [
"[email protected]"
] | |
9bea5e1eada06fa70192f113563e4516d9e6a21e | dd097c7ae744227b0312d762ee0482a3380ff8c6 | /makenei_from_ele_xyz.py | 5af3305f3e285e46149ab1da568e6c0ac15fdcba | [] | no_license | moflaher/workspace_python | 0d6e98274d923a721db2b345f65c20b02ca59d08 | 6551e3602ead3373eafce10d11ce7b96bdcb106f | refs/heads/master | 2023-03-06T02:15:01.945481 | 2023-03-01T19:15:51 | 2023-03-01T19:15:51 | 20,814,932 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | from __future__ import division,print_function
import numpy as np
import matplotlib as mpl
import scipy as sp
from datatools import *
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import os as os
name='kit4'
xyz=np.genfromtxt(name + '.xyz')
ele=np.genfromtxt(name + '.ele')
# maxnei sizes the neighbour array below; the old `if 1==0:` guard left it
# undefined and crashed the script, so the computation runs unconditionally.
maxnei=np.histogram(ele,bins=ele.max()-1)[0].max()
nnodes=xyz.shape[0]
noderange=np.arange(1,nnodes+1)
xmin=xyz[:,0].min()
xmax=xyz[:,0].max()
ymin=xyz[:,1].min()
ymax=xyz[:,1].max()
neighbourlist=np.zeros([xyz.shape[0],maxnei])
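# For each node id, find every element row that references it and take the
# union of the other node ids in those rows as the node's neighbour list.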
for i in range(1,xyz.shape[0]+1):
    print(i)
idx=np.where(ele==i)[0]
tneilist=np.unique(ele[idx,:])
tneilist=tneilist[tneilist!=i]
neighbourlist[i-1,0:len(tneilist)]=tneilist
fp=open(name + '.nei','w')
fp.write('%d\n' % nnodes)
fp.write('%d\n' % maxnei)
fp.write('%f %f %f %f\n' % (xmax, ymin, xmin, ymax))
for i in range(0,nnodes):
fp.write('%d %f %f %d %f %u %u %u %u %u %u %u %u\n' % (i+1, xyz[i,0], xyz[i,1], 0 ,xyz[i,2],neighbourlist[i,0],neighbourlist[i,1],neighbourlist[i,2],neighbourlist[i,3],neighbourlist[i,4],neighbourlist[i,5],neighbourlist[i,6],neighbourlist[i,7]) )
fp.close()
| [
"[email protected]"
] | |
b0b14da84042b9971c28982f9d77f519829a1047 | aa1352a2f32c0c36194d3a6f8e683adba487a3eb | /FiRoom_backend/users/urls.py | 461aad0c0a56efdbc04145b7094f3c83cd7a2a12 | [] | no_license | Ace-bb/FiRoom_backend | 6c98d01c40e8de31ccbe86beaeada6c62516705e | efd4d9c1d7265e42f56638d5374a569a146acc03 | refs/heads/main | 2023-03-30T15:48:21.376390 | 2021-03-23T15:53:48 | 2021-03-23T15:53:48 | 338,780,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.urls import path
from . import views
urlpatterns = [
path('masterIdentify/uploadCertification', views.uploadCertification),
] | [
"[email protected]"
] | |
c4006da28b8476755c3f41bf5a0fcdea5757ca98 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/aio/operations/_express_route_connections_operations.py | a0bfb04f1caa8c809a50e9e6dd906b6a5def7b1f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 21,558 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations:
"""ExpressRouteConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs
) -> "_models.ExpressRouteConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteConnection"]:
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> "_models.ExpressRouteConnection":
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def list(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs
) -> "_models.ExpressRouteConnectionList":
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
| [
"[email protected]"
] | |
c0118ad0e0f29fc84614d3d9782b2b74eeeff8b8 | 7ee1fd7584f8770cd2381d85f797bf85cb9b4b67 | /usuarios/applications/users/migrations/0002_auto_20200318_0722.py | a428a6912352c1390b859456cca63c55c637915a | [] | no_license | neunapp/usuariosdj | 3171160fdf6898d07d6b353d034c70801e4bc21b | 3fe69b7357757baa5d799b614f232d75ed659502 | refs/heads/master | 2022-12-01T16:51:00.432272 | 2020-09-17T14:28:21 | 2020-09-17T14:28:21 | 237,993,639 | 4 | 2 | null | 2022-11-22T05:17:26 | 2020-02-03T15:10:33 | Python | UTF-8 | Python | false | false | 550 | py | # Generated by Django 3.0.3 on 2020-03-18 07:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='apellidos',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='user',
name='nombres',
field=models.CharField(blank=True, max_length=30),
),
]
| [
"[email protected]"
] | |
c2108381f0aa3dc54ea2812312d80e98117659a0 | 5574620c834f96d4baf50d6aa349242dae7c17af | /126.word-ladder-ii.py | 1ab52f8b88f5662f72456dcd442d5aab3fe52358 | [] | no_license | Ming-H/leetcode | 52dceba5f9a605afbdaa65e286a37205873e21bb | 057cee4b830603ac12976ed7d5cea8d06a9b46a0 | refs/heads/main | 2023-09-02T21:30:48.796395 | 2023-09-01T01:59:48 | 2023-09-01T01:59:48 | 489,290,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | #
# @lc app=leetcode id=126 lang=python3
#
# [126] Word Ladder II
#
import collections
import string
class Solution:
#def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
def findLadders(self, start, end, dic):
dic.add(end)
level = {start}
parents = collections.defaultdict(set)
while level and end not in parents:
next_level = collections.defaultdict(set)
for node in level:
for char in string.ascii_lowercase:
for i in range(len(start)):
n = node[:i]+char+node[i+1:]
if n in dic and n not in parents:
next_level[n].add(node)
level = next_level
parents.update(next_level)
res = [[end]]
while res and res[0][0] != start:
res = [[p]+r for r in res for p in parents[r[0]]]
return res
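# Usage sketch (note: despite the commented List[str] signature above, `dic`
# must be a set here because the code calls dic.add):
#   findLadders('hit', 'cog', {'hot','dot','dog','lot','log','cog'})
#   -> [['hit','hot','dot','dog','cog'], ['hit','hot','lot','log','cog']]
# (the order of the returned ladders may vary)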
| [
"[email protected]"
] | |
7e749d9cfdf6b6af07e6c5e6d4938e412ae0aeb3 | d26aebefdc6358f63e050f7712589e9fd9f4a258 | /cat_api_proj/settings.py | 02f54b418b70047175a1723d0eec397f4420c924 | [] | no_license | matthewgstillman/Cats_API | e93c7319a26c6bb06ed1be211e8f588edc4dc871 | c0918930d663527535e35d02c5c7ac098dbf6aa4 | refs/heads/master | 2020-03-29T20:47:06.514147 | 2018-09-25T21:17:48 | 2018-09-25T21:17:48 | 150,330,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py | """
Django settings for cat_api_proj project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^xbh+%sdv@pz=-$3#roy0^a!k(rfx$nfxr--v@(uui#jw^h8@&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.cat_api',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cat_api_proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cat_api_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
7e9302d32787862d768a2e2a3a7eeea66f8a542c | fc2fb2118ea02867d559bf8027e54e3c6b652cfd | /devItems/spring-2020/SEEAccuracyImprove/jirasoftware/step1_vectorize_text.py | 80e2c2b832ef7448c32e352447bb35267dbcc71c | [] | no_license | pdhung3012/SoftwareStoryPointsPrediction | 2431ad599e0fba37617cfd467de1f4f1afed56cc | 520990663cb42adcac315b75cd4eb1150c3fc86c | refs/heads/master | 2023-08-29T15:16:30.413766 | 2021-09-18T17:12:20 | 2021-09-18T17:12:20 | 254,596,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,611 | py | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from nltk.tokenize import word_tokenize
import os
import numpy as np
import gensim
from sklearn.decomposition import PCA
from sklearn.random_projection import GaussianRandomProjection
nameSystem='jirasoftware'
fopVectorAllSystems='vector_tfidf_original_'+nameSystem+'/'
# fopTextPreprocess='te'+nameSystem+'/'
fopDataset='../../dataset/'
import stanza
def addDependenciesToSentence(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
depends=sen._dependencies
lstDepInfo=[]
# depends=dict(depends)
for deKey in depends:
strElement=' '.join([deKey[2].text,deKey[0].text,deKey[1]])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def addDependenciesToSentenceCompact(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
depends=sen._dependencies
lstDepInfo=[]
# depends=dict(depends)
for deKey in depends:
strElement=' '.join([deKey[1]])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def addDependenciesToSentencePOS(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
words=sen._words
lstDepInfo=[]
# depends=dict(depends)
for w in words:
strElement=' '.join([w.upos])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def preprocess(textInLine):
text = textInLine.lower()
doc = word_tokenize(text)
# doc = [word for word in doc if word in words]
# doc = [word for word in doc if word.isalpha()]
return ' '.join(doc)
def createDirIfNotExist(fopOutput):
try:
# Create target Directory
os.mkdir(fopOutput)
print("Directory ", fopOutput, " Created ")
except FileExistsError:
print("Directory ", fopOutput, " already exists")
from os import listdir
from os.path import isfile, join
arrFiles = [f for f in listdir(fopDataset) if isfile(join(fopDataset, f))]
createDirIfNotExist(fopVectorAllSystems)
# createDirIfNotExist(fopTextPreprocess)
nlp = stanza.Pipeline() # This sets up a default neural pipeline in English
for file in arrFiles:
# if not file.endswith('csv'):
# continue
if not file.endswith(nameSystem+'.csv'):
continue
fileCsv = fopDataset + file
fpVectorItemCate=fopVectorAllSystems+file.replace('.csv','')+'_category.csv'
fpVectorItemReg = fopVectorAllSystems + file.replace('.csv','') + '_regression.csv'
fpTextInfo = fopVectorAllSystems + file.replace('.csv', '') + '_textInfo.csv'
raw_data = pd.read_csv(fileCsv)
raw_data_2 = pd.read_csv(fileCsv)
columnId=raw_data['issuekey']
columnRegStory=raw_data_2['storypoint']
raw_data.loc[raw_data.storypoint <= 2, 'storypoint'] = 0 # small
raw_data.loc[(raw_data.storypoint > 2) & (raw_data.storypoint <= 8), 'storypoint'] = 1 # medium
raw_data.loc[(raw_data.storypoint > 8) & (raw_data.storypoint <= 15), 'storypoint'] = 2 # large
raw_data.loc[raw_data.storypoint > 15, 'storypoint'] = 3 # very large
columnCateStory = raw_data['storypoint']
titles_and_descriptions = []
for i in range(0, len(raw_data['description'])):
strContent = ' '.join([str(raw_data['title'][i]),' . ', str(raw_data['description'][i])])
titles_and_descriptions.append(str(strContent))
text_after_tokenize = []
listDependences=[]
index=0
for lineStr in titles_and_descriptions:
lineAppend = preprocess(lineStr)
strToAdd = lineAppend
# try:
# doc = nlp(lineStr)
# strDepend = addDependenciesToSentencePOS(doc)
# strToAdd = ' '.join([lineAppend, strDepend])
# # strToAdd = ' '.join([strDepend])
# except:
# print('{} error on issue {}'.format(index,columnId[index]))
text_after_tokenize.append(strToAdd)
index=index+1
columnTitleRow='no,text\n'
csv = open(fpTextInfo, 'w')
csv.write(columnTitleRow)
for i in range(0, len(text_after_tokenize)):
strItem=text_after_tokenize[i].replace(',',' ')
csv.write(','.join([str(i+1),strItem]))
if(i<(len(text_after_tokenize)-1)):
csv.write('\n')
csv.close()
# get vector using TF-IDF
vectorizer = TfidfVectorizer(ngram_range=(1, 4))
X = vectorizer.fit_transform(text_after_tokenize)
X = X.toarray()
# X = PCA().fit(X)
pca = PCA(n_components=100)
X = pca.fit_transform(X)
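    # TF-IDF yields a large sparse matrix; after densifying it with toarray(),
    # PCA projects it down to 100 components so downstream models work on a
    # compact feature set.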
# srp=GaussianRandomProjection(n_components=3)
# X=srp.fit_transform(X)
print('end vectorize')
lenVectorOfWord = len(X[0])
columnTitleRow = "no,story,"
for i in range(0,lenVectorOfWord):
item='feature-'+str(i+1)
columnTitleRow = ''.join([columnTitleRow, item])
if i!=lenVectorOfWord-1:
columnTitleRow = ''.join([columnTitleRow, ","])
columnTitleRow = ''.join([columnTitleRow, "\n"])
csv = open(fpVectorItemCate, 'w')
csv.write(columnTitleRow)
csv2 = open(fpVectorItemReg, 'w')
csv2.write(columnTitleRow)
corpusVector = []
for i in range(0,len(text_after_tokenize)):
# arrTokens = word_tokenize(str(text_after_tokenize[i]))
# if not has_vector_representation(dictWordVectors, str(text_after_tokenize[i])):
# continue
# # arrTokens = word_tokenize(str(text_after_tokenize[i]))
vector= X[i]
corpusVector.append(vector)
# strVector=','.join(vector)
strCate=str(columnCateStory[i])
strReg=str(columnRegStory[i])
# strRow=''.join([str(i+1),',','S-'+str(columnStoryPoints[i]),])
# strRow = ''.join([str(i + 1), ',', 'S-' + strCate, ])
strRow = ''.join([str(i + 1), ',', '' + strCate, ])
strRow2 = ''.join([str(i + 1), ',', '' + strReg, ])
for j in range(0,lenVectorOfWord):
strRow=''.join([strRow,',',str(vector[j])])
strRow2 = ''.join([strRow2, ',', str(vector[j])])
strRow = ''.join([strRow, '\n'])
strRow2 = ''.join([strRow2, '\n'])
csv.write(strRow)
csv2.write(strRow2)
csv.close()
csv2.close()
print('Finish {}'.format(file))
| [
"[email protected]"
] | |
f4d523bf8b922bf42041001ffdc30993d9ab3efc | d6780d2a5126bff23d0a46c7376f5085063a3a4e | /backend/chat/api/v1/viewsets.py | 211c8f530ed018dec88234b4bfad728e6cf050ca | [] | no_license | crowdbotics-apps/test-25190 | 2721890ce00a97b8168e1188fe5d96ff86e1b6e4 | 48ce90fb307e5d98963a73e8c07a9a631b370f59 | refs/heads/master | 2023-03-24T23:43:59.659900 | 2021-03-22T15:53:21 | 2021-03-22T15:53:21 | 350,400,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from rest_framework import authentication
from chat.models import (
Message,
ThreadMember,
MessageAction,
ThreadAction,
ForwardedMessage,
Thread,
)
from .serializers import (
MessageSerializer,
ThreadMemberSerializer,
MessageActionSerializer,
ThreadActionSerializer,
ForwardedMessageSerializer,
ThreadSerializer,
)
from rest_framework import viewsets
class MessageActionViewSet(viewsets.ModelViewSet):
serializer_class = MessageActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = MessageAction.objects.all()
class ForwardedMessageViewSet(viewsets.ModelViewSet):
serializer_class = ForwardedMessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ForwardedMessage.objects.all()
class ThreadActionViewSet(viewsets.ModelViewSet):
serializer_class = ThreadActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadAction.objects.all()
class ThreadViewSet(viewsets.ModelViewSet):
serializer_class = ThreadSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Thread.objects.all()
class ThreadMemberViewSet(viewsets.ModelViewSet):
serializer_class = ThreadMemberSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadMember.objects.all()
class MessageViewSet(viewsets.ModelViewSet):
serializer_class = MessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Message.objects.all()
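# Typical wiring sketch (router setup and prefixes here are assumptions, not
# taken from this repo's urls module):
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register("message", MessageViewSet)
#   urlpatterns = router.urls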
| [
"[email protected]"
] | |
748188ed28fde4abc2b26a91c727a0d58176ac3f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2484/60691/284691.py | e7ae022ea8ced39460e70e67ef7cc820c09dc498 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | def union(s1, s2):
l = s1.split(' ')
l1 = s2.split(' ')
if l1 == ['']:
return len(l)
for i in range(len(l1)):
if (not l1[i] in l) and (l1[i] != ' '):
l.append(l1[i])
    # print(l)  # debug trace; commented out so only the final count is printed
return len(l)
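# e.g. union('a b', 'b c') -> 3: words are compared as whole tokens, and an
# empty second string leaves the first word list unchanged.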
n = int(input())
useless = []
arra = []
arrb = []
for i in range(n):
useless.append(input())
arra.append(input())
arrb.append(input())
for i in range(n):
print(union(arra[i], arrb[i])) | [
"[email protected]"
] | |
4f53fca6de83c3e1a1350e96ab8c05885220e402 | a5fabc6d6341925b587fecb082dc70c0d1b95619 | /py_FPA/FPA_Fil.py | 0e0dabdbc7f14bd77822cefddf3a45b70a4ed9d5 | [] | no_license | illuminous/pythonScripts | fcfef9c0fb9bd3155edcf02b56bbec563ff08b2a | 1bb69a1bb1e10f6041274c027cc0ab06c7a9efed | refs/heads/master | 2022-10-22T09:35:48.088907 | 2022-09-28T22:35:32 | 2022-09-28T22:35:32 | 120,963,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,844 | py | import glob, arcgisscripting
import os
# Create the Geoprocessor object
gp = arcgisscripting.create()
gp.CheckOutExtension("spatial")
gp.AddToolBox
gp.toolbox = "management"
zones = ['RM_WY']#'EA_IL', 'EA_IN', 'NW_WA', 'SA_PR','NR_MT','CA_CA', 'EA_IA', 'EA_MI', 'EA_MN', 'EA_MO', 'EA_NH', 'EA_NJ', 'EA_OH', 'EA_PA', 'EA_WI', 'EA_WV', 'GB_ID', 'GB_NV', 'GB_UT', 'GB_WY', 'NR_ID','NR_MT', 'NR_ND', 'NW_OR', 'MW_WA', 'RM_CO', 'RM_KS', 'RM_NE', 'RM_SD', 'RM_WY', 'SA_AL', 'SA_AR', 'SA_FL', 'SA_GA', 'SA_KY', 'SA_LA', 'SA_MD','SA_MS', 'SA_NC', 'SA_OK', 'SA_SC', 'SA_TN', 'SA_TX', 'SA_VA', 'SW_AZ', 'SW_NM', 'SW_TX'] #these are FPUs
names = ['_003']#'_001', '_002', '_003', '_004', '_005', '_006', '_007', '_008', '_009', '_010', '_011', '_012', '_013', '_014', '_015']
oldnames = ['_STANDARD0_burnprob.asc_FLP.txt', '_STANDARD0_burnprob.asc', '_STANDARD0_burnprob.asc_MeanIntensity.asc', '_STANDARD0_burnprob.asc_FireSizeList.txt', '_TREATMENTS0_burnprob.asc', '_TREATMENTS0_burnprob.asc_FireSizeList.txt', '_TREATMENTS0_burnprob.asc_FLP.txt', '_TREATMENTS0_burnprob.asc_MeanIntensity.asc']
#directory = 'C:\\Working\FPA\extracted\\simresults\\test'
###-------------------------------------------------------------------------------------------
### First Section: Rename the FPA files into a consistent format
##for zone in zones:
## for name in names:
## for oldname in oldnames:
## oldfilename = 'C:\\Working\FPA\extracted\\simresults\\%s%s%s' %(zone, name, oldname)
## newnames = dict()
## newnames['_STANDARD0_burnprob.asc'] = '%s%s_SB.asc' %(zone, name)
## newnames['_STANDARD0_burnprob.asc_FLP.txt'] = '%s%s_FLP.csv' %(zone, name)
## newnames['_STANDARD0_burnprob.asc_MeanIntensity.asc'] = '%s%s_MI.asc' %(zone, name)
## newnames['_STANDARD0_burnprob.asc_FireSizeList.txt'] = '%s%s_FSL.txt' %(zone, name)
## newnames['_TREATMENTS0_burnprob.asc'] = '%s%s_TB.asc' %(zone, name)
## newnames['_TREATMENTS0_burnprob.asc_FireSizeList.txt'] = '%s%s_TFSL.txt' %(zone, name)
## newnames['_TREATMENTS0_burnprob.asc_FLP.txt'] = '%s%s_TFLP.csv' %(zone, name)
## newnames['_TREATMENTS0_burnprob.asc_MeanIntensity.asc'] = '%s%s_TMI.asc' %(zone, name)
## print 'oldfilename = ',oldfilename
## if os.access(oldfilename,os.F_OK):
## print 'renaming file...', oldfilename
## newfilename = newnames[oldname]
## os.rename(oldfilename, newfilename)
## else:
## print"file is not fixed or does not exist"
##
##
#####-------------------------------------------------------------------------------------------
##### Second Section: add x, y from files to create a layer
##### Set the spatial reference
##### NOTE: you must have the "Coordinate Systems" turned on
##### To do this click tools/options and check "Coordinate Systems"
##### in the General Tab
##print "Add x, y from files to create a layer"
##for zone in zones:
## for name in names:
## print name
## try:
## # Prepare the variables
## in_Table = 'C:\\Working\\FPA\\renamed\\%s%s_FLP.csv' %(zone, name)
## in_x = "XPos"
## in_y = "YPos"
## out_Layer = "%s%s_Pbr" %(zone, name)
## spref = "C:\\Program Files\\ArcGIS\\Coordinate Systems\\Geographic Coordinate Systems\\North America\\North American Datum 1983.prj"
##
## # Make the XY event...
## gp.MakeXYEventLayer(in_Table, in_x, in_y, out_Layer, spref)
##
## # Save to a layer file
## gp.SaveToLayerFile(out_Layer, 'C:/Working/FPA/layers/%s%s_Pbr.lyr' %(zone, name))
##
## except:
## # If an error occurred print the message to the screen
## print gp.GetMessages()
#######------------------------------------------------------------------------------------------
####### Third Section: convert new feature layers to rasters
##print "Convert new feature layers to rasters"
##
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "PBurn"
## OutRaster = 'C:\\Working\\FPA\\PBR_Raster\\%s%s_Pb' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##print "Convert new feature layers to rasters Fil1"
##
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil1"
## OutRaster = 'C:\\Working\\FPA\\Fil1_Raster\\%s%s_F1' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##
##print "Convert new feature layers to rasters Fil2"
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil2"
## OutRaster = 'C:\\Working\\FPA\\Fil2_Raster\\%s%s_F2' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##print "Convert new feature layers to rasters Fil3"
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil3"
## OutRaster = 'C:\\Working\\FPA\\Fil3_Raster\\%s%s_F3' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
####
##print "Convert new feature layers to rasters Fil4"
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil4"
## OutRaster = 'C:\\Working\\FPA\\Fil4_Raster\\%s%s_F4' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##
##print "Convert new feature layers to rasters Fil5"
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil5"
## OutRaster = 'C:\\Working\\FPA\\Fil5_Raster\\%s%s_F5' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##
##print "Convert new feature layers to rasters Fil6"
##
##for zone in zones:
## for name in names:
## print name
## try:
## # Set local variables
## InFeatures = 'C:\\Working\\FPA\\layers\\%s%s_Pbr.lyr' %(zone, name)
## InField = "Fil6"
## OutRaster = 'C:\\Working\\FPA\\Fil6_Raster\\%s%s_F6' %(zone, name)
## InCellSize = "270"
##
## # Process: FeatureToRaster_conversion
## gp.FeatureToRaster_conversion(InFeatures, InField, OutRaster, InCellSize)
##
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
#####------------------------------------------------------------------------------------------
##### Forth Section: Clip Rasters to FPA FPU extents and reproject to landfire
##
##
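# Active pipeline below: copy the per-FPU raster, stamp the Albers projection
# onto it, pull the FPU footprint out of the fpa_270 grid by attribute, snap
# the processing extent to that footprint, clip by mask, then delete the
# intermediate grids.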
for zone in zones:
print zone
for name in names:
print names
try:
#coordysFPU = "C:\Program Files\ArcGIS\Coordinate Systems\Geographic Coordinate Systems\North America\North American Datum 1983.prj"
coordsys = "C:\Program Files\ArcGIS\Coordinate Systems\Projected Coordinate Systems\Continental\North America\USA Contiguous Albers Equal Area Conic USGS.prj"
InMask = 'C:/working/FPA/fpa_270'
InWhereClause = "FPU_ID = '%s%s'" % (zone, name)
LFRaster = 'C:\\working\\FPA\\PBR_Raster\\%s%s_PB' %(zone, name)
#OutRaster = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_o' % (zone, name)
IntRaster = 'C:\\working\\FPA\\PBR_Raster_clip\\%s%s_i' % (zone, name)
OutRaster2 = 'C:\\working\\FPA\\PBR_Raster_clip\\%s%s_2' % (zone, name)
OutRaster3 = 'C:\\working\\FPA\\PBR_Raster_clip\\%s%s_c' % (zone, name)
OutRaster4 = 'C:\\working\\FPA\\PBR_Raster_clip\\%s%s_PB' % (zone, name)
kms = 'C:\\working\\FPA\\PBR_Raster_clip\\%s%s_c' % (zone, name)
## if gp.exists (OutRaster4):
## print OutRaster4 + "exists"
## else:
#Process: Copy Raster, using copy from aml
gp.copy(LFRaster, IntRaster)
#gp.Times_sa(LFRaster, 10000, OutRaster)
#gp.Int_sa(OutRaster, IntRaster, "INTEGER")
gp.DefineProjection_Management(IntRaster, coordsys)
gp.copy(IntRaster, OutRaster2)
#Process: Extract by zone mask delet this....gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
#gp.ExtractByMask_sa(OutRaster2, InMask2, OutRaster3)
#gp.Con_sa(OutRaster2, InMask, OutRaster3, "Value >= 0")
gp.ExtractByAttributes_sa(InMask, InWhereClause, OutRaster3)
Ras = gp.Describe(kms)
print Ras.Extent
print InWhereClause
tempEnvironment7 = gp.extent
gp.extent = Ras.Extent
#gp.ExtractByAttributes_sa(OutRaster2, InWhereClause, OutRaster3)
gp.ExtractByMask_sa(OutRaster2, OutRaster3, OutRaster4)
gp.delete_management(IntRaster)
#gp.delete_management(OutRaster)
gp.delete_management(OutRaster2)
gp.delete_management(OutRaster3)
print "next one"
except:
# Print error message if an error occurs
print gp.GetMessages()
##
##for zone in zones:
## print zone
## for name in names:
## print names
## try:
## #coordysFPU = "C:\Program Files\ArcGIS\Coordinate Systems\Geographic Coordinate Systems\North America\North American Datum 1983.prj"
## coordsys = "C:\Program Files\ArcGIS\Coordinate Systems\Projected Coordinate Systems\Continental\North America\USA Contiguous Albers Equal Area Conic USGS.prj"
## InMask = 'C:/working/FPA/fpa_270'
## InWhereClause = "FPU_ID = '%s%s'" % (zone, name)
## LFRaster = 'C:\\working\\FPA\\Fil2_Raster\\%s%s_F2' %(zone, name)
## #OutRaster = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_o' % (zone, name)
## IntRaster = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_i' % (zone, name)
## OutRaster2 = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_2' % (zone, name)
## OutRaster3 = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_c' % (zone, name)
## OutRaster4 = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_F2' % (zone, name)
## kms = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_c' % (zone, name)
#### if gp.exists (OutRaster4):
#### print OutRaster4 + "exists"
#### else:
## #Process: Copy Raster, using copy from aml
## gp.copy(LFRaster, IntRaster)
## #gp.Times_sa(LFRaster, 10000, OutRaster)
## #gp.Int_sa(OutRaster, IntRaster, "INTEGER")
## gp.DefineProjection_Management(IntRaster, coordsys)
## gp.copy(IntRaster, OutRaster2)
## #Process: Extract by zone mask delet this....gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
## #gp.ExtractByMask_sa(OutRaster2, InMask2, OutRaster3)
## #gp.Con_sa(OutRaster2, InMask, OutRaster3, "Value >= 0")
## gp.ExtractByAttributes_sa(InMask, InWhereClause, OutRaster3)
## Ras = gp.Describe(kms)
## print Ras.Extent
## print InWhereClause
## tempEnvironment7 = gp.extent
## gp.extent = Ras.Extent
## #gp.ExtractByAttributes_sa(OutRaster2, InWhereClause, OutRaster3)
## gp.ExtractByMask_sa(OutRaster2, OutRaster3, OutRaster4)
## gp.delete_management(IntRaster)
## #gp.delete_management(OutRaster)
## gp.delete_management(OutRaster2)
## gp.delete_management(OutRaster3)
## print "next one"
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
##for zone in zones:
## print zone
## for name in names:
## print names
## try:
## #coordysFPU = "C:\Program Files\ArcGIS\Coordinate Systems\Geographic Coordinate Systems\North America\North American Datum 1983.prj"
## coordsys = "C:\Program Files\ArcGIS\Coordinate Systems\Projected Coordinate Systems\Continental\North America\USA Contiguous Albers Equal Area Conic USGS.prj"
## InMask = 'C:/working/FPA/fpa_270'
## InWhereClause = "FPU_ID = '%s%s'" % (zone, name)
## LFRaster = 'C:\\working\\FPA\\Fil6_Raster\\%s%s_F6' %(zone, name)
## #OutRaster = 'C:\\working\\FPA\\Fil2_Raster_clip\\%s%s_o' % (zone, name)
## IntRaster = 'C:\\working\\FPA\\Fil6_Raster_clip\\%s%s_i' % (zone, name)
## OutRaster2 = 'C:\\working\\FPA\\Fil6_Raster_clip\\%s%s_2' % (zone, name)
## OutRaster3 = 'C:\\working\\FPA\\Fil6_Raster_clip\\%s%s_c' % (zone, name)
## OutRaster4 = 'C:\\working\\FPA\\Fil6_Raster_clip\\%s%s_F6' % (zone, name)
## kms = 'C:\\working\\FPA\\Fil6_Raster_clip\\%s%s_c' % (zone, name)
#### if gp.exists (OutRaster4):
#### print OutRaster4 + "exists"
#### else:
## #Process: Copy Raster, using copy from aml
## gp.copy(LFRaster, IntRaster)
## #gp.Times_sa(LFRaster, 10000, OutRaster)
## #gp.Int_sa(OutRaster, IntRaster, "INTEGER")
## gp.DefineProjection_Management(IntRaster, coordsys)
## gp.copy(IntRaster, OutRaster2)
## #Process: Extract by zone mask delet this....gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
## #gp.ExtractByMask_sa(OutRaster2, InMask2, OutRaster3)
## #gp.Con_sa(OutRaster2, InMask, OutRaster3, "Value >= 0")
## gp.ExtractByAttributes_sa(InMask, InWhereClause, OutRaster3)
## Ras = gp.Describe(kms)
## print Ras.Extent
## print InWhereClause
## tempEnvironment7 = gp.extent
## gp.extent = Ras.Extent
## #gp.ExtractByAttributes_sa(OutRaster2, InWhereClause, OutRaster3)
## gp.ExtractByMask_sa(OutRaster2, OutRaster3, OutRaster4)
## gp.delete_management(IntRaster)
## #gp.delete_management(OutRaster)
## gp.delete_management(OutRaster2)
## gp.delete_management(OutRaster3)
## print "next one"
## except:
## # Print error message if an error occurs
## print gp.GetMessages()
######### Fifth Section: Create new mosaic
##for zone in zones:
## print zone
## for name in names:
## print name
## gp.WorkSpace ='C:\\working\\FPA\\MT_20090608\\test\\out'
## OutWorkspace ='C:\\working\\FPA\\MT_20090608\\test\\out'
## OutRaster4 = 'C:\\working\\FPA\\MT_20090608\\test\\out\\%s%s_f3' % (zone, name)
## rasters = gp.ListRasters("*%s%s_f3*"%(zone, name), "GRID")
## raster = rasters.next()
## namegrid = '%s%s_f3'%(zone, name)
## mystring=""
## while raster:
## mystring+=(raster) + "; "
## raster = rasters.next()
## else:
## mystring=mystring.rstrip("; ") # r meaning strip the ; on the right
## gp.MosaicToNewRaster_management(mystring, OutWorkspace, namegrid, "#", "#", "#", "#", "FIRST", "#")
## print 'mosaic complete!'
| [
"[email protected]"
] | |
5152b34d823d6a11128d9e66f1e9e53e70047cb9 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_NoCycle_AR.py | b8ab652e2b3ff4f4365950f4e030704063914475 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 151 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['NoCycle'] , ['AR'] ); | [
"[email protected]"
] | |
6f353343ebfd3b1bb1b40a1c028e27b4db514c59 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /1401-1500/1475-Final Prices With a Special Discount in a Shop/1475-Final Prices With a Special Discount in a Shop.py | 4b181402e0898044f9b11bafe9ab82e4848e82b7 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 362 | py | class Solution:
    def finalPrices(self, prices: List[int]) -> List[int]:
        n = len(prices)
        result = [0] * n
        St = []  # monotonic stack of candidate discount prices, built scanning right to left
        for i in range(n - 1, -1, -1):
            # Drop prices greater than prices[i]; they can never act as its discount.
            while St and prices[i] < St[-1]:
                St.pop()
            # The remaining stack top is the first price to the right that is <= prices[i].
            result[i] = prices[i] - (St[-1] if St else 0)
            St.append(prices[i])
        return result
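# Quick sanity check (hypothetical usage, not part of the original file):
# Solution().finalPrices([8, 4, 6, 2, 3]) -> [4, 2, 4, 2, 3]
# (each price is reduced by the first later price that does not exceed it)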
| [
"[email protected]"
] | |
ec8f221cde5fdf597b6c8d7493464006d316c717 | d2dda11e125068512c5c0db0f24b80bc53c94ce3 | /LeetCode/Ex0/Ex88.py | 730a58387b0e8f11def225a9734664b6064cfec2 | [] | no_license | JasonVann/CrackingCodingInterview | f90163bcd37e08f6a41525f9f95663d5f42dd8e6 | 8f9327a1879949f61b462cc6c82e00e7c27b8b07 | refs/heads/master | 2021-09-02T09:28:34.553704 | 2018-01-01T12:05:12 | 2018-01-01T12:05:12 | 110,519,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | class Ex88(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
k = m + n - 1
i = m - 1
j = n - 1
while j >= 0 and i >= 0:
#print i, j, k
if nums1[i] > nums2[j]:
nums1[k] = nums1[i]
i -= 1
k -= 1
else :
nums1[k] = nums2[j]
k -= 1
j -= 1
#print 'b', nums1, i, j, k
if k >= 0 and j >= 0:
nums1[:k+1] = nums2[:j+1]
return nums1
ex88 = Ex88()
nums1 = [2, 0]
nums2 = [1]
print(88, ex88.merge(nums1, 1, nums2, 1))
| [
"[email protected]"
] | |
e4e5b5b777d0c2876ccd93f0e0687824c3d9acc0 | 53706aea0f1358c3589b9afa8a94f1c902c2c494 | /algorithms/tmax/apps/record_trajectory_manually.py | 29adb24df33967fa011df4579ea3ccccb99c3b98 | [
"MIT"
] | permissive | alex-petrenko/landmark-exploration | 6df0eae63ba501c3509e1264f8f99101ff0df345 | faaeff84176de34e2ab4c18b24ee022bd069299e | refs/heads/master | 2021-06-28T16:43:41.299431 | 2019-06-02T00:09:09 | 2019-06-02T00:09:09 | 164,031,073 | 4 | 1 | MIT | 2021-06-18T20:53:45 | 2019-01-03T22:53:03 | Python | UTF-8 | Python | false | false | 3,668 | py | import datetime
import pickle
import sys
import time
from os.path import join
from threading import Thread
from pynput.keyboard import Key, Listener
from algorithms.utils.algo_utils import main_observation
from algorithms.utils.env_wrappers import reset_with_info
from algorithms.tmax.agent_tmax import AgentTMAX
from algorithms.tmax.tmax_utils import parse_args_tmax
from algorithms.topological_maps.topological_map import TopologicalMap
from algorithms.utils.trajectory import Trajectory
from utils.envs.atari import atari_utils
from utils.envs.doom import doom_utils
from utils.envs.envs import create_env
from utils.envs.generate_env_map import generate_env_map
from utils.timing import Timing
from utils.utils import log, ensure_dir_exists
terminate = False
current_actions = []
key_to_action = None
# noinspection PyCallingNonCallable
def on_press(key):
if key == Key.esc:
global terminate
terminate = True
return False
global current_actions
action = key_to_action(key)
if action is not None:
if action not in current_actions:
current_actions.append(action)
# noinspection PyCallingNonCallable
def on_release(key):
global current_actions
action = key_to_action(key)
if action is not None:
if action in current_actions:
current_actions.remove(action)
def record_trajectory(params, env_id):
def make_env_func():
e = create_env(env_id, skip_frames=True)
e.seed(0)
return e
env = make_env_func()
map_img, coord_limits = generate_env_map(make_env_func)
env_obs, info = reset_with_info(env)
obs = main_observation(env_obs)
done = False
m = TopologicalMap(obs, directed_graph=False, initial_info=info, verbose=True)
trajectory = Trajectory(env_idx=-1)
frame = 0
t = Timing()
while not done and not terminate:
with t.timeit('one_frame'):
env.render()
if len(current_actions) > 0:
action = current_actions[-1]
else:
action = 0
trajectory.add(obs, action, info)
m.add_landmark(obs, info, update_curr_landmark=True)
env_obs, rew, done, info = env.step(action)
obs = main_observation(env_obs)
took_seconds = t.one_frame
desired_fps = 15
wait_seconds = (1.0 / desired_fps) - took_seconds
wait_seconds = max(0.0, wait_seconds)
time.sleep(wait_seconds)
frame += 1
env.render()
time.sleep(0.2)
trajectory_dir = trajectory.save(params.experiment_dir())
m.save_checkpoint(trajectory_dir, map_img=map_img, coord_limits=coord_limits, verbose=True)
env.close()
return 0
def main():
args, params = parse_args_tmax(AgentTMAX.Params)
env_id = args.env
global key_to_action
if 'dmlab' in env_id:
from utils.envs.dmlab import play_dmlab
key_to_action = play_dmlab.key_to_action
elif 'atari' in env_id:
key_to_action = atari_utils.key_to_action
elif 'doom' in env_id:
key_to_action = doom_utils.key_to_action
else:
raise Exception('Unknown env')
# start keypress listener (to pause/resume execution or exit)
def start_listener():
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
listener_thread = Thread(target=start_listener)
listener_thread.start()
status = record_trajectory(params, args.env)
if not terminate:
log.debug('Press ESC to exit...')
listener_thread.join()
return status
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
522b229ed5441f337e5ffaf6d29ee042868dfd53 | f48512f1f42d55fabc9ab46f448138e771b78c68 | /sphinx/conf.py | d3bc27682716395cd7e2c284cb4cb038eaed7629 | [] | no_license | HussainAther/chipseq | 07871167f894ba612d6ca476b5a6fb37440c682a | 6d12752c15a0368a5d4b40af6f5916d2c04c767f | refs/heads/master | 2020-03-08T07:49:44.997059 | 2018-04-04T03:58:42 | 2018-04-04T03:58:42 | 128,004,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,167 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# snakemake documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 27 17:54:40 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'snakemake'
copyright = '2017, Hussain Ather'
author = 'Hussain Ather'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'snakemakedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'snakemake.tex', 'snakemake Documentation',
'Hussain Ather', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'snakemake', 'snakemake Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'snakemake', 'snakemake Documentation',
author, 'snakemake', 'One line description of project.',
'Miscellaneous'),
]
| [
"[email protected]"
] | |
85f2ca2dc3e54d1d6bd9647dad7d3cbe14a37fcb | 3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff | /tests/unit/mock/procenv.py | a97081a52d2332cb01f5d0017c20c9c3ff26917c | [] | no_license | ansible-collection-migration/ansible.fortios | f7b1a7a0d4b69c832403bee9eb00d99f3be65e74 | edad6448f7ff4da05a6c856b0e7e3becd0460f31 | refs/heads/master | 2020-12-18T13:08:46.739473 | 2020-02-03T22:10:49 | 2020-02-03T22:10:49 | 235,393,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | # (c) 2016, Matt Davis <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible_collections.ansible.fortios.tests.unit.compat import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
"""
context manager that temporarily masks the test runner's values for stdin and argv
"""
real_stdin = sys.stdin
real_argv = sys.argv
if PY3:
fake_stream = StringIO(stdin_data)
fake_stream.buffer = BytesIO(to_bytes(stdin_data))
else:
fake_stream = BytesIO(to_bytes(stdin_data))
try:
sys.stdin = fake_stream
sys.argv = argv_data
yield
finally:
sys.stdin = real_stdin
sys.argv = real_argv
@contextmanager
def swap_stdout():
"""
context manager that temporarily replaces stdout for tests that need to verify output
"""
old_stdout = sys.stdout
if PY3:
fake_stream = StringIO()
else:
fake_stream = BytesIO()
try:
sys.stdout = fake_stream
yield fake_stream
finally:
sys.stdout = old_stdout
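# A minimal usage sketch for swap_stdout (illustrative, not part of the original helpers):
#     with swap_stdout() as fake_stream:
#         print('hello')
#     captured = fake_stream.getvalue()  # the text written while stdout was swapped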
class ModuleTestCase(unittest.TestCase):
def setUp(self, module_args=None):
if module_args is None:
module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
| [
"[email protected]"
] | |
77ceb0223c09fd8f4e1e20c731296bf4051c99b9 | f4fce41f2b3cba606d2a36075de356434602d1c0 | /xwing_rulebook/rules/migrations/0005_auto_20170114_1838.py | dacc057a40679a2cab8dc474b609d6614fdb0e31 | [] | no_license | lvisintini/xwing-rulebook | 24a392c12a2b13027e7cf65b9cc41e8a21585e3c | 6dc7ac58f962a4928843364dcfc077638384dc16 | refs/heads/master | 2021-01-22T19:41:32.960048 | 2018-07-25T10:53:35 | 2018-07-25T10:53:35 | 85,224,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-14 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rules', '0004_auto_20170114_0010'),
]
operations = [
migrations.AlterField(
model_name='content',
name='file',
field=models.FilePathField(blank=True, null=True, path='/home/lvisintini/src/xwing-rulebook/xwing_rulebook/static', recursive=True),
),
]
| [
"[email protected]"
] | |
7d538662625104748e0d8ce3db20225b2e8cb6a1 | 30ec40dd6a81dbee73e7f14c144e20495960e565 | /kubernetes/test/test_v1_persistent_volume.py | 3f2fe9cff5949e5488c0fb3e07e8efbe740227a8 | [
"Apache-2.0"
] | permissive | jonathan-kosgei/client-python | ae5a46968bcee19a3c62e1cefe227131ac9e7200 | 4729e6865d810824cafa312b4d06dfdb2d4cdb54 | refs/heads/master | 2021-01-20T14:59:10.435626 | 2017-05-08T16:55:51 | 2017-05-08T16:55:51 | 90,700,132 | 1 | 0 | null | 2017-05-09T03:50:42 | 2017-05-09T03:50:42 | null | UTF-8 | Python | false | false | 883 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume
class TestV1PersistentVolume(unittest.TestCase):
""" V1PersistentVolume unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1PersistentVolume(self):
"""
Test V1PersistentVolume
"""
model = kubernetes.client.models.v1_persistent_volume.V1PersistentVolume()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
60478f86d50f53b1d361c8a21deef2a43bb1ef65 | 6c33e4baeda15398a910382ed64f646825591dd0 | run_project/02_Pima_Indian.py | 893c61f5e55315aec58778663dd56e783fb4b587 | [] | no_license | ss820938ss/pythonProject_deeplearning | 0ad26797299df1eb5b549bd2a5309502d58a495c | baa55f492f07da955b45573ac52d7f61f2d0ee0d | refs/heads/master | 2023-07-09T15:50:37.319896 | 2021-08-10T05:15:46 | 2021-08-10T05:15:46 | 391,771,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | # Import the pandas library.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the Pima Indian diabetes dataset, assigning a name to each column as it is loaded.
df = pd.read_csv('../dataset/pima-indians-diabetes.csv',
names=["pregnant", "plasma", "pressure", "thickness", "insulin", "BMI", "pedigree", "age", "class"])
# Look at the first 5 rows.
print(df.head(5))
# Check the overall information about the data.
print(df.info())
# Print more detailed statistics for each column.
print(df.describe())
# Print only the plasma values and the class.
print(df[['plasma', 'class']])
# Plot the correlations between the columns as a heatmap.
colormap = plt.cm.gist_heat  # set the color scheme of the plot
plt.figure(figsize=(12, 12))  # set the size of the figure
# Configure the heatmap; vmax is set to 0.5 so that values closer to 0.5 are drawn in brighter colors.
sns.heatmap(df.corr(), linewidths=0.1, vmax=0.5, cmap=colormap, linecolor='white', annot=True)
plt.show()
grid = sns.FacetGrid(df, col='class')
grid.map(plt.hist, 'plasma', bins=10)
plt.show()
# Import the Keras functions needed to run the deep-learning model.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Import the required libraries.
import numpy
import tensorflow as tf
# Set the random seeds so that every run produces the same result.
numpy.random.seed(3)
tf.random.set_seed(3)
# Load the data.
dataset = numpy.loadtxt("../dataset/pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, 0:8]
Y = dataset[:, 8]
# Set up the model.
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile the model.
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Train the model.
model.fit(X, Y, epochs=200, batch_size=10)
# Print the results; model.evaluate returns [loss, accuracy], so index 1 is the accuracy.
print("\n Accuracy: %.4f" % (model.evaluate(X, Y)[1]))
| [
"[email protected]"
] | |
98d4948b986f60c3d4fb60f286e763e24192f924 | 71b11008ab0455dd9fd2c47107f8a27e08febb27 | /09、UI自动化测试及黑马头条项目实战/day08/03代码/等待操作.py | 2c49f02ff15dd038da5d5fa6df7401ffbae20dc0 | [] | no_license | zmh19941223/heimatest2021 | 49ce328f8ce763df0dd67ed1d26eb553fd9e7da4 | 3d2e9e3551a199bda9945df2b957a9bc70d78f64 | refs/heads/main | 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.common.by import By
from utils import get_element, input_text
des_cap = {
    "platformName": "android",  # "android" or "ios"
    "platformVersion": "5.1.1",  # version number of the platform OS
    "deviceName": "****",  # device ID name ("****" works when only one device is connected)
    "appPackage": "com.android.settings",  # package name of the app
    "appActivity": ".Settings",  # activity (screen) name of the app
    "resetKeyboard": True,  # reset the device's input keyboard
    "unicodeKeyboard": True  # use Unicode-encoded input
    ####"".module_main.activity.MainActivity""
}  # dictionary of desired-capability parameters
driver = webdriver.Remote("http://localhost:4723/wd/hub", des_cap)
# Find the WLAN element
wlan_btn = By.XPATH, "//*[@text='WLAN']"
get_element(driver, wlan_btn).click()
time.sleep(2)
# Implement a long press via press, wait, and release
TouchAction(driver).press(x=467, y=569).wait(3000).release().perform()
time.sleep(3)
driver.quit()
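# An alternative long-press sketch using TouchAction.long_press (assuming this
# version of the Appium Python client provides it; not part of the original script):
# TouchAction(driver).long_press(x=467, y=569, duration=3000).release().perform()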
| [
"[email protected]"
] | |
6effd0d2562d816e58d889808b24eb4eba7e3903 | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project2/Project2/.history/blog/models_20211114171729.py | abf6466b64bc0655018e82ba7b33ac84f8f2a026 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 378 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length=250, blank=True)
slug = models.SlugField(max_length=250, blank=True)
author = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.TextField()
date = models.DateField()
def __str__(self):
return self.title
| [
"[email protected]"
] | |
29d01b80c275af77e63f5fcd122ffecdf2cc0a5c | e4839f9c32cb57a2b907f2b3eb05f87b7cdcd9e5 | templates/login/kamran.py | d2b1b5983fde4557ae6b6f00fd9127123f826e74 | [] | no_license | kalantarlikamran2/ali | 4fe900cfa67f40b3db1e2b850383871505e5249f | 413ece9ed5c3c45281dfbf3d5ce17af1da787e4a | refs/heads/main | 2023-01-03T15:51:35.826027 | 2020-11-04T20:46:29 | 2020-11-04T20:46:29 | 308,950,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | # Kivy imports needed for App and Label used below.
from kivy.app import App
from kivy.uix.label import Label
class ilkUygulama(App):
    def build(self):
        return Label(text='Merhaba Dünya!')  # Turkish for 'Hello World!'
ilkUygulama().run() | [
"[email protected]"
] | |
e46bcceeb1e79b9edce031b21c399f957deb42c3 | 1843fd5ccb4377240e664acd21ba5a9369eca2ab | /bluebottle/cms/migrations/0052_auto_20171027_1419.py | f9984292ed4b5065b5abffe1a367865208d217c1 | [
"BSD-2-Clause"
] | permissive | raux/bluebottle | ba2e576cebcb6835065004c410b22bd8a6b9ee29 | 49d92b5deb289c1539f99122abc20f845577b879 | refs/heads/master | 2020-03-27T03:20:11.465491 | 2018-08-23T13:09:25 | 2018-08-23T13:09:25 | 145,854,614 | 0 | 0 | null | 2018-08-23T13:05:00 | 2018-08-23T13:04:59 | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-27 12:19
from __future__ import unicode_literals
from django.db import migrations, connection
from django.core.management import call_command
def migrate_homepage(apps, schema_editor):
call_command('migrate_homepage', tenant=connection.tenant.schema_name)
class Migration(migrations.Migration):
dependencies = [
('cms', '0051_auto_20171024_1631'),
]
operations = [
migrations.RunPython(migrate_homepage)
]
| [
"[email protected]"
] | |
715184433e0880b06cffd93aafcb0ed70d537c8d | 930822ded3de346524648244a6f8edc3e7a2a038 | /leetcode/maxProfit.py | 876984ef35022d6239ba77600de1160b2c0cb8db | [] | no_license | xy2333/Leetcode | 4ad317fa21d3b3c37859e76b25a87993c22ca1b2 | 5915e039868527d624ee4f0ad431d23c6ed2d8bd | refs/heads/master | 2020-06-19T13:30:14.755534 | 2019-08-12T02:12:03 | 2019-08-12T02:12:03 | 196,726,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | class Solution:
    def maxProfit(self, prices):
        # One pass: track the lowest price seen so far and the best profit
        # from selling at the current price after buying at that low point.
        if len(prices) <= 1:
            return 0
        maxprofit = 0
        min_price = prices[0]
        for i in range(1, len(prices)):
            if prices[i] < min_price:
                min_price = prices[i]
            else:
                maxprofit = max(maxprofit, prices[i] - min_price)
return maxprofit | [
"[email protected]"
] | |
4fe953b347177ae6fed8e7472e954e208a94074c | 4984a934467631ffe35dd277594184671284d5f6 | /Python: Create a Grid/Create a Grid 05/graphics_environment.py | 9a989e310b7cb8d3e4ba4501dd2d8cd2254679c0 | [
"MIT"
] | permissive | poly451/Tutorials | 8ce2a9d1166f7d63b1ce1e92bded2da16a7d29ef | 27ca5dc09bc69d2bced44cddbe4e7fd201c0e66f | refs/heads/master | 2022-05-17T06:42:16.833986 | 2022-05-10T02:24:54 | 2022-05-10T02:24:54 | 183,793,357 | 16 | 24 | null | null | null | null | UTF-8 | Python | false | false | 56,529 | py | import sys, os
import pygame
import constants
import utils
# -----------------------------------------------------------
# class Event
# -----------------------------------------------------------
class Event:
    def __init__(self, mydict):
        # print("mydict: {}".format(mydict))
        self.index = mydict["index"]
        if not isinstance(self.index, int):
            raise ValueError("Error!")
        self.condition = mydict["condition"]
        s = mydict["value"].lower().strip()
        if s == "true":
            self._is_fulfilled = True
        elif s == "false":
            self._is_fulfilled = False
        else:
            raise ValueError("Error")
    # ----
    @property
    def is_fulfilled(self):
        print("Getting value Event.is_fulfilled. Is: {}".format(self._is_fulfilled))
        return self._is_fulfilled
    @is_fulfilled.setter
    def is_fulfilled(self, new_value):
        print("_is_fulfilled being set to {}".format(new_value))
        if new_value not in (True, False):
            raise ValueError("Error!")
        self._is_fulfilled = new_value
    @is_fulfilled.deleter
    def is_fulfilled(self):
        del self._is_fulfilled
def fileline(self):
s = "index: {}\ncondition: {}\nvalue: {}\n"
s = s.format(self.index, self.condition, self.is_fulfilled)
return s
def debug_print(self):
s = "index: {}, condition: {}, value: {} ({})"
s = s.format(self.index, self.condition, self.is_fulfilled, type(self.is_fulfilled))
print(s)
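# Based on Event.fileline() above, an events.txt file is a sequence of
# three-line records; a hypothetical entry would look like:
#     index: 0
#     condition: talked_to_barkeep
#     value: False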
# -----------------------------------------------------------
# class Events
# -----------------------------------------------------------
class Events:
def __init__(self, zone_name, map_name):
self.zone_name = zone_name
self.map_name = map_name
self.events = []
def read_data(self):
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, "events.txt")
print("filepath: {}".format(filepath))
if utils.file_is_empty(filepath) == True:
return False
mylist = utils.read_data_file(filepath, 3)
for a_dict in mylist:
# print(a_dict)
new_elem = Event(a_dict)
self.events.append(new_elem)
def save_data(self):
s = ""
for elem in self.events:
s += "{}\n".format(elem.fileline())
print("debug map_name: {}".format(self.map_name))
print("debug s: {}".format(s))
# if self.map_name == "map01":
# raise NotImplemented
# else:
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, "events.txt")
with open(filepath, "w") as f:
f.write(s)
    def set_a_value(self, condition, value):
        """Set the fulfilled-value of the event whose condition matches; returns True if found."""
        if value not in (False, True):
            raise ValueError("Error")
        # ----
        for elem in self.events:
            # print("key: -{}-, condition: -{}-".format(elem.condition, condition))
            if elem.condition == condition:
                elem.is_fulfilled = value
                return True
        return False
def get_a_value(self, condition):
if len(condition) == 0:
raise ValueError("Error!")
# ----
for elem in self.events:
if elem.condition == condition:
return elem.is_fulfilled
return None
def debug_print(self):
for elem in self.events:
elem.debug_print()
# -----------------------------------------------------------
# class Action
# -----------------------------------------------------------
class Action(pygame.sprite.Sprite):
def __init__(self, x, y, name):
super().__init__()
self.x, self.y = x, y
self.name = name
self.command = ""
self.image_display = ""
self.data = ""
self.inventory_condition = ""
self.game_condition = ""
self.dialog_text = ""
self.comment = ""
self.completed = False
# ----
self.image = None
self.rect = None
def read_data(self, zone_name, map_name):
filepath = os.path.join("data", "zones", zone_name, map_name, "actions.txt")
mylines = utils.read_data_file(filepath, 8)
if mylines is None or len(mylines) == 0:
raise ValueError("Error!")
# ----
target_dict = {}
for elem in mylines:
if elem["name"] == self.name:
target_dict = elem
if len(target_dict) == 0:
s = "The name {} was not found in {}".format(self.name, target_dict)
raise ValueError(s)
self.command = target_dict["command"]
if not self.command in constants.MAP_COMMANDS:
raise ValueError("Error! {} is not in {}".format(target_dict["command"], constants.MAP_COMMANDS))
self.image_display = target_dict["image_display"]
self.data = target_dict["data"]
self.inventory_condition = target_dict["inventory_condition"]
if self.inventory_condition == "none":
self.inventory_condition = None
# Need to be able to check that the player has successfully
# completed a conversation.
# Perhaps also check to see that the conversation is in the
# events file.
self.game_condition = target_dict["game_condition"].lower().strip()
if self.game_condition == "none":
self.game_condition = None
self.dialog_text = target_dict["dialog_text"]
self.comment = target_dict["comment"]
self.completed = False
# ----
self.image_display = self.image_display.replace(" ", "_")
if self.image_display.find(".png") == -1:
self.image_display = "{}.png".format(self.image_display)
filepath = utils.get_filepath(self.image_display)
if filepath is None:
s = "I wasn't able to find a path for the file: {}".format(self.image_display)
raise ValueError(s)
try:
self.image = pygame.image.load(filepath).convert_alpha()
except Exception as e:
print(e)
s = "Couldn't open: {}".format(filepath)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
# ----
def conditions_passed(self, inventory, events):
if self.inventory_condition in ["any", None]:
if self.game_condition in ["any", None]:
print("There was no inventory or game condition.")
return True
# ----
if not self.inventory_condition in ["any", None]:
print("There is an inventory condition: {}".format(self.inventory_condition))
the_item = inventory.get_item_by_name(self.inventory_condition)
if the_item is None:
print("the item ({}) was not found in the inventory".format(self.inventory_condition))
return False
if not self.game_condition in ["any", None]:
print("There is a game condition: {}".format(self.game_condition))
this_value = events.get_a_value(self.game_condition)
if this_value is None:
print("An event that matched this game condition was not found.")
raise ValueError("Error!")
return this_value
else:
return True
# ---------------------
def parse_data(self):
print("data string: {}".format(self.data))
mylist = self.data.split(";")
mylist = [i.strip() for i in mylist if len(i.strip()) > 0]
print("mylist: {}".format(mylist))
mydict = {}
mydict = utils.key_value(mylist[0], mydict)
mydict = utils.key_value(mylist[1], mydict)
return mydict
def display_text(self):
return self.dialog_text
def debug_print(self):
s = "x,y: ({},{}), name: {}, command: {}, image_display: {}, data: {}, inventory_condition: {}, " \
"game_condition: {}, dialog_text: {}, comment: {}, completed: {}"
s = s.format(self.x, self.y, self.name, self.command, self.image_display, self.data,
self.inventory_condition, self.game_condition, self.dialog_text, self.comment, self.completed)
print(s)
# -----------------------------------------------------------
# class Actions
# -----------------------------------------------------------
class Actions:
def __init__(self, zone_name, map_name):
self.zone_name = zone_name
self.map_name = map_name
self.actions = []
self.keep_looping = True
self.all_sprites = pygame.sprite.Group()
self.init_pygame()
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def read_data(self):
self._load_map()
#################
# Class Actions #
#################
def _load_map(self):
print("Loading _load_map")
filename = "{}_actions.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
print("opening zone filepath: {}".format(filepath))
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
mytiles = [i[3:] for i in mytiles[2:]]
# ------------------------------------------------------------------
self.obstacles = []
for col, tiles in enumerate(mytiles):
print(tiles)
list_tiles = tiles.split(";")
list_tiles = [i.strip() for i in list_tiles if len(i.strip()) > 0]
for row, tile in enumerate(list_tiles):
print(tile)
if tile == "..":
pass
elif len(tile) > 0:
new_action = Action(row, col, tile)
new_action.read_data(self.zone_name, self.map_name)
self.actions.append(new_action)
else:
s = "Error! I don't recognize this: -{}-".format(tile)
raise ValueError(s)
def conditions_passed(self, player, events):
current_action = self.get_action(player.x, player.y)
if current_action is None:
# We should not have reached this point unless there was an action
# on the player's tile.
raise ValueError("Error!")
current_action.debug_print()
return current_action.conditions_passed(player.inventory, events)
def remove_tile(self, x, y):
print("Number of actions BEFORE: {}".format(len(self.actions)))
mylist = []
self.all_sprites = pygame.sprite.Group()
for elem in self.actions:
if elem.x == x and elem.y == y:
pass
else:
mylist.append(elem)
print("Number of actions AFTER: {}".format(len(mylist)))
self.actions = mylist
def get_action(self, x, y):
print(self.actions)
if self.actions is None:
raise ValueError("Error!")
if len(self.actions) == 0:
raise ValueError("Error!")
# ----
for action in self.actions:
if action.x == x and action.y == y:
return action
return None
# ---------------------------------------------------
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
else:
print("I don't recognize this event.key in handle_events: {}".format(event.key))
# ------------------------------------------------------
def update_classes(self, all_sprites):
for elem in self.actions:
all_sprites.add(elem)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
while self.keep_looping == True:
self.handle_events()
self.draw()
# ------------------------------------------------------
def debug_print(self):
for elem in self.actions:
elem.debug_print()
def debug_test(self):
for an_action in self.actions:
if len(an_action.game_condition) == 0:
return False
return True
# -----------------------------------------------------------
# class Trigger
# -----------------------------------------------------------
class Trigger:
def __init__(self, x, y, name):
self.x, self.y = x, y
self.name = name
self.command = ""
self.data = ""
self.inventory_condition = ""
self.game_condition = ""
self.comment = ""
self.completed = False
def read_data(self, zone_name, map_name):
filepath = os.path.join("data", "zones", zone_name, map_name, "triggers.txt")
print("filepath: {}".format(filepath))
mylines = utils.read_data_file(filepath, 6)
if mylines is None: return False
if len(mylines) == 0: return False
# print("mylines: {}".format(mylines))
# ----
was_entered = False
for mydict in mylines:
# print("mydict: {}".format(mydict))
# print("self.name: -{}- == mydict[name]: -{}-".format(self.name, mydict["name"]))
# print("-{}=={}-".format(self.name, mydict["name"]))
if self.name == mydict["name"].replace(":", ""):
was_entered = True
self.command = mydict["command"]
if not self.command in constants.MAP_COMMANDS:
raise ValueError("{} not in {}".format(self.command, constants.MAP_COMMANDS))
self.data = mydict["data"]
self.inventory_condition = mydict["inventory_condition"].lower().strip()
if self.inventory_condition == "none": self.inventory_condition = None
self.game_condition = mydict["game_condition"]
if self.game_condition == "none": self.game_condition = None
self.comment = mydict["comment"]
if was_entered == False:
raise ValueError("Error!")
def parse_data(self):
# print("data string: {}".format(self.data))
if self.data.find("zone_name") == -1 or self.data.find("map_name") == -1:
s = "current_trigger.data: {}".format(self.data)
raise ValueError(s)
mylist = self.data.split(";")
mylist = [i.strip() for i in mylist if len(i.strip()) > 0]
mydict = {}
# print(mylist)
mydict = utils.key_value(mylist[0], mydict)
mydict = utils.key_value(mylist[1], mydict)
return mydict
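    # The data string parsed above is two "key: value" pairs separated by ";".
    # Given the zone_name/map_name check, a typical (hypothetical) value is:
    #     "zone_name: zone01; map_name: map02"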
def conditions_passed(self, inventory, events):
if self.inventory_condition in ["any", None]:
if self.game_condition in ["any", None]:
print("There was no inventory or game condition.")
return True
# ----
if not self.inventory_condition in ["any", None]:
print("There is an inventory condition: {}".format(self.inventory_condition))
the_item = inventory.get_item_by_name(self.inventory_condition)
if the_item is None:
print("the item ({}) was not found in the inventory".format(self.inventory_condition))
return False
if not self.game_condition in ["any", None]:
print("There is a game condition: {}".format(self.game_condition))
this_value = events.get_a_value(self.game_condition)
if this_value is None:
print("An event that matched this game condition was not found.")
raise ValueError("Error!")
return this_value
else:
return True
def debug_print(self):
s = "x,y: ({},{}), name: {}, command: {}, data: {}, " \
"inventory_condition: {}, game_condition: {}, comment: {}, completed: {}"
s = s.format(self.x, self.y, self.name, self.command,
self.data, self.inventory_condition, self.game_condition, self.comment, self.completed)
print(s)
# -----------------------------------------------------------
# class Triggers
# -----------------------------------------------------------
class Triggers:
def __init__(self, zone_name, map_name):
self.zone_name = zone_name
self.map_name = map_name
self.triggers = []
def read_data(self):
self.load_map()
def load_map(self):
filename = "{}_triggers.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
# print("opening zone filepath: {}".format(filepath))
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
# mytiles = mytiles[3:]
# [print(i) for i in mytiles]
mytiles = [i[3:] for i in mytiles[2:]]
# [print(i) for i in mytiles]
# ------------------------------------------------------------------
for col, tiles in enumerate(mytiles):
list_tiles = tiles.split(";")
list_tiles = [i.strip() for i in list_tiles if len(i.strip()) > 0]
for row, tile in enumerate(list_tiles):
# print("tile: {}".format(tiles))
# print("row: {}, tile: {}".format(row, tile))
if tile != "..":
# if tile.find("a") == -1:
new_trigger = Trigger(row, col, tile)
new_trigger.read_data(self.zone_name, self.map_name)
# new_trigger.debug_print()
self.triggers.append(new_trigger)
elif tile == "..":
pass
else:
s = "Error! I don't recognize this: {}".format(tile)
raise ValueError(s)
def get_trigger(self, x, y):
for trigger in self.triggers:
if trigger.x == x:
if trigger.y == y:
return trigger
return None
def conditions_passed(self, player, events):
current_trigger = self.get_trigger(player.x, player.y)
if current_trigger is None:
# We should not have reached this point unless there was an action
# on the player's tile.
raise ValueError("Error!")
# current_trigger.debug_print()
return current_trigger.conditions_passed(player.inventory, events)
def debug_print(self):
for elem in self.triggers:
elem.debug_print()
def debug_test(self):
for a_trigger in self.triggers:
if not a_trigger.game_condition is None:
if len(a_trigger.game_condition) == 0:
return False
return True
def __len__(self):
return len(self.triggers)
# -----------------------------------------------------------
# class Walkable
# -----------------------------------------------------------
"""
As you can see, class Grass uses inheritance. We do this so that
we can add this class--which is now a subclass of the pygame.sprite.Sprite
class and so, now, is itself a Sprite--to a pygame.sprite.Group.
If none of that makes any sense to you, don't worry!
I would recommend that you start using inheritance and,
as you see how it works, you will come
to understand it. And, please, ask questions! Ask me, ask on
Stack Overflow (https://stackoverflow.com/) or even Twitter.
"""
class Walkable(pygame.sprite.Sprite):
def __init__(self, mydict):
super().__init__()
self.x = mydict["x"]
self.y = mydict["y"]
self.kind = mydict["kind"]
self.species = mydict["species"]
# self.trigger = mydict["trigger"]
self.image_filename = ""
self.image = None
self.rect = None
self.comment = ""
# ----
if self.kind == "npc_dead" and self.species == 0:
self.image_filename = utils.get_filepath("dead_person_cobblestones.png")
elif self.kind == "npc_dead" and self.species == 1:
self.image_filename = utils.get_filepath("dead_person_light_green_tile.png")
elif self.kind == "chair" and self.species == 1:
self.image_filename = utils.get_filepath("chair_right_light_green_tile.png")
elif self.kind == "chair" and self.species == 2:
self.image_filename = utils.get_filepath("chair_left_light_green_tile.png")
elif self.kind == "chair" and self.species == 3:
self.image_filename = utils.get_filepath("chair_down_light_green_tile.png")
elif self.kind == "chair" and self.species == 4:
self.image_filename = utils.get_filepath("chair_up_light_green_tile.png")
elif self.kind == "tile" and self.species == 0:
self.image_filename = utils.get_filepath("floor_tile_light_green.png")
elif self.kind == "cobblestones" and self.species == 0:
self.image_filename = utils.get_filepath("cobblestones11.png")
elif self.kind == "cobblestones" and self.species == 1:
self.image_filename = utils.get_filepath("cobblestones11_light01.png")
elif self.kind == "cobblestones" and self.species == 2:
self.image_filename = utils.get_filepath("cobblestones11_light02.png")
elif self.kind == "portal" and self.species == 0:
self.image_filename = utils.get_filepath("portal_cobblestones02.png")
elif self.kind == "grass" and self.species == 0:
self.image_filename = utils.get_filepath("grass02.png")
elif self.kind == "flowers" and self.species == 0:
self.image_filename = utils.get_filepath("flowers01_grass01.png")
elif self.kind == "strawberries" and self.species == 0:
self.image_filename = utils.get_filepath("strawberries01.png")
elif self.kind == "mushrooms" and self.species == 0:
self.image_filename = utils.get_filepath("mushrooms01_grass01.png")
elif self.kind == "forest" and self.species == 0:
self.image_filename = utils.get_filepath("forest_02.png")
elif self.kind == "portal" and self.species == 1:
self.image_filename = utils.get_filepath("portal_grass.png")
elif self.kind == "portal" and self.species == 2:
self.image_filename = utils.get_filepath("portal_grass02.png")
elif self.kind == "portal" and self.species == 3:
self.image_filename = utils.get_filepath("portal_pub01.png")
elif self.kind == "pub" and self.species == 0:
self.image_filename = utils.get_filepath("building09.png")
elif self.kind == "pub" and self.species == 1:
self.image_filename = utils.get_filepath("building09_grass.png")
elif self.kind == "provisioner" and self.species == 0:
self.image_filename = utils.get_filepath("building07.png")
elif self.kind == "provisioner" and self.species == 1:
self.image_filename = utils.get_filepath("building07_grass.png")
elif self.kind == "black" and self.species == 0:
self.image_filename = utils.get_filepath("black.png")
elif self.kind == "empty" and self.species == 0:
self.image_filename = utils.get_filepath("empty.png")
else:
s = "Couldn't find: kind: {}, species: {}".format(self.kind, self.species)
raise ValueError(s)
# ----
# self.filepath = os.path.join("data", "images", self.image_filename)
try:
self.image = pygame.image.load(self.image_filename).convert_alpha()
except:
s = "Couldn't open: {}".format(self.image_filename)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
def _collide(self, dx=0, dy=0, obstacles=None):
for a_tile in obstacles:
if a_tile.x == self.x + dx and a_tile.y == self.y + dy:
return True
return False
def move(self, dx=0, dy=0, obstacles=None):
if not self._collide(dx, dy, obstacles):
self.x += dx
self.y += dy
# self.rect = self.rect.move(self.x * TILESIZE, self.y * TILESIZE)
self.rect = self.rect.move(dx * constants.TILESIZE, dy * constants.TILESIZE)
# print("Player has moved. x,y: {},{}; dx={}, dy={}".format(self.x, self.y, dx, dy))
    def debug_print(self):
        print("image_filename: {}".format(self.image_filename))
        print("x,y: {},{}".format(self.x, self.y))
# -----------------------------------------------------------
# class Walkables
# -----------------------------------------------------------
class Walkables:
def __init__(self, zone_name, map_name):
if zone_name == None:
raise ValueError("Error!")
if len(zone_name) == 0:
raise ValueError("Error!")
if map_name == None:
raise ValueError("Error!")
if len(map_name) == 0:
raise ValueError("Error!")
self.zone_name = zone_name
self.map_name = map_name
# ----
self.all_sprites = pygame.sprite.Group()
self.init_pygame()
self.loop_index = 0
# self.walkables = self.read_data()
self.walkables = []
self.keep_looping = True
# if self.walkables is None:
# raise ValueError("Doh!")
def read_data(self):
self._load_map()
def _load_map(self):
filename = "{}_walkables.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
mytiles = [i[3:] for i in mytiles[2:]]
# [print(i) for i in mytiles]
# ------------------------------------------------------------------
# t1 = False
# t2 = False
big_list = []
for col, tiles in enumerate(mytiles):
tile_list = tiles.split(";")
tile_list = [i.strip() for i in tile_list if len(i.strip()) > 0]
for row, tile in enumerate(tile_list):
# print("tile: {}".format(tile))
if tile == "a0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "npc_dead"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
if tile == "a1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "npc_dead"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "f1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "chair"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
# print("obstacle (wall) tile added at x,y: ({},{})".format(row, col))
elif tile == "f2":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "chair"
mydict["species"] = 2
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "f3":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "chair"
mydict["species"] = 3
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "f4":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "chair"
mydict["species"] = 4
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "c0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "pub"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "c1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "pub"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "e0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "provisioner"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "e1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "provisioner"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "d0":
# cobblestones
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "cobblestones"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
# if row == 5 and col == 8:
# mydict["comment"] = "load_map;map01"
# t1 = True
# elif row == 6 and col == 8:
# mydict["comment"] = "load_map;map01"
# t2 = True
elif tile == "d1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "cobblestones"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "d2":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "cobblestones"
mydict["species"] = 2
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "h0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "tile"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "b0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "black"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "l0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "portal"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "l1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "portal"
mydict["species"] = 1
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "l2":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "portal"
mydict["species"] = 2
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "g0":
mydict = {}
mydict["x"], mydict["y"] = row, col
mydict["kind"] = "grass"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "m0":
mydict = {}
mydict["x"], mydict["y"] = row, col
mydict["kind"] = "mushrooms"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "f0":
mydict = {}
mydict["x"], mydict["y"] = row, col
mydict["kind"] = "forest"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "h0":
mydict = {}
mydict["x"], mydict["y"] = row, col
mydict["kind"] = "flowers"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "s0":
mydict = {}
mydict["x"], mydict["y"] = row, col
mydict["kind"] = "strawberries"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
elif tile == "..":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "empty"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
else:
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "empty"
mydict["species"] = 0
mydict["trigger"] = ""
mywalk = Walkable(mydict)
big_list.append(mywalk)
# if t1 == False or t2 == False:
# raise ValueError("Something went wrong.")
self.walkables = big_list
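    # Given the parsing in _load_map (the first two lines of the file and the
    # first three characters of each row are skipped, and tiles are split on
    # ";"), a hypothetical row of a *_walkables.txt map could look like:
    #     00;g0;g0;d0;m0;..;f0;
    # where the leading "00;" is the skipped row label and ".." means empty.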
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
else:
print("I don't recognize this event.key in handle_events: {}".format(event.key))
def update_classes(self, all_sprites):
for elem in self.walkables:
all_sprites.add(elem)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
while self.keep_looping == True:
self.handle_events()
self.draw()
def __len__(self):
return len(self.walkables)
def __getitem__(self, item):
return self.walkables[item]
def __next__(self):
if self.loop_index >= len(self.walkables):
self.loop_index = 0
raise StopIteration
else:
this_value = self.walkables[self.loop_index]
self.loop_index += 1
return this_value
def __iter__(self):
return self
def debug_print(self):
print("Number of grasses: {}".format(len(self.walkables)))
if len(self.walkables) == 0:
s = "Error! There are no grasses to print."
raise ValueError(s)
for grass in self.walkables:
grass.debug_print()
# -----------------------------------------------------------
# class Obstacle
# -----------------------------------------------------------
class Obstacle(pygame.sprite.Sprite):
def __init__(self, mydict):
super().__init__()
self.x = mydict["x"]
self.y = mydict["y"]
self.kind = mydict["kind"]
self.species = mydict["species"]
self.trigger = mydict["trigger"]
# self.image_filename = mydict["image_filename"]
self.image_filename = ""
self.image = None
self.rect = None
# ----
if self.kind == "wall" and self.species == 0:
self.image_filename = os.path.join("structures", "brick_wall02.png")
elif self.kind == "stove" and self.species == 0:
self.image_filename = os.path.join("structures", "stove01.png")
elif self.kind == "table" and self.species == 0:
self.image_filename = os.path.join("structures", "table_large_empty_01.png")
elif self.kind == "table" and self.species == 1:
self.image_filename = os.path.join("structures", "table_and_wine_02.png")
elif self.kind == "counter" and self.species == 0:
self.image_filename = os.path.join("structures", "tile_counter.png")
elif self.kind == "empty" and self.species == 0:
self.image_filename = "empty.png"
elif self.kind == "streetlamp" and self.species == 0:
self.image_filename = os.path.join("structures", "streetlamp02.png")
elif self.kind == "streetlamp" and self.species == 1:
self.image_filename = os.path.join("structures", "streetlamp03.png")
elif self.kind == "forest" and self.species == 0:
self.image_filename = os.path.join("nature_tiles", "medievalTile_48.png")
elif self.kind == "rocks" and self.species == 0:
self.image_filename = os.path.join("nature_tiles", "pile_of_rocks01_grey01.png")
else:
s = "I can't find kind: {}, species: {} ({})".format(self.kind, self.species, type(self.species))
raise ValueError(s)
# ----
filepath = os.path.join("data", "images", self.image_filename)
try:
self.image = pygame.image.load(filepath).convert_alpha()
except Exception as e:
print(e)
s = "Couldn't open: {}".format(filepath)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
def debug_print(self):
s = "(x,y): {},{}; kind:{}, species: {}, trigger: {}, image_filename: {}, rect: {}"
s = s.format(self.x, self.y, self.kind, self.species, self.trigger, self.image_filename, self.rect)
print(s)
# -----------------------------------------------------------
# class Obstacles
# -----------------------------------------------------------
class Obstacles:
def __init__(self, zone_name, map_name):
self.zone_name = zone_name
self.map_name = map_name
self.init_pygame()
self.obstacles = []
self.loop_index = 0
self.keep_looping = True
self.all_sprites = pygame.sprite.Group()
def read_data(self):
self._load_map()
def _load_map(self):
filename = "{}_obstacles.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
print("Reading in obstacle map ...")
print("zone: {}, map: {}".format(self.zone_name, self.map_name))
print("filepath for obstacle file: {}".format(filepath))
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
mytiles = [i[3:] for i in mytiles[2:]]
# ------------------------------------------------------------------
self.obstacles = []
for col, tiles in enumerate(mytiles):
list_tiles = tiles.split(";")
list_tiles = [i.strip() for i in list_tiles if len(i.strip()) > 0]
for row, tile in enumerate(list_tiles):
if tile == "w0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "wall"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "s0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "stove"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "l0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "streetlamp"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
# print("obstacle (wall) tile added at x,y: ({},{})".format(row, col))
elif tile == "t0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "table"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "t1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "table"
mydict["species"] = 1
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "c0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "counter"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "l1":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "streetlamp"
mydict["species"] = 1
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "f0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "forest"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "m0":
mydict = {}
mydict["x"] = row
mydict["y"] = col
mydict["kind"] = "rocks"
mydict["species"] = 0
mydict["trigger"] = ""
my_obstacle = Obstacle(mydict)
self.obstacles.append(my_obstacle)
elif tile == "..":
pass
else:
s = "Error! I don't recognize this: {}".format(tile)
raise ValueError(s)
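# A table-driven alternative to the elif chain in _load_map (illustrative
# sketch only; the TILE_KINDS name is an assumption, not part of the module):
# TILE_KINDS = {"w0": ("wall", 0), "s0": ("stove", 0), "l0": ("streetlamp", 0),
#               "t0": ("table", 0), "t1": ("table", 1), "c0": ("counter", 0),
#               "l1": ("streetlamp", 1), "f0": ("forest", 0), "m0": ("rocks", 0)}
# if tile in TILE_KINDS:
#     kind, species = TILE_KINDS[tile]
#     self.obstacles.append(Obstacle({"x": row, "y": col, "kind": kind,
#                                     "species": species, "trigger": ""}))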
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def collision(self, x, y):
for a_tile in self.obstacles:
if a_tile.kind == "empty":
continue
if a_tile.x == x:
# print("tile y: {}, player y: {}".format(a_tile.y, y))
if a_tile.y == y:
print("tile x,y: ({},{}), player x,y: ({},{})".format(a_tile.x, a_tile.y, x, y))
return True
return False
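# Sketch of an O(1) lookup (an assumption about usage, not original code):
# build the set once after _load_map, then membership replaces the loop above.
# self._blocked = {(t.x, t.y) for t in self.obstacles if t.kind != "empty"}
# def collision(self, x, y):
#     return (x, y) in self._blocked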
# --------------------------------------------------------
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
else:
print("I don't recognize this event.key in handle_events: {}".format(event.key))
# ------------------------------------------------------
def update_classes(self, all_sprites):
for elem in self.obstacles:
all_sprites.add(elem)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
while self.keep_looping:
self.handle_events()
self.draw()
# --------------------------------------------------------
def __len__(self):
return len(self.obstacles)
def __getitem__(self, item):
return self.obstacles[item]
def __next__(self):
if self.loop_index >= len(self.obstacles):
self.loop_index = 0
raise StopIteration
else:
this_value = self.obstacles[self.loop_index]
self.loop_index += 1
return this_value
def __iter__(self):
return self
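# Note: the container acts as its own iterator via loop_index, so nested or
# concurrent loops over one Obstacles instance share state; a delegating
# "def __iter__(self): return iter(self.obstacles)" would avoid that.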
def debug_print(self):
for elem in self.obstacles:
elem.debug_print()
# -----------------------------------------------------------
# class Environment
# -----------------------------------------------------------
class Environment:
def __init__(self, zone_name, map_name):
if zone_name is None or map_name is None:
raise ValueError("zone_name and map_name must not be None")
if len(zone_name) == 0 or len(map_name) == 0:
raise ValueError("zone_name and map_name must not be empty")
self.zone_name = zone_name
self.map_name = map_name
# ----
self.init_pygame()
# ----
self.zone_description = ""
self.obstacles = Obstacles(self.zone_name, self.map_name)
self.walkables = Walkables(self.zone_name, self.map_name)
self.triggers = Triggers(self.zone_name, self.map_name)
self.actions = Actions(self.zone_name, self.map_name)
self.events = Events(self.zone_name, self.map_name)
# self.triggers = None
# ----
self.all_sprites = pygame.sprite.Group()
self.keep_looping = True
# def read_data(self):
# filepath = os.path.join("data", "zones", self.zone_name, "zone_init.txt")
# mylist = utils.read_data_file(filepath, 6)
# mydict = mylist[0]
# self.zone_description = mydict["zone_description"]
# self.obstacles.read_data()
# self.walkables.read_data()
def read_data(self):
self.walkables.read_data()
self.obstacles.read_data()
if self.triggers is not None:
self.triggers.read_data()
if self.actions is not None:
self.actions.read_data()
if self.events is not None:
self.events.read_data()
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def get_walkable_tile(self, x, y):
# for a_tile in self.obstacles:
# if a_tile.x == x:
# if a_tile.y == y:
# return a_tile
for a_tile in self.walkables:
if a_tile.x == x:
if a_tile.y == y:
return a_tile
return None
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
# else:
# print("I don't recognize this event.key in handle_events: {}".format(event.key))
def update_classes(self, all_sprites):
all_sprites = self.obstacles.update_classes(all_sprites)
all_sprites = self.walkables.update_classes(all_sprites)
all_sprites = self.actions.update_classes(all_sprites)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.all_sprites = self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
self.clock.tick(constants.FRAME_RATE)
while self.keep_looping:
self.handle_events()
self.draw()
self.goodbye()
self.myquit()
def goodbye(self):
print("Goodbye!")
def myquit(self):
pygame.quit()
def debug_print(self):
s = "zone_name: {}\nzone_description: {}"
s = s.format(self.zone_name, self.zone_description)
print(s)
self.obstacles.debug_print()
self.walkables.debug_print()
# ************************************************************
def debug_obstacles():
myobstacles = Obstacles(zone_name, map_name)
myobstacles.read_data()
myobstacles.main()
def debug_walkables():
myobstacles = Walkables(zone_name, map_name)
myobstacles.read_data()
myobstacles.main()
def debug_environment():
myenv = Environment(zone_name, map_name)
myenv.read_data()
myenv.main()
def debug_triggers():
mytriggers = Triggers(zone_name, map_name)
mytriggers.read_data()
mytriggers.debug_print()
def debug_actions(zone_name, map_name):
from graphics_fauna import Player
myplayer = Player(player_name, zone_name, map_name)
myplayer.read_data()
myplayer.x = 3
myplayer.y = 2
myevents = Events(zone_name, map_name)
myevents.read_data()
# ----
myactions = Actions(zone_name, map_name)
myactions.read_data()
myactions.main()
# myactions.conditions_passed(myplayer, myevents)
def test_action(zone_name, map_name):
# x, y = 3, 2  # alternate test tile, overridden below
x, y = 2, 2
# ----
myevents = Events(zone_name, map_name)
myevents.read_data()
# ----
from graphics_fauna import Player
myplayer = Player(player_name, zone_name, map_name)  # three-arg form, matching the call in debug_actions above
myplayer.read_data()
# ----
myactions = Actions(zone_name, map_name)
myactions.read_data()
myaction = myactions.get_action(x, y)
if myaction is None:
print("There is no actioin on tile: {},{}".format(x, y))
else:
print("Action found:")
myaction.debug_print()
# ----
the_result = myaction.conditions_passed(myplayer.inventory, myevents)
print("The result: {}".format(the_result))
# ----
print("####")
myaction.debug_print()
def debug_events():
myevents = Events(zone_name, map_name)
myevents.read_data()
myevents.debug_print()
# if myevents.mark_game_condition_completed("conversation_with_random_completed") == False:
# raise ValueError("Error!")
# myevents.save_data()
player_name = "henry"
# zone_name = "swindon_pub_stub"
# zone_name = "swindon"
zone_name = "easthaven"
map_name = "map00"
if __name__ == "__main__":
# debug_obstacles()
# debug_walkables()
debug_environment()
# debug_triggers()
# debug_actions(zone_name, map_name)
# test_action(zone_name, map_name)
# debug_events() | [
"[email protected]"
] | |
3fbbc5c0f26e5caddce1e220e7e5e9422ca33c8c | 6b8366581101e183592ff5d65ba6c228223ef30d | /mp/tokenizer.py | 086a0397d2c952e98301d843c4d9d4c387465260 | [
"MIT"
] | permissive | BPI-STEAM/mpfshell-lite | a3af795502d20f990d2a084a106f3964beb94392 | e603c2abb942bf45c18519883e1b72760c4db04f | refs/heads/master | 2020-07-22T13:11:57.651653 | 2019-08-27T03:58:41 | 2019-08-27T03:58:41 | 207,213,197 | 3 | 1 | MIT | 2019-09-09T03:06:46 | 2019-09-09T03:06:46 | null | UTF-8 | Python | false | false | 2,163 | py | ##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import re
class Token(object):
STR = "STR"
QSTR = "QSTR"
def __init__(self, kind, value=None):
self._kind = kind
self._value = value
@property
def kind(self):
return self._kind
@property
def value(self):
return self._value
def __repr__(self):
if isinstance(self.value, str):
v = "'%s'" % self.value
else:
v = str(self.value)
return "Token('%s', %s)" % (self.kind, v)
class Tokenizer(object):
def __init__(self):
valid_fnchars = "A-Za-z0-9_%#~@/\$!\*\.\+\-\:\\\\"
tokens = [
(r'[%s]+' % valid_fnchars, lambda scanner, token: Token(Token.STR, token)),
(r'"[%s ]+"' % valid_fnchars, lambda scanner, token: Token(Token.QSTR, token[1:-1])),
(r'[ ]', lambda scanner, token: None)
]
self.scanner = re.Scanner(tokens)
def tokenize(self, string):
# print(string, self.scanner.scan(string))
return self.scanner.scan(string)
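# Illustrative usage (not part of the original module): re.Scanner.scan
# returns a (tokens, remainder) pair, and the space rule's None results are
# dropped by the scanner, so:
# Tokenizer().tokenize('get boot.py "my file.txt"')
# -> ([Token('STR', 'get'), Token('STR', 'boot.py'), Token('QSTR', 'my file.txt')], '')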
| [
"[email protected]"
] | |
2eaff4c0a947d923f02ef3bdabca7291afd10e81 | f518506fb620fd29a2db876c05de813508eda519 | /Project19/manage.py | e4a8f6d8ec98276420722e54947fecad37d976b3 | [] | no_license | Niharika3128/Django5-6 | 07435ae9088659e2d192cda60542aee5214e0637 | be3055ca91da45c37f9ec1adb626eea335477746 | refs/heads/master | 2020-06-02T04:28:37.016405 | 2019-06-09T17:28:33 | 2019-06-09T17:28:33 | 191,035,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Project19.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
0315c96e5abc1d9431a14f6e69cc73e0f1086007 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/station/lobby.py | 23ab9fb309b4ecb67c73b5253ea850ec54d24f23 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,461 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\lobby.py
import blue
from carbon.common.script.util.format import FmtAmt, CaseFoldCompare
from carbonui.control.basicDynamicScroll import BasicDynamicScroll
from carbonui.control.scrollentries import SE_BaseClassCore
from carbonui.primitives.container import Container
from carbonui.primitives.flowcontainer import FlowContainer
from carbonui.primitives.frame import Frame
from carbonui.primitives.line import Line
from carbonui.primitives.sprite import Sprite
from carbonui.util.various_unsorted import SortListOfTuples, NiceFilter
from eve.client.script.ui.control.buttonGroup import ButtonGroup
from eve.client.script.ui.control.buttons import BigButton, ToggleButtonGroup, ToggleButtonGroupButton, Button
from eve.client.script.ui.control.entries import Get as GetListEntry
from eve.client.script.ui.control.eveIcon import GetLogoIcon, CheckCorpID
from eve.client.script.ui.control.eveLabel import CaptionLabel, EveLabelSmall, EveLabelMedium, Label
from eve.client.script.ui.control.eveScroll import Scroll
from eve.client.script.ui.control.eveWindow import Window
from eve.client.script.ui.control.infoIcon import InfoIcon
from eve.client.script.ui.control.tabGroup import TabGroup
from eve.client.script.ui.control.themeColored import LineThemeColored
from eve.client.script.ui.control.utilMenu import UtilMenu
from eve.client.script.ui.quickFilter import QuickFilterEdit
from eve.client.script.ui.station import stationServiceConst
import log
import sys
from inventorycommon.util import IsNPC
import uthread
import carbonui.const as uiconst
import localization
import telemetry
import collections
import invCont
import invCtrl
import const
import evegraphics.settings as gfxsettings
from eve.client.script.ui.shared.inventory.invWindow import Inventory as InventoryWindow
from utillib import KeyVal
COLOR_UNDOCK = (0.75,
0.6,
0.0,
1.0)
COLOR_CQ = (0.0,
0.713,
0.75,
1.0)
MAX_CORP_DESC_LENGTH = 140
MAX_CORP_DESC_LINES = 1
BIGBUTTONSIZE = 48
SMALLBUTTONSIZE = 32
BUTTONGAP = 4
AGENTSPANEL = 'agentsPanel'
GUESTSPANEL = 'guestsPanel'
OFFICESPANEL = 'officesPanel'
INVENTORYPANEL = 'inventoryPanel'
class LobbyToggleButtonGroupButton(ToggleButtonGroupButton):
@apply
def displayRect():
fget = ToggleButtonGroupButton.displayRect.fget
def fset(self, value):
ToggleButtonGroupButton.displayRect.fset(self, value)
self.label.width = uicore.ReverseScaleDpi(self.displayWidth) - 6
buttonHeight = uicore.ReverseScaleDpi(self.displayHeight)
if self.label.textheight < buttonHeight:
self.label.top = (buttonHeight - self.label.textheight) / 2
self.label.align = uiconst.CENTERTOP
else:
self.label.top = 0
self.label.align = uiconst.CENTER
return property(**locals())
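# Note on the idiom above (also used by CounterBox.text below): "@apply" is a
# Python 2 builtin that calls the decorated function immediately, so the name
# ends up bound to the property built from the nested fget/fset via
# property(**locals()).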
class CounterBox(Container):
_number = 0
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.label = Label(parent=self, align=uiconst.CENTER, fontPath='res:/UI/Fonts/EveSansNeue-Expanded.otf', fontsize=10)
Frame(bgParent=self, texturePath='res:/UI/Texture/Shared/counterFrame.png', cornerSize=8, offset=-1, color=(0.2, 0.2, 0.2, 1))
if 'text' in attributes:
self.text = attributes.text
else:
self.display = False
@apply
def text():
def fget(self):
return self._number
def fset(self, value):
self._number = value
self.label.text = value
self.width = max(14, self.label.textwidth + 8)
self.height = max(14, self.label.textheight)
if self.label.text:
self.display = True
else:
self.display = False
return property(**locals())
class Lobby(Window):
__guid__ = 'form.Lobby'
__notifyevents__ = ['OnCharNowInStation',
'OnCharNoLongerInStation',
'OnProcessStationServiceItemChange',
'OnAgentMissionChange',
'OnStandingSet',
'OnCorporationChanged',
'OnCorporationMemberChanged',
'OnPrimaryViewChanged',
'OnSetDevice']
default_windowID = 'lobby'
default_top = 16
default_width = 223
default_captionLabelPath = 'UI/Station/StationServices'
default_pinned = True
undockCont = None
undock_button_is_locked = False
selectedGroupButtonID = None
@staticmethod
def default_height(*args):
return uicore.desktop.height - 100
@staticmethod
def default_left(*args):
return uicore.desktop.width - Lobby.default_width - 16
def OnPrimaryViewChanged(self, oldViewInfo, newViewInfo):
self.UpdateCQButton(newViewInfo.name)
def OnSetDevice(self):
bottom = self.top + self.height
if bottom > uicore.desktop.height:
self.height = max(self.default_minSize[1], uicore.desktop.height - self.top)
right = self.left + self.width
if right > uicore.desktop.width:
self.width = max(self.default_minSize[0], uicore.desktop.width - self.left)
def ApplyAttributes(self, attributes):
self.viewState = sm.GetService('viewState')
if not settings.user.ui.Get('stationservicebtns', 1):
minWidth = BIGBUTTONSIZE + (BIGBUTTONSIZE + BUTTONGAP) * 3 + 14
minHeight = 495
else:
minWidth = SMALLBUTTONSIZE + (SMALLBUTTONSIZE + BUTTONGAP) * 5 + 10
minHeight = 470
self.default_minSize = (minWidth, minHeight)
Window.ApplyAttributes(self, attributes)
self.stationSvc = sm.GetService('station')
self.guestScroll = None
self.sr.serviceAccessCache = {}
self.SetWndIcon(None)
self.HideHeader()
self.scope = 'station'
self.MakeUnKillable()
self.MakeUnstackable()
self.SetTopparentHeight(0)
main = self.sr.main
main.clipChildren = True
self.corpLogoParent = Container(name='corpLogoParent', align=uiconst.TOTOP, height=160, parent=main)
self.corpName = CaptionLabel(parent=main, align=uiconst.TOTOP, name='corpName', uppercase=False)
self.undockparent = Container(name='undockparent', align=uiconst.TOTOP, height=78, parent=main)
self.AddCQButton(parent=self.undockparent)
self.AddUndockButton(parent=self.undockparent)
EveLabelMedium(text=localization.GetByLabel('UI/Station/StationServices'), align=uiconst.TOTOP, parent=main, bold=True, padding=(6, 6, 6, 0))
self.serviceButtons = FlowContainer(name='serviceButtons', align=uiconst.TOTOP, parent=main, contentSpacing=(BUTTONGAP, BUTTONGAP), padding=(6, 6, 3, 6))
btnGroup = ToggleButtonGroup(name='btnGroup', parent=main, align=uiconst.TOTOP, height=32, padding=(6, 6, 6, 6), idx=-1, callback=self.OnButtonGroupSelection, autoHeight=True)
self.mainButtonGroup = btnGroup
self.guestsPanel = Container(name=GUESTSPANEL, parent=main, padding=const.defaultPadding)
self.quickFilter = QuickFilterEdit(name='quickFilterEdit', parent=self.guestsPanel)
self.quickFilter.ReloadFunction = lambda : self.ShowGuests()
self.guestScroll = BasicDynamicScroll(parent=self.guestsPanel, padTop=const.defaultPadding + self.quickFilter.height)
guestSettingsMenu = UtilMenu(menuAlign=uiconst.TOPRIGHT, parent=self.guestsPanel, align=uiconst.TOPRIGHT, GetUtilMenu=self.SettingMenu, texturePath='res:/UI/Texture/SettingsCogwheel.png', width=18, height=18, iconSize=18)
self.userType = settings.user.ui.Get('guestCondensedUserList', False)
self.agentsPanel = Container(name=AGENTSPANEL, parent=main, padding=const.defaultPadding)
self.agentFinderBtn = Button(label=localization.GetByLabel('UI/AgentFinder/AgentFinder'), parent=self.agentsPanel, align=uiconst.CENTERTOP, func=uicore.cmd.OpenAgentFinder)
self.agentScroll = Scroll(parent=self.agentsPanel, padTop=const.defaultPadding + self.agentFinderBtn.height)
self.officesPanel = Container(name=OFFICESPANEL, parent=main, padding=const.defaultPadding)
self.officesButtons = FlowContainer(name='officesButtons', align=uiconst.TOTOP, parent=self.officesPanel, contentSpacing=(4, 4), centerContent=True)
self.officesScroll = Scroll(parent=self.officesPanel, padTop=const.defaultPadding)
agentsButton = btnGroup.AddButton(AGENTSPANEL, '<center>' + localization.GetByLabel('UI/Station/Lobby/Agents'), self.agentsPanel, btnClass=LobbyToggleButtonGroupButton, hint=localization.GetByLabel('Tooltips/StationServices/AgentsTab_descrtiption'))
agentsButton.name = 'stationInformationTabAgents'
guestsButton = btnGroup.AddButton(GUESTSPANEL, '<center>' + localization.GetByLabel('UI/Station/Lobby/Guests'), self.guestsPanel, btnClass=LobbyToggleButtonGroupButton, hint=localization.GetByLabel('Tooltips/StationServices/GuestsTab_description'))
guestsButton.counter = CounterBox(parent=guestsButton, align=uiconst.TOPRIGHT, left=2, top=-5)
self.guestsButton = guestsButton
btnGroup.AddButton(OFFICESPANEL, '<center>' + localization.GetByLabel('UI/Station/Lobby/Offices'), self.officesPanel, btnClass=LobbyToggleButtonGroupButton, hint=localization.GetByLabel('Tooltips/StationServices/OfficesTab_description'))
activePanel = settings.user.ui.Get('stationsLobbyTabs', AGENTSPANEL)
if settings.char.windows.Get('dockshipsanditems', 0):
self.inventoryPanel = Container(name=INVENTORYPANEL, parent=main)
self.sr.shipsContainer = Container(parent=self.inventoryPanel, state=uiconst.UI_HIDDEN, padding=const.defaultPadding)
self.sr.itemsContainer = Container(parent=self.inventoryPanel, state=uiconst.UI_HIDDEN, padding=const.defaultPadding)
tabs = [[localization.GetByLabel('UI/Station/Ships'),
self.sr.shipsContainer,
self,
'lobby_ships'], [localization.GetByLabel('UI/Station/Items'),
self.sr.itemsContainer,
self,
'lobby_items']]
self.inventoryTabs = TabGroup(name='inventoryPanel', parent=self.inventoryPanel, idx=0)
self.inventoryTabs.Startup(tabs, 'lobbyInventoryPanel', autoselecttab=True, UIIDPrefix='lobbyInventoryPanelTab')
self.invButton = btnGroup.AddButton(INVENTORYPANEL, '<center>' + localization.GetByLabel('UI/Station/Lobby/Hangars'), self.inventoryPanel, btnClass=LobbyToggleButtonGroupButton, hint='<b>%s</b><br>%s' % (localization.GetByLabel('Tooltips/StationServices/Hangars'), localization.GetByLabel('Tooltips/StationServices/Hangars_description')))
elif activePanel == INVENTORYPANEL:
activePanel = AGENTSPANEL
btnGroup.SelectByID(activePanel)
myDefaultView = 'hangar' if session.userid % 2 == 1 else 'station'
curView = collections.namedtuple('FakeViewInfo', ['name'])(settings.user.ui.Get('defaultDockingView', myDefaultView))
self.OnPrimaryViewChanged(curView, curView)
self.LoadOwnerInfo()
self.LoadServiceButtons()
if self.destroyed:
return
sm.RegisterNotify(self)
self.UpdateGuestTabText()
def OnButtonGroupSelection(self, buttonID):
settings.user.ui.Set('stationsLobbyTabs', buttonID)
self.selectedGroupButtonID = buttonID
if buttonID == AGENTSPANEL:
self.ShowAgents()
elif buttonID == GUESTSPANEL:
self.ShowGuests()
elif buttonID == OFFICESPANEL:
self.ShowOffices()
elif buttonID == INVENTORYPANEL:
if not len(self.sr.shipsContainer.children):
self.LayoutShipsAndItems()
def SettingMenu(self, menuParent):
showCompact = settings.user.ui.Get('guestCondensedUserList', False)
menuParent.AddCheckBox(text=localization.GetByLabel('UI/Chat/ShowCompactMemberList'), checked=bool(showCompact), callback=(self.ShowGuests, not showCompact))
def AddCQButton(self, parent):
scale = 1.0
self.cqCont = Container(name='cqCont', align=uiconst.TOLEFT_PROP, width=0.5, parent=parent, state=uiconst.UI_PICKCHILDREN, padding=3)
width = 63 * scale
height = 34 * scale
self.cqSpriteCont = Container(name='cq', align=uiconst.CENTERTOP, width=width, height=height, top=3, parent=self.cqCont, state=uiconst.UI_NORMAL)
self.cqSprites = []
spacing = 30 * scale
for i in xrange(3):
s = Sprite(parent=self.cqSpriteCont, texturePath='res:/UI/Texture/classes/Lobby/{0}.png'.format(i + 1), align=uiconst.CENTERTOP, width=-width, height=height, left=0, state=uiconst.UI_DISABLED)
s.color = COLOR_CQ
self.cqSprites.insert(0, s)
self.cqLabel = EveLabelMedium(parent=self.cqCont, align=uiconst.CENTERTOP, top=8 + height, width=100)
self.UpdateCQButton()
self.cqSpriteCont.OnClick = self.OnCQClicked
self.cqSpriteCont.OnMouseEnter = self.OnCQMouseEnter
self.cqSpriteCont.OnMouseExit = self.OnCQMouseExit
def OnCQClicked(self, *args):
self.OnCQMouseExit()
for i, s in enumerate(self.cqSprites):
uicore.animations.SpGlowFadeIn(s, glowColor=(0.8, 0.8, 0.1, 0.3), glowExpand=1, loops=1, duration=1.0, curveType=uiconst.ANIM_WAVE, timeOffset=(3 - i) * 0.1)
if self.IsInCQ():
self.EnterHangar()
else:
self.EnterCQ()
def OnCQMouseEnter(self, *args):
self.AnimateCQSprites((0.8, 1, 1))
def OnCQMouseExit(self, *args):
self.AnimateCQSprites(COLOR_CQ[:3])
def AnimateCQSprites(self, endColor):
for i, s in enumerate(self.cqSprites):
uicore.animations.SpColorMorphTo(s, startColor=(s.color.r, s.color.g, s.color.b), endColor=endColor, duration=0.1)
def UpdateCQButton(self, viewName = None):
isInCQ = False
if viewName is not None:
isInCQ = viewName == 'station'
else:
isInCQ = self.IsInCQ()
if isInCQ:
self.cqLabel.text = '<center>' + localization.GetByLabel('UI/Commands/EnterHangar') + '</center>'
else:
self.cqLabel.text = '<center>' + localization.GetByLabel('UI/Commands/EnterCQ') + '</center>'
self.cqCont.height = self.cqLabel.height + self.cqSpriteCont.height + 6
def IsInCQ(self):
viewStateSvc = sm.GetService('viewState')
currentView = viewStateSvc.GetCurrentView()
if currentView is not None and currentView.name == 'station':
return True
else:
return False
def AddUndockButton(self, parent):
scale = 1.0
self.undockCont = Container(name='undockCont', align=uiconst.TORIGHT_PROP, width=0.5, parent=parent, state=uiconst.UI_PICKCHILDREN, padding=3)
width = 63 * scale
height = 34 * scale
self.undockSpriteCont = Container(name='undock', align=uiconst.CENTERTOP, width=width, height=height, top=3, parent=self.undockCont, state=uiconst.UI_NORMAL)
self.undockSprites = []
spacing = 30 * scale
for i in xrange(3):
s = Sprite(parent=self.undockSpriteCont, texturePath='res:/UI/Texture/classes/Lobby/{0}.png'.format(i + 1), align=uiconst.CENTERTOP, width=width, height=height, left=0, state=uiconst.UI_DISABLED)
s.color = COLOR_UNDOCK
self.undockSprites.append(s)
self.undockLabel = EveLabelMedium(parent=self.undockCont, align=uiconst.CENTERTOP, top=8 + height, width=100)
self.UpdateUndockButton()
self.undockCont.height = self.undockLabel.height + height + 6
self.undockSpriteCont.OnClick = self.OnUndockClicked
self.undockSpriteCont.OnMouseEnter = self.OnUndockMouseEnter
self.undockSpriteCont.OnMouseExit = self.OnUndockMouseExit
if self.undock_button_is_locked:
self._DisableUndockButton()
def OnUndockClicked(self, *args):
if sm.GetService('station').PastUndockPointOfNoReturn():
return
uthread.new(self.AttemptToUndock).context = 'UndockButtonThread'
def LockCQButton(self):
self.cqCont.opacity = 0.5
self.cqCont.state = uiconst.UI_DISABLED
def UnlockCQButton(self):
self.cqCont.opacity = 1.0
self.cqCont.state = uiconst.UI_NORMAL
def _DisableUndockButton(self):
if self.undockCont is not None:
self.undockCont.opacity = 0.5
self.undockCont.state = uiconst.UI_DISABLED
def _EnableUndockButton(self):
if self.undockCont is not None:
self.undockCont.opacity = 1.0
self.undockCont.state = uiconst.UI_NORMAL
def LockUndockButton(self):
self.undock_button_is_locked = True
self._DisableUndockButton()
def UnlockUndockButton(self):
self.undock_button_is_locked = False
self._EnableUndockButton()
def AttemptToUndock(self):
exiting = sm.GetService('station').Exit()
if exiting:
self.LockCQButton()
def OnUndockMouseEnter(self, *args):
self.AnimateUndockSprites((1, 1, 0.8))
def OnUndockMouseExit(self, *args):
self.AnimateUndockSprites(COLOR_UNDOCK[:3])
def AnimateUndockSprites(self, endColor):
if sm.GetService('station').PastUndockPointOfNoReturn():
return
for i, s in enumerate(self.undockSprites):
uicore.animations.SpColorMorphTo(s, startColor=(s.color.r, s.color.g, s.color.b), endColor=endColor, duration=0.1)
def SetUndockProgress(self, undockProgress):
if undockProgress is None:
self.UpdateUndockButton()
return
i = int(undockProgress * 3)
if i < 3:
self.UpdateUndockButton()
uicore.animations.SpGlowFadeIn(self.undockSprites[i], glowColor=(1.0, 1.0, 0.8, 0.2), glowExpand=1, loops=1, duration=0.2)
else:
self.undockLabel.text = '<center>' + localization.GetByLabel('UI/Station/UndockingConfirmed') + '</center>'
for i, s in enumerate(self.undockSprites):
uicore.animations.StopAllAnimations(s)
s.glowColor = (0, 0, 0, 0)
uicore.animations.SpColorMorphTo(s, startColor=(1, 0.8, 0), endColor=(1, 0, 0), loops=1000, duration=1, curveType=uiconst.ANIM_WAVE, timeOffset=i * 0.1 - 0.5, includeAlpha=False)
uicore.animations.SpGlowFadeIn(s, glowColor=(1.0, 1.0, 0.8, 0.2), glowExpand=1, loops=1000, duration=1, curveType=uiconst.ANIM_WAVE, timeOffset=i * 0.1)
def UpdateUndockButton(self):
if self.stationSvc.exitingstation:
self.undockLabel.text = '<center>' + localization.GetByLabel('UI/Station/AbortUndock') + '</center>'
self.LockCQButton()
else:
self.undockLabel.text = '<center>' + localization.GetByLabel('UI/Neocom/UndockBtn') + '</center>'
self.UnlockCQButton()
def EnterCQ(self, *args):
if self.viewState.HasActiveTransition():
return
sm.GetService('cmd').CmdEnterCQ()
def EnterHangar(self, *args):
if self.viewState.HasActiveTransition():
return
sm.GetService('cmd').CmdEnterHangar()
def OnScale_(self, *args):
return
height = 0
for each in self.sr.main.children:
if each.align in (uiconst.TOTOP, uiconst.TOBOTTOM):
height += each.padTop + each.height + each.padBottom
height += 160
self.SetMinSize([self.minsize[0], height])
def LayoutShipsAndItems(self):
self.sr.itemsContainer.Flush()
itemsContainer = invCont.StationItems(name='stationItems', parent=self.sr.itemsContainer, showControls=True, state=uiconst.UI_NORMAL)
self.sr.shipsContainer.Flush()
shipsContainer = invCont.StationShips(name='stationShips', parent=self.sr.shipsContainer, showControls=True, state=uiconst.UI_NORMAL)
self.invButton.OnDropData = itemsContainer.OnDropData
self.sr.itemsContainer.OnDropData = itemsContainer.OnDropData
self.sr.shipsContainer.OnDropData = shipsContainer.OnDropData
def OnProcessStationServiceItemChange(self, stationID, solarSystemID, serviceID, stationServiceItemID, isEnabled):
if self.destroyed or stationID != eve.session.stationid:
return
for icon in self.serviceButtons.children:
if hasattr(icon, 'stationServiceIDs') and serviceID in icon.stationServiceIDs:
self.SetServiceButtonState(icon, [serviceID])
def OnAgentMissionChange(self, actionID, agentID, tutorialID = None):
if self.selectedGroupButtonID == AGENTSPANEL:
self.ShowAgents()
def OnCorporationChanged(self, corpID, change):
blue.pyos.synchro.Yield()
self.LoadButtons()
def OnStandingSet(self, fromID, toID, rank):
if self.selectedGroupButtonID == AGENTSPANEL:
self.ShowAgents()
def SetServiceButtonState(self, button, serviceIDs):
for serviceID in serviceIDs:
currentstate = sm.GetService('station').GetServiceState(serviceID)
if currentstate is not None:
if self.sr.serviceAccessCache.has_key(serviceID):
del self.sr.serviceAccessCache[serviceID]
if not currentstate.isEnabled:
button.Disable()
button.serviceStatus = localization.GetByLabel('UI/Station/Lobby/Disabled')
button.serviceEnabled = False
else:
button.Enable()
button.serviceStatus = localization.GetByLabel('UI/Station/Lobby/Enabled')
button.serviceEnabled = True
def LoadServiceButtons(self):
parent = self.serviceButtons
parent.Flush()
icon = None
stationservicebtns = settings.user.ui.Get('stationservicebtns', 1)
btnsize = BIGBUTTONSIZE
if stationservicebtns:
btnsize = SMALLBUTTONSIZE
haveServices = self.GetCurrentStationServices()
for service in reversed(haveServices):
button = BigButton(parent=parent, width=btnsize, height=btnsize, name=service.name, align=uiconst.NOALIGN)
button.Startup(btnsize, btnsize, iconOpacity=0.75)
button.cmdStr = service.command
button.stationServiceIDs = service.maskServiceIDs
button.displayName = service.label
button.OnClick = (self.OnSvcBtnClick, button)
button.serviceStatus = localization.GetByLabel('UI/Station/Lobby/Enabled')
button.serviceEnabled = True
if hasattr(service, 'iconID'):
button.SetTexturePath(service.iconID)
else:
button.SetTexturePath(service.texturePath)
self.SetServiceButtonState(button, service.maskServiceIDs)
button.LoadTooltipPanel = self.LoadServiceButtonTooltipPanel
def GetCurrentStationServices(self):
serviceMask = eve.stationItem.serviceMask
haveServices = []
for serviceData in stationServiceConst.serviceData:
if stationServiceConst.serviceIDAlwaysPresent in serviceData.maskServiceIDs:
haveServices.append(serviceData)
continue
elif serviceMask & sum(serviceData.maskServiceIDs) > 0:
if serviceData.name == 'navyoffices':
if not sm.GetService('facwar').CheckStationElegibleForMilitia():
continue
elif serviceData.name == 'securityoffice':
if not sm.GetService('securityOfficeSvc').CanAccessServiceInStation(session.stationid2):
continue
haveServices.append(serviceData)
return haveServices
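# The serviceMask & sum(maskServiceIDs) > 0 test assumes each station-service
# ID is a distinct power-of-two flag (as EVE's service constants are), so the
# sum equals the bitwise OR of the flags.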
def LoadServiceButtonTooltipPanel(self, tooltipPanel, tooltipOwner, *args):
tooltipPanel.LoadGeneric3ColumnTemplate()
command = uicore.cmd.commandMap.GetCommandByName(tooltipOwner.cmdStr)
tooltipPanel.AddCommandTooltip(command)
if not tooltipOwner.serviceEnabled:
tooltipPanel.AddLabelMedium(text=localization.GetByLabel('UI/Station/Lobby/Disabled'), color=(1, 0, 0, 1), bold=True, colSpan=tooltipPanel.columns)
def OnSvcBtnClick(self, btn, *args):
self.CheckCanAccessService(btn.name)
sm.GetService('station').LoadSvc(btn.name)
def CheckCanAccessService(self, serviceName):
for serviceData in stationServiceConst.serviceData:
if serviceData.name == serviceName:
corpStationMgr = None
now = blue.os.GetWallclockTime()
for stationServiceID in serviceData.maskServiceIDs:
doCheck = 1
time, result = (None, None)
if self.sr.serviceAccessCache.has_key(stationServiceID):
time, result = self.sr.serviceAccessCache[stationServiceID]
if time + const.MIN * 5 > now:
doCheck = 0
if doCheck:
if corpStationMgr is None:
corpStationMgr = sm.GetService('corp').GetCorpStationManager()
try:
corpStationMgr.DoStandingCheckForStationService(stationServiceID)
self.sr.serviceAccessCache[stationServiceID] = (now, None)
except Exception as e:
self.sr.serviceAccessCache[stationServiceID] = (now, e)
sys.exc_clear()
time, result = self.sr.serviceAccessCache[stationServiceID]
if result is not None:
raise result
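# serviceAccessCache maps stationServiceID -> (timestamp, exception-or-None);
# entries younger than five minutes skip the remote standing check, and a
# cached exception is re-raised instead of re-querying the server.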
def LoadButtons(self):
if self.destroyed:
return
btns = []
officeExists = sm.GetService('corp').GetOffice() is not None
canRent = session.corprole & const.corpRoleCanRentOffice == const.corpRoleCanRentOffice
canMove = session.corprole & const.corpRoleDirector == const.corpRoleDirector
if canRent and not officeExists:
rentLabel = localization.GetByLabel('UI/Station/Lobby/RentOffice')
btns.append([rentLabel, self.RentOffice, None])
if canMove and officeExists:
btns.append([localization.GetByLabel('UI/Station/Hangar/UnrentOffice'), self.UnrentOffice, None])
if canMove:
isHQHere = sm.GetService('corp').GetCorporation().stationID == session.stationid2
if not isHQHere:
hqLabel = localization.GetByLabel('UI/Station/Lobby/MoveHeadquartersHere')
btns.append([hqLabel, self.SetHQ, None])
if not officeExists and sm.GetService('corp').HasCorpImpoundedItemsAtStation():
btns.append([localization.GetByLabel('UI/Inventory/ReleaseItems'), self.ReleaseImpoundedItems, None])
if sm.GetService('corp').DoesCharactersCorpOwnThisStation():
mgmtLabel = localization.GetByLabel('UI/Station/Lobby/StationManagement')
btns.append([mgmtLabel, self.OpenStationManagement, None])
if self.destroyed:
return
self.officesButtons.Flush()
for label, func, args in btns:
Button(parent=self.officesButtons, label=label, func=func, args=args, align=uiconst.NOALIGN)
def ReleaseImpoundedItems(self, *args):
corpStationMgr = sm.GetService('corp').GetCorpStationManager()
cost = corpStationMgr.GetQuoteForGettingCorpJunkBack()
if eve.Message('CrpJunkAcceptCost', {'cost': FmtAmt(cost)}, uiconst.YESNO) != uiconst.ID_YES:
return
corpStationMgr.PayForReturnOfCorpJunk(cost)
sm.GetService('corp').hasImpoundedItemsCacheTime = None
self.LoadButtons()
def UnrentOffice(self, *args):
items = invCtrl.StationCorpHangar(divisionID=None).GetItems()
asked = False
if len([ item for item in items if item.ownerID == session.corpid ]):
asked = True
if eve.Message('crpUnrentOfficeWithContent', {}, uiconst.YESNO) != uiconst.ID_YES:
return
if not asked:
if eve.Message('crpUnrentOffice', {}, uiconst.YESNO) != uiconst.ID_YES:
return
corpStationMgr = sm.GetService('corp').GetCorpStationManager()
sm.GetService('corp').hasImpoundedItemsCacheTime = None
corpStationMgr.CancelRentOfOffice()
def OpenStationManagement(self, *args):
uthread.new(uicore.cmd.OpenStationManagement)
def LoadOwnerInfo(self):
parent = self.corpLogoParent
parent.Flush()
corpID = eve.stationItem.ownerID
size = 128 if CheckCorpID(corpID) else 64
logo = GetLogoIcon(itemID=corpID, parent=parent, acceptNone=False, state=uiconst.UI_DISABLED, pos=(0,
8,
size,
size), align=uiconst.CENTERTOP)
InfoIcon(typeID=const.typeCorporation, itemID=corpID, left=const.defaultPadding, top=20, align=uiconst.TOPRIGHT, parent=parent, idx=0)
self.corpLogoParent.height = logo.top + logo.height
if not CheckCorpID(corpID):
self.corpName.text = '<center>' + cfg.eveowners.Get(corpID).name
self.corpName.display = True
else:
self.corpName.display = False
def ImVisible(self):
return bool(self.state != uiconst.UI_HIDDEN and not self.IsCollapsed() and not self.IsMinimized())
def Load(self, key):
pass
@telemetry.ZONE_METHOD
def OnCharNowInStation(self, rec):
if self.destroyed or not session.stationid2:
return
self.UpdateGuestTabText()
if self.selectedGroupButtonID == GUESTSPANEL:
charID, corpID, allianceID, warFactionID = rec
cfg.eveowners.Prime([charID])
if self.destroyed:
return
newcharinfo = cfg.eveowners.Get(charID)
idx = 0
for each in self.guestScroll.GetNodes():
if each.charID == charID:
return
if CaseFoldCompare(each.info.name, newcharinfo.name) > 0:
break
idx += 1
filteredGuest = None
guestFilter = self.quickFilter.GetValue()
if len(guestFilter):
filteredGuest = NiceFilter(self.quickFilter.QuickFilter, newcharinfo.name)
if filteredGuest or len(guestFilter) == 0:
entry = GetListEntry(self.userEntry, {'charID': charID,
'info': newcharinfo,
'label': newcharinfo.name,
'corpID': corpID,
'allianceID': allianceID,
'warFactionID': warFactionID})
self.guestScroll.AddNodes(idx, [entry])
@telemetry.ZONE_METHOD
def OnCharNoLongerInStation(self, rec):
if self.destroyed or not session.stationid2:
return
self.UpdateGuestTabText()
charID, corpID, allianceID, warFactionID = rec
if self.selectedGroupButtonID == GUESTSPANEL:
for entry in self.guestScroll.GetNodes():
if entry.charID == charID:
self.guestScroll.RemoveNodes([entry])
return
def ShowGuests(self, condensed = None, *args):
if self.selectedGroupButtonID != GUESTSPANEL:
return
if condensed is not None:
settings.user.ui.Set('guestCondensedUserList', condensed)
self.SetGuestEntryType()
guests = sm.GetService('station').GetGuests()
ownerIDs = guests.keys()
cfg.eveowners.Prime(ownerIDs)
guestFilter = self.quickFilter.GetValue()
if len(guestFilter):
filterData = [ KeyVal(name=cfg.eveowners.Get(charID).name, charID=charID) for charID in ownerIDs ]
filterGuests = NiceFilter(self.quickFilter.QuickFilter, filterData)
ownerIDs = [ each.charID for each in filterGuests ]
if self.destroyed:
return
scrolllist = []
for charID in ownerIDs:
if charID not in guests:
continue
corpID, allianceID, warFactionID = guests[charID]
charinfo = cfg.eveowners.Get(charID)
scrolllist.append((charinfo.name.lower(), GetListEntry(self.userEntry, {'charID': charID,
'info': charinfo,
'label': charinfo.name,
'corpID': corpID,
'allianceID': allianceID,
'warFactionID': warFactionID})))
scrolllist = SortListOfTuples(scrolllist)
self.guestScroll.Clear()
self.guestScroll.AddNodes(0, scrolllist)
self.UpdateGuestTabText()
def UpdateGuestTabText(self):
numGuests = len(sm.GetService('station').GetGuests())
self.guestsButton.counter.text = numGuests
def SetGuestEntryType(self):
if settings.user.ui.Get('guestCondensedUserList', False):
self.userEntry = 'ChatUserSimple'
else:
self.userEntry = 'User'
def ShowAgents(self):
try:
agentsSvc = sm.GetService('agents')
journalSvc = sm.GetService('journal')
facWarSvc = sm.StartService('facwar')
standingSvc = sm.StartService('standing')
epicArcStatusSvc = sm.RemoteSvc('epicArcStatus')
if self.selectedGroupButtonID != AGENTSPANEL:
return
agentMissions = journalSvc.GetMyAgentJournalDetails()[:1][0]
agentsInStation = agentsSvc.GetAgentsByStationID()[session.stationid2]
relevantAgents = []
missionStateDict = {}
for each in agentMissions:
missionState, importantMission, missionType, missionName, agentID, expirationTime, bookmarks, remoteOfferable, remoteCompletable, contentID = each
agent = agentsSvc.GetAgentByID(agentID)
missionStateDict[agentID] = missionState
if missionState not in (const.agentMissionStateAllocated, const.agentMissionStateOffered) or agent.agentTypeID in (const.agentTypeGenericStorylineMissionAgent,
const.agentTypeStorylineMissionAgent,
const.agentTypeEventMissionAgent,
const.agentTypeCareerAgent,
const.agentTypeEpicArcAgent):
relevantAgents.append(agentID)
localRelevantAgents = []
for agent in agentsInStation:
if agent.agentID in relevantAgents:
localRelevantAgents.append(agent.agentID)
if self.destroyed:
return
scrolllist = []
sortlist = []
for agentID in relevantAgents:
if not eve.rookieState or agentID in const.rookieAgentList:
if agentID not in localRelevantAgents:
missionState = missionStateDict.get(agentID)
sortlist.append((cfg.eveowners.Get(agentID).name, GetListEntry('AgentEntry', {'charID': agentID,
'missionState': missionState})))
if sortlist:
agentLabel = localization.GetByLabel('UI/Station/Lobby/AgentsOfInterest')
scrolllist.append(GetListEntry('Header', {'label': agentLabel}))
scrolllist += SortListOfTuples(sortlist)
unavailableAgents = []
availableAgents = []
for agent in agentsInStation:
if agent.agentID in const.rookieAgentList:
continue
if not eve.rookieState or agent.agentID in const.rookieAgentList:
isLimitedToFacWar = False
if agent.agentTypeID == const.agentTypeFactionalWarfareAgent and facWarSvc.GetCorporationWarFactionID(agent.corporationID) != session.warfactionid:
isLimitedToFacWar = True
if agent.agentTypeID in (const.agentTypeResearchAgent,
const.agentTypeBasicAgent,
const.agentTypeEventMissionAgent,
const.agentTypeCareerAgent,
const.agentTypeFactionalWarfareAgent):
standingIsValid = standingSvc.CanUseAgent(agent.factionID, agent.corporationID, agent.agentID, agent.level, agent.agentTypeID)
haveMissionFromAgent = agent.agentID in relevantAgents
if not isLimitedToFacWar and (standingIsValid or haveMissionFromAgent):
availableAgents.append(agent.agentID)
else:
unavailableAgents.append(agent.agentID)
elif agent.agentTypeID == const.agentTypeEpicArcAgent:
standingIsValid = standingSvc.CanUseAgent(agent.factionID, agent.corporationID, agent.agentID, agent.level, agent.agentTypeID)
haveMissionFromAgent = agent.agentID in relevantAgents
epicAgentAvailable = False
if haveMissionFromAgent:
epicAgentAvailable = True
elif standingIsValid:
if agent.agentID in relevantAgents or epicArcStatusSvc.AgentHasEpicMissionsForCharacter(agent.agentID):
epicAgentAvailable = True
if epicAgentAvailable:
availableAgents.append(agent.agentID)
else:
unavailableAgents.append(agent.agentID)
if agent.agentTypeID == const.agentTypeAura:
if sm.GetService('experimentClientSvc').IsTutorialEnabled():
availableAgents.append(agent.agentID)
elif agent.agentTypeID in (const.agentTypeGenericStorylineMissionAgent, const.agentTypeStorylineMissionAgent):
if agent.agentID in localRelevantAgents:
availableAgents.append(agent.agentID)
else:
unavailableAgents.append(agent.agentID)
if availableAgents:
availableLabel = localization.GetByLabel('UI/Station/Lobby/AvailableToYou')
scrolllist.append(GetListEntry('Header', {'label': availableLabel}))
sortlist = []
for agentID in availableAgents:
missionState = missionStateDict.get(agentID)
sortlist.append((cfg.eveowners.Get(agentID).name, GetListEntry('AgentEntry', {'charID': agentID,
'missionState': missionState})))
scrolllist += SortListOfTuples(sortlist)
if unavailableAgents:
unavailableLabel = localization.GetByLabel('UI/Station/Lobby/NotAvailableToYou')
scrolllist.append(GetListEntry('Header', {'label': unavailableLabel}))
sortlist = []
for agentID in unavailableAgents:
missionState = missionStateDict.get(agentID)
sortlist.append((cfg.eveowners.Get(agentID).name, GetListEntry('AgentEntry', {'charID': agentID,
'missionState': missionState})))
scrolllist += SortListOfTuples(sortlist)
if self.destroyed:
return
self.agentScroll.Load(fixedEntryHeight=40, contentList=scrolllist)
except:
log.LogException()
sys.exc_clear()
def InteractWithAgent(self, agentID, *args):
sm.StartService('agents').InteractWith(agentID)
def SetHQ(self, *args):
if sm.GetService('godma').GetType(eve.stationItem.stationTypeID).isPlayerOwnable == 1:
raise UserError('CanNotSetHQAtPlayerOwnedStation')
if eve.Message('MoveHQHere', {}, uiconst.YESNO) == uiconst.ID_YES:
sm.GetService('corp').GetCorpStationManager().MoveCorpHQHere()
def RentOffice(self, *args):
if not self.sr.Get('isRentOfficeOpening') or not self.sr.isRentOfficeOpening:
self.sr.isRentOfficeOpening = 1
try:
cost = sm.GetService('corp').GetCorpStationManager().GetQuoteForRentingAnOffice()
if eve.Message('AskPayOfficeRentalFee', {'cost': cost,
'duration': const.rentalPeriodOffice * const.DAY}, uiconst.YESNO) == uiconst.ID_YES:
officeID = sm.GetService('corp').GetCorpStationManager().RentOffice(cost)
if officeID:
office = sm.GetService('corp').GetOffice()
invCache = sm.GetService('invCache')
invCache.InvalidateLocationCache(officeID)
if office is not None:
folder = invCache.GetInventoryFromId(office.officeFolderID, locationID=session.stationid2)
folder.List()
wnd = InventoryWindow.GetIfOpen()
if not wnd:
InventoryWindow.OpenOrShow()
uthread.new(self.LoadButtons)
if self.selectedGroupButtonID == OFFICESPANEL:
self.ShowOffices()
finally:
self.sr.isRentOfficeOpening = 0
def ShowShips(self):
if self.sr.shipsContainer is None:
return
self.mainButtonGroup.SelectByID(INVENTORYPANEL)
self.inventoryTabs.ShowPanel(self.sr.shipsContainer)
def ShowItems(self):
if self.sr.itemsContainer is None:
return
self.mainButtonGroup.SelectByID(INVENTORYPANEL)
self.inventoryTabs.ShowPanel(self.sr.itemsContainer)
def ReloadOfficesIfVisible(self):
if self.selectedGroupButtonID == OFFICESPANEL:
self.ShowOffices()
def ShowOffices(self):
if self.selectedGroupButtonID != OFFICESPANEL:
return
self.LoadButtons()
corpsWithOffices = sm.GetService('corp').GetCorporationsWithOfficesAtStation()
cfg.corptickernames.Prime([ c.corporationID for c in corpsWithOffices ])
scrolllist = []
for corp in corpsWithOffices:
data = KeyVal()
data.corpName = corp.corporationName
data.corpID = corp.corporationID
data.corporation = corp
scrolllist.append((data.corpName.lower(), GetListEntry('OfficeEntry', data=data)))
scrolllist = SortListOfTuples(scrolllist)
numUnrentedOffices = self.GetNumberOfUnrentedOffices()
availOfficesLabel = localization.GetByLabel('UI/Station/Lobby/NumAvailableOffices', numOffices=numUnrentedOffices)
scrolllist.insert(0, GetListEntry('Header', {'label': availOfficesLabel}))
if not self.destroyed:
self.officesScroll.Load(contentList=scrolllist)
def GetNumberOfUnrentedOffices(self):
return sm.GetService('corp').GetCorpStationManager().GetNumberOfUnrentedOffices()
def OnCorporationMemberChanged(self, corporationID, memberID, change):
if memberID == session.charid:
self.LoadButtons()
def StopAllBlinkButtons(self):
for each in self.serviceButtons.children:
if hasattr(each, 'Blink'):
each.Blink(0)
def BlinkButton(self, whatBtn):
for each in self.serviceButtons.children:
if each.name.lower() == whatBtn.lower():
each.Blink(blinks=40)
class OfficeEntry(SE_BaseClassCore):
__guid__ = 'listentry.OfficeEntry'
def Startup(self, *args):
self.Flush()
main = Container(parent=self, align=uiconst.TOTOP, height=30, state=uiconst.UI_PICKCHILDREN)
left = Container(parent=main, align=uiconst.TOLEFT, width=50, state=uiconst.UI_PICKCHILDREN)
icon = Container(parent=left, align=uiconst.TOPLEFT, width=32, height=32, left=3, top=3, state=uiconst.UI_PICKCHILDREN)
par = Container(parent=main, align=uiconst.TOTOP, height=17, state=uiconst.UI_PICKCHILDREN)
label = localization.GetByLabel('UI/Station/Lobby/CorpName')
fieldName = 'corpName'
l = EveLabelSmall(text=label, parent=par, left=5, top=2, state=uiconst.UI_DISABLED)
t = EveLabelMedium(text='', parent=par, left=5, state=uiconst.UI_NORMAL)
setattr(self.sr, fieldName + '_Label', l)
setattr(self.sr, fieldName + '_Text', t)
setattr(self.sr, fieldName, par)
LineThemeColored(parent=self, align=uiconst.TOBOTTOM)
self.sr.buttonCnt = Container(parent=self, align=uiconst.TOTOP, height=25, state=uiconst.UI_HIDDEN)
self.sr.icon = icon
self.sr.main = main
self.sr.infoicon = InfoIcon(left=32, top=3, parent=left, idx=0)
def Load(self, node):
self.sr.node = node
data = node
self.sr.infoicon.UpdateInfoLink(const.typeCorporation, data.corpID)
mainHeight = 0
fieldName = 'corpName'
infofield = self.sr.Get(fieldName, None)
fieldText = self.sr.Get(fieldName + '_Text', None)
fieldLabel = self.sr.Get(fieldName + '_Label', None)
fieldText.text = data.Get(fieldName, '')
fieldText.top = fieldLabel.textheight
infofield.height = fieldText.top + fieldText.textheight + 2
if infofield.state != uiconst.UI_HIDDEN:
mainHeight += infofield.height
self.sr.main.height = mainHeight + 10
self.sr.icon.Flush()
def LogoThread():
if not self.destroyed:
GetLogoIcon(itemID=data.corpID, parent=self.sr.icon, acceptNone=False, align=uiconst.TOALL)
uthread.new(LogoThread)
self.sr.buttonCnt.Flush()
if not IsNPC(node.corpID):
buttonEntries = []
if eve.session.corpid != node.corpID:
if sm.GetService('corp').GetActiveApplication(node.corpID) is not None:
applyLabel = localization.GetByLabel('UI/Corporations/CorpApplications/ViewApplication')
else:
applyLabel = localization.GetByLabel('UI/Corporations/CorporationWindow/Alliances/Rankings/ApplyToJoin')
buttonEntries.append((applyLabel,
sm.GetService('corp').ApplyForMembership,
(node.corpID,),
80))
if len(buttonEntries) > 0:
self.sr.buttonCnt.state = uiconst.UI_PICKCHILDREN
self.sr.buttons = ButtonGroup(btns=buttonEntries, parent=self.sr.buttonCnt, unisize=0, line=0)
self.sr.buttons.top -= 1
else:
self.sr.buttonCnt.state = uiconst.UI_PICKCHILDREN
else:
self.sr.buttonCnt.state = uiconst.UI_HIDDEN
def GetHeight(self, *args):
node, width = args
height = 2
lw, lh = EveLabelSmall.MeasureTextSize(text=localization.GetByLabel('UI/Station/Lobby/CorpName'))
tw, th = EveLabelMedium.MeasureTextSize(text=node.corpName)
multiplier = 1
height += (lh + th + 15) * multiplier
height += 5
if not IsNPC(node.corpID) and eve.session.corpid != node.corpID:
height += 30
node.height = height
return node.height
| [
"[email protected]"
] | |
1fced1f8cce087c0bbfdd61d98a7820b2eeef5ec | 419637376e445ec9faf04c877d5fb6c09d15903f | /steam/user/order/userCancelOrderActivityTest.py | 76c1e3a715c556e511a7af0fd3bc03f9bc9ddcd7 | [] | no_license | litaojun/steamOmTest | e4203df30acafaa5e282631d77429c0e4483fb88 | 86f84dbd802d947198823e02c2f1ba2695418a76 | refs/heads/master | 2020-04-02T21:48:55.115389 | 2019-07-11T06:08:27 | 2019-07-11T06:08:27 | 154,812,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Lieb
@license: Apache Licence
@contact: [email protected]
@site: http://blog.csdn.net/hqzxsc2006
@software: PyCharm
@file: userCancelOrderActivityTest.py
@time: 2018/7/11 10:09
"""
from steam.util.steamLog import SteamTestCase
from opg.bak.testcaseRunMgr import runTestOneCls
from steam.user.member.memberAddressService import MemberAddressService
from steam.user.order.userCancelOrderActivityService import UserCancelOrderActivityService
from steam.user.order.userOrederActivityService import UserOrderActivityService
from steam.user.weixin.userViewActivityService import UserViewActivityService
from steam.user.search.weixinSearchService import WeixinSearchService
from steam.util.testJsonFormat import initInputService
class UserCancelOrderActivityTest(SteamTestCase):
'''
Cancel like (取消点赞)
'''
__interfaceName__ = "/order-service/order/cancel"
@initInputService( services = [ WeixinSearchService ,
UserViewActivityService ,
MemberAddressService ,
UserOrderActivityService ] ,
curser = UserCancelOrderActivityService )
def __init__(self, methodName='runTest', param=None):
super(UserCancelOrderActivityTest,self).__init__(methodName,param)
# def userCancelOrderActivity(self):
# userCancelOrderRsp = self.myservice.userCancelOrderActivity()
# retcode = self.myservice.getRetcodeByOrderRsp(response=userCancelOrderRsp)
# self.assertTrue(retcode == self.expectdata["code"])
if __name__ == "__main__":
kwarg = {
"memberId": "09c1316f-b304-46b1-96ff-c9ebbd93a617" ,
"resourceTypeId":12 ,
"title":"早鸟价!呼伦贝尔|私家牧场任你驰骋策马,原始森林徒步猎奇" ,
"skuName":"价格(成人)" ,
"skuId":1 ,
"resourceId":1
}
runTestOneCls(
casefilepath = "\\steamcase\\user\\order-serviceordercancels.yml",
testclse = UserCancelOrderActivityTest
) | [
"[email protected]"
] | |
aefe209940bc4b34c6bc1de86f72876bf4394890 | 4838552992476399d0452a92d0a38aa9b8b29c63 | /books/serializers.py | 9bcac01e22850c80eaea8102c4e89f7fa859d736 | [] | no_license | sannycand/books | 16cee4d0f8c1a1a4a52108fd0403c258620e146a | 593d77ccd1f4b68be0a5ed44adb495c034bea2a1 | refs/heads/develop | 2020-04-05T13:31:09.296128 | 2017-06-20T01:59:53 | 2017-06-20T01:59:53 | 94,852,739 | 0 | 0 | null | 2017-06-20T05:23:40 | 2017-06-20T05:23:40 | null | UTF-8 | Python | false | false | 412 | py | from rest_framework import serializers
from .models import Book, Review
class BookSerializer(serializers.ModelSerializer):
""" book serializer
"""
reviews = serializers.SerializerMethodField()
class Meta:
model = Book
fields = '__all__'
def get_reviews(self, instance):
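# Returns the ids of the book's reviews; DRF's JSON encoder coerces the
# QuerySet to a tuple when rendering (wrapping it in list(...) would make
# the conversion explicit).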
return Review.objects.filter(book=instance) \
.values_list('id', flat=True) | [
"[email protected]"
] | |
3b4b6347e2535618b4e084f8032df2acc602fed0 | e2de3f6fe4373f1d98b67af61dd558a813250d54 | /Algorithm/baekjoon/11726_2xn타일링.py | b6bc38b1ab6cf1a8f42e411b2e97006abbff0e4d | [] | no_license | Hansung-Lee/TIL | 3fd6d48427a8b24f7889116297143855d493535b | c24ebab8b631f5c1b835fdc8bd036acbebc8d187 | refs/heads/master | 2020-04-14T11:18:54.035863 | 2019-04-05T07:26:55 | 2019-04-05T07:26:55 | 163,810,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | N = int(input())
# f(n): number of ways to tile a 2 x n board with 2x1 and 1x2 tiles.
# Recurrence: f(n) = f(n-1) + f(n-2), seeded with f(1)=1, f(2)=2, f(3)=3
# (index 0 is a placeholder so that li[n] == f(n)).
li = [0, 1, 2, 3]
for n in range(N - 3):
    li.append(li[-1] + li[-2])
print(li[N] % 10007) | [
"[email protected]"
] | |
7a74b09d2af23b609529ac816704e8a2d47f99c9 | efae79bc4b6771191c612b121a420f898356ae77 | /tests/_support/py_conf/fabfile.py | 502c718f919618cab02d64fcbad3831bad9e585f | [
"BSD-2-Clause"
] | permissive | WinstonN/fabric | bd0d7c432d22501022495663a1ee3a385c9b045a | 34edfe4a1cc9e3d72c3cc41490673db29afe7bbb | refs/heads/2.0 | 2020-12-22T17:03:27.644562 | 2020-01-10T21:20:58 | 2020-01-10T21:20:58 | 236,866,877 | 0 | 0 | BSD-2-Clause | 2020-01-30T05:06:49 | 2020-01-28T23:40:23 | null | UTF-8 | Python | true | false | 92 | py | from invoke import task
@task
def expect_conf_value(c):
assert c.it_came_from == "py"
| [
"[email protected]"
] | |
d92958e24ff62b6981e69327f0d73818051e91d6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3035.py | cf3cfe0ebd3b61e847863f968039e9fe32db8586 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | #! /usr/bin/env python
# Two 4x4 arrangements of cards are read per case; the volunteer names the
# row holding the chosen card in each arrangement, and the answer must be
# the unique card common to both named rows.
T = int(raw_input())
t = 1
while t <= T:
    a = int(raw_input())  # 1-based index of the chosen row, first arrangement
    i = 0
    while i < a - 1:  # skip the rows above the chosen one
        raw_input()
        i += 1
    r1 = set(raw_input().split(" "))
    while i < 3:  # consume the remaining rows of the first grid
        raw_input()
        i += 1
    a = int(raw_input())  # chosen row, second arrangement
    i = 0
    while i < a - 1:
        raw_input()
        i += 1
    r2 = set(raw_input().split(" "))
    c = r1.intersection(r2)
    lc = len(c)
    if lc == 1:
        print "Case #%s: %s" % (t, c.pop())
    elif lc == 0:
        print "Case #%s: Volunteer cheated!" % t
    else:
        print "Case #%s: Bad magician!" % t
    while i < 3:  # consume the rest of the second grid
        raw_input()
        i += 1
    t += 1
| [
"[email protected]"
] |