Dataset schema (one row per source file):

  file_path          string   (length 20 to 207)
  content            string   (length 5 to 3.85M)
  size               int64    (5 to 3.85M)
  lang               string   (9 distinct values)
  avg_line_length    float64  (1.33 to 100)
  max_line_length    int64    (4 to 993)
  alphanum_fraction  float64  (0.26 to 0.93)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/fsevents.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.fsevents :synopsis: FSEvents based emitter implementation. :author: [email protected] (Yesudeep Mangalapilly) :platforms: Mac OS X """ from __future__ import with_statement import os import sys import threading import unicodedata import _watchdog_fsevents as _fsevents from watchdog.events import ( FileDeletedEvent, FileModifiedEvent, FileCreatedEvent, FileMovedEvent, DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, DirMovedEvent ) from watchdog.observers.api import ( BaseObserver, EventEmitter, DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT ) class FSEventsEmitter(EventEmitter): """ Mac OS X FSEvents Emitter class. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._lock = threading.Lock() def on_thread_stop(self): if self.watch: _fsevents.remove_watch(self.watch) _fsevents.stop(self) self._watch = None def queue_events(self, timeout): with self._lock: events = self.native_events i = 0 while i < len(events): event = events[i] # For some reason the create and remove flags are sometimes also # set for rename and modify type events, so let those take # precedence. if event.is_renamed: # Internal moves appear to always be consecutive in the same # buffer and have IDs differ by exactly one (while others # don't), making it possible to pair up the two events coming # from a single move operation. (None of this is documented!) # Otherwise, guess whether file was moved in or out. 
# TODO: handle id wrapping if (i + 1 < len(events) and events[i + 1].is_renamed and events[i + 1].event_id == event.event_id + 1): cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(event.path, events[i + 1].path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path))) i += 1 elif os.path.exists(event.path): cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) else: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) # TODO: generate events for tree elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod : cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(event.path)) elif event.is_created: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) elif event.is_removed: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) i += 1 def run(self): try: def callback(pathnames, flags, ids, emitter=self): with emitter._lock: emitter.native_events = [ _fsevents.NativeEvent(event_path, event_flags, event_id) for event_path, event_flags, event_id in zip(pathnames, flags, ids) ] emitter.queue_events(emitter.timeout) # for pathname, flag in zip(pathnames, flags): # if emitter.watch.is_recursive: # and pathname != emitter.watch.path: # new_sub_snapshot = DirectorySnapshot(pathname, True) # old_sub_snapshot = self.snapshot.copy(pathname) # diff = new_sub_snapshot - old_sub_snapshot # self.snapshot += new_subsnapshot # else: # new_snapshot = DirectorySnapshot(emitter.watch.path, False) # diff = new_snapshot - emitter.snapshot # emitter.snapshot = new_snapshot # INFO: FSEvents reports directory notifications recursively # by default, so we do not need to add subdirectory paths. # pathnames = set([self.watch.path]) # if self.watch.is_recursive: # for root, directory_names, _ in os.walk(self.watch.path): # for directory_name in directory_names: # full_path = absolute_path( # os.path.join(root, directory_name)) # pathnames.add(full_path) self.pathnames = [self.watch.path] _fsevents.add_watch(self, self.watch, callback, self.pathnames) _fsevents.read_events(self) except Exception: pass class FSEventsObserver(BaseObserver): def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout) def schedule(self, event_handler, path, recursive=False): # Python 2/3 compat try: str_class = unicode except NameError: str_class = str # Fix for issue #26: Trace/BPT error when given a unicode path # string. https://github.com/gorakhargosh/watchdog/issues#issue/26 if isinstance(path, str_class): # path = unicode(path, 'utf-8') path = unicodedata.normalize('NFC', path) # We only encode the path in Python 2 for backwards compatibility. # On Python 3 we want the path to stay as unicode if possible for # the sake of path matching not having to be rewritten to use the # bytes API instead of strings. The _watchdog_fsevent.so code for # Python 3 can handle both str and bytes paths, which is why we # do not HAVE to encode it with Python 3. 
The Python 2 code in # _watchdog_fsevents.so was not changed for the sake of backwards # compatibility. if sys.version_info < (3,): path = path.encode('utf-8') return BaseObserver.schedule(self, event_handler, path, recursive)
8,156
Python
40.196969
95
0.584723
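The schedule() override above normalizes watch paths to NFC before handing them to the C extension, so Unicode paths behave consistently. A minimal usage sketch on macOS follows; the watched path and handler are illustrative assumptions, not part of the file above, and on other platforms the generic watchdog.observers.Observer alias should be used instead.

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.fsevents import FSEventsObserver


class LogHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        # event_type is one of 'created', 'modified', 'moved', 'deleted'.
        print(event.event_type, event.src_path)


observer = FSEventsObserver()
observer.schedule(LogHandler(), "/tmp/watched", recursive=True)  # hypothetical path
observer.start()
try:
    time.sleep(10)  # let the native FSEvents stream deliver events for a while
finally:
    observer.stop()
    observer.join()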
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/winapi.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # winapi.py: Windows API-Python interface (removes dependency on pywin32) # # Copyright (C) 2007 Thomas Heller <[email protected]> # Copyright (C) 2010 Will McGugan <[email protected]> # Copyright (C) 2010 Ryan Kelly <[email protected]> # Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]> # Copyright (C) 2014 Thomas Amland # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and / or other materials provided with the distribution. # * Neither the name of the organization nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Portions of this code were taken from pyfilesystem, which uses the above # new BSD license. import ctypes.wintypes from functools import reduce LPVOID = ctypes.wintypes.LPVOID # Invalid handle value. INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value # File notification constants. FILE_NOTIFY_CHANGE_FILE_NAME = 0x01 FILE_NOTIFY_CHANGE_DIR_NAME = 0x02 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04 FILE_NOTIFY_CHANGE_SIZE = 0x08 FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 FILE_NOTIFY_CHANGE_CREATION = 0x040 FILE_NOTIFY_CHANGE_SECURITY = 0x0100 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 FILE_FLAG_OVERLAPPED = 0x40000000 FILE_LIST_DIRECTORY = 1 FILE_SHARE_READ = 0x01 FILE_SHARE_WRITE = 0x02 FILE_SHARE_DELETE = 0x04 OPEN_EXISTING = 3 VOLUME_NAME_NT = 0x02 # File action constants. FILE_ACTION_CREATED = 1 FILE_ACTION_DELETED = 2 FILE_ACTION_MODIFIED = 3 FILE_ACTION_RENAMED_OLD_NAME = 4 FILE_ACTION_RENAMED_NEW_NAME = 5 FILE_ACTION_DELETED_SELF = 0xFFFE FILE_ACTION_OVERFLOW = 0xFFFF # Aliases FILE_ACTION_ADDED = FILE_ACTION_CREATED FILE_ACTION_REMOVED = FILE_ACTION_DELETED FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF THREAD_TERMINATE = 0x0001 # IO waiting constants. 
WAIT_ABANDONED = 0x00000080 WAIT_IO_COMPLETION = 0x000000C0 WAIT_OBJECT_0 = 0x00000000 WAIT_TIMEOUT = 0x00000102 # Error codes ERROR_OPERATION_ABORTED = 995 class OVERLAPPED(ctypes.Structure): _fields_ = [('Internal', LPVOID), ('InternalHigh', LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('Pointer', LPVOID), ('hEvent', ctypes.wintypes.HANDLE), ] def _errcheck_bool(value, func, args): if not value: raise ctypes.WinError() return args def _errcheck_handle(value, func, args): if not value: raise ctypes.WinError() if value == INVALID_HANDLE_VALUE: raise ctypes.WinError() return args def _errcheck_dword(value, func, args): if value == 0xFFFFFFFF: raise ctypes.WinError() return args kernel32 = ctypes.WinDLL("kernel32") ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL ReadDirectoryChangesW.errcheck = _errcheck_bool ReadDirectoryChangesW.argtypes = ( ctypes.wintypes.HANDLE, # hDirectory LPVOID, # lpBuffer ctypes.wintypes.DWORD, # nBufferLength ctypes.wintypes.BOOL, # bWatchSubtree ctypes.wintypes.DWORD, # dwNotifyFilter ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned ctypes.POINTER(OVERLAPPED), # lpOverlapped LPVOID # FileIOCompletionRoutine # lpCompletionRoutine ) CreateFileW = kernel32.CreateFileW CreateFileW.restype = ctypes.wintypes.HANDLE CreateFileW.errcheck = _errcheck_handle CreateFileW.argtypes = ( ctypes.wintypes.LPCWSTR, # lpFileName ctypes.wintypes.DWORD, # dwDesiredAccess ctypes.wintypes.DWORD, # dwShareMode LPVOID, # lpSecurityAttributes ctypes.wintypes.DWORD, # dwCreationDisposition ctypes.wintypes.DWORD, # dwFlagsAndAttributes ctypes.wintypes.HANDLE # hTemplateFile ) CloseHandle = kernel32.CloseHandle CloseHandle.restype = ctypes.wintypes.BOOL CloseHandle.argtypes = ( ctypes.wintypes.HANDLE, # hObject ) CancelIoEx = kernel32.CancelIoEx CancelIoEx.restype = ctypes.wintypes.BOOL CancelIoEx.errcheck = _errcheck_bool CancelIoEx.argtypes = ( ctypes.wintypes.HANDLE, # hObject ctypes.POINTER(OVERLAPPED) # lpOverlapped ) CreateEvent = kernel32.CreateEventW CreateEvent.restype = ctypes.wintypes.HANDLE CreateEvent.errcheck = _errcheck_handle CreateEvent.argtypes = ( LPVOID, # lpEventAttributes ctypes.wintypes.BOOL, # bManualReset ctypes.wintypes.BOOL, # bInitialState ctypes.wintypes.LPCWSTR, # lpName ) SetEvent = kernel32.SetEvent SetEvent.restype = ctypes.wintypes.BOOL SetEvent.errcheck = _errcheck_bool SetEvent.argtypes = ( ctypes.wintypes.HANDLE, # hEvent ) WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD WaitForSingleObjectEx.errcheck = _errcheck_dword WaitForSingleObjectEx.argtypes = ( ctypes.wintypes.HANDLE, # hObject ctypes.wintypes.DWORD, # dwMilliseconds ctypes.wintypes.BOOL, # bAlertable ) CreateIoCompletionPort = kernel32.CreateIoCompletionPort CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE CreateIoCompletionPort.errcheck = _errcheck_handle CreateIoCompletionPort.argtypes = ( ctypes.wintypes.HANDLE, # FileHandle ctypes.wintypes.HANDLE, # ExistingCompletionPort LPVOID, # CompletionKey ctypes.wintypes.DWORD, # NumberOfConcurrentThreads ) GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL GetQueuedCompletionStatus.errcheck = _errcheck_bool GetQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort LPVOID, # lpNumberOfBytesTransferred LPVOID, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ctypes.wintypes.DWORD, 
# dwMilliseconds ) PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL PostQueuedCompletionStatus.errcheck = _errcheck_bool PostQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort ctypes.wintypes.DWORD, # lpNumberOfBytesTransferred ctypes.wintypes.DWORD, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ) GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW GetFinalPathNameByHandleW.restype = ctypes.wintypes.DWORD GetFinalPathNameByHandleW.errcheck = _errcheck_dword GetFinalPathNameByHandleW.argtypes = ( ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.LPWSTR, # lpszFilePath ctypes.wintypes.DWORD, # cchFilePath ctypes.wintypes.DWORD, # DWORD ) class FILE_NOTIFY_INFORMATION(ctypes.Structure): _fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD), ("Action", ctypes.wintypes.DWORD), ("FileNameLength", ctypes.wintypes.DWORD), # ("FileName", (ctypes.wintypes.WCHAR * 1))] ("FileName", (ctypes.c_char * 1))] LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION) # We don't need to recalculate these flags every time a call is made to # the win32 API functions. WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS WATCHDOG_FILE_SHARE_FLAGS = reduce( lambda x, y: x | y, [ FILE_SHARE_READ, FILE_SHARE_WRITE, FILE_SHARE_DELETE, ]) WATCHDOG_FILE_NOTIFY_FLAGS = reduce( lambda x, y: x | y, [ FILE_NOTIFY_CHANGE_FILE_NAME, FILE_NOTIFY_CHANGE_DIR_NAME, FILE_NOTIFY_CHANGE_ATTRIBUTES, FILE_NOTIFY_CHANGE_SIZE, FILE_NOTIFY_CHANGE_LAST_WRITE, FILE_NOTIFY_CHANGE_SECURITY, FILE_NOTIFY_CHANGE_LAST_ACCESS, FILE_NOTIFY_CHANGE_CREATION, ]) BUFFER_SIZE = 2048 def _parse_event_buffer(readBuffer, nBytes): results = [] while nBytes > 0: fni = ctypes.cast(readBuffer, LPFNI)[0] ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset # filename = ctypes.wstring_at(ptr, fni.FileNameLength) filename = ctypes.string_at(ptr, fni.FileNameLength) results.append((fni.Action, filename.decode('utf-16'))) numToSkip = fni.NextEntryOffset if numToSkip <= 0: break readBuffer = readBuffer[numToSkip:] nBytes -= numToSkip # numToSkip is long. nBytes should be long too. return results def _is_observed_path_deleted(handle, path): # Comparison of observed path and actual path, returned by # GetFinalPathNameByHandleW. If directory moved to the trash bin, or # deleted, actual path will not be equal to observed path. 
buff = ctypes.create_unicode_buffer(BUFFER_SIZE) GetFinalPathNameByHandleW(handle, buff, BUFFER_SIZE, VOLUME_NAME_NT) return buff.value != path def _generate_observed_path_deleted_event(): # Create synthetic event for notify that observed directory is deleted path = ctypes.create_unicode_buffer('.') event = FILE_NOTIFY_INFORMATION(0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8")) event_size = ctypes.sizeof(event) buff = ctypes.create_string_buffer(BUFFER_SIZE) ctypes.memmove(buff, ctypes.addressof(event), event_size) return buff, event_size def get_directory_handle(path): """Returns a Windows handle to the specified directory path.""" return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS, None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None) def close_directory_handle(handle): try: CancelIoEx(handle, None) # force ReadDirectoryChangesW to return CloseHandle(handle) # close directory handle except WindowsError: try: CloseHandle(handle) # close directory handle except Exception: return def read_directory_changes(handle, path, recursive): """Read changes to the directory using the specified directory handle. http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html """ event_buffer = ctypes.create_string_buffer(BUFFER_SIZE) nbytes = ctypes.wintypes.DWORD() try: ReadDirectoryChangesW(handle, ctypes.byref(event_buffer), len(event_buffer), recursive, WATCHDOG_FILE_NOTIFY_FLAGS, ctypes.byref(nbytes), None, None) except WindowsError as e: if e.winerror == ERROR_OPERATION_ABORTED: return [], 0 # Handle the case when the root path is deleted if _is_observed_path_deleted(handle, path): return _generate_observed_path_deleted_event() raise e # Python 2/3 compat try: int_class = long except NameError: int_class = int return event_buffer.raw, int_class(nbytes.value) class WinAPINativeEvent(object): def __init__(self, action, src_path): self.action = action self.src_path = src_path @property def is_added(self): return self.action == FILE_ACTION_CREATED @property def is_removed(self): return self.action == FILE_ACTION_REMOVED @property def is_modified(self): return self.action == FILE_ACTION_MODIFIED @property def is_renamed_old(self): return self.action == FILE_ACTION_RENAMED_OLD_NAME @property def is_renamed_new(self): return self.action == FILE_ACTION_RENAMED_NEW_NAME @property def is_removed_self(self): return self.action == FILE_ACTION_REMOVED_SELF def __repr__(self): return ("<%s: action=%d, src_path=%r>" % ( type(self).__name__, self.action, self.src_path)) def read_events(handle, path, recursive): buf, nbytes = read_directory_changes(handle, path, recursive) events = _parse_event_buffer(buf, nbytes) return [WinAPINativeEvent(action, src_path) for action, src_path in events]
13,063
Python
32.497436
103
0.714384
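This module is normally consumed through read_directory_changes.py, but its helpers can be exercised directly. The sketch below assumes a Windows host and a hypothetical directory; each read_events() call blocks inside ReadDirectoryChangesW until a batch of changes (or a cancellation) arrives.

from watchdog.observers.winapi import (
    close_directory_handle,
    get_directory_handle,
    read_events,
)

path = r"C:\temp\watched"  # hypothetical directory
handle = get_directory_handle(path)
try:
    for _ in range(3):  # read three batches of change notifications
        for event in read_events(handle, path, recursive=True):
            print(event)  # e.g. <WinAPINativeEvent: action=1, src_path=...>
finally:
    close_directory_handle(handle)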
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/polling.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.polling :synopsis: Polling emitter implementation. :author: [email protected] (Yesudeep Mangalapilly) Classes ------- .. autoclass:: PollingObserver :members: :show-inheritance: .. autoclass:: PollingObserverVFS :members: :show-inheritance: :special-members: """ from __future__ import with_statement import threading from functools import partial from watchdog.utils import stat as default_stat from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff from watchdog.observers.api import ( EventEmitter, BaseObserver, DEFAULT_OBSERVER_TIMEOUT, DEFAULT_EMITTER_TIMEOUT ) from watchdog.events import ( DirMovedEvent, DirDeletedEvent, DirCreatedEvent, DirModifiedEvent, FileMovedEvent, FileDeletedEvent, FileCreatedEvent, FileModifiedEvent ) try: from os import scandir except ImportError: from os import listdir as scandir class PollingEmitter(EventEmitter): """ Platform-independent emitter that polls a directory to detect file system changes. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, stat=default_stat, listdir=scandir): EventEmitter.__init__(self, event_queue, watch, timeout) self._snapshot = None self._lock = threading.Lock() self._take_snapshot = lambda: DirectorySnapshot( self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir) def on_thread_start(self): self._snapshot = self._take_snapshot() def queue_events(self, timeout): # We don't want to hit the disk continuously. # timeout behaves like an interval for polling emitters. if self.stopped_event.wait(timeout): return with self._lock: if not self.should_keep_running(): return # Get event diff between fresh snapshot and previous snapshot. # Update snapshot. try: new_snapshot = self._take_snapshot() except OSError: self.queue_event(DirDeletedEvent(self.watch.path)) self.stop() return events = DirectorySnapshotDiff(self._snapshot, new_snapshot) self._snapshot = new_snapshot # Files. for src_path in events.files_deleted: self.queue_event(FileDeletedEvent(src_path)) for src_path in events.files_modified: self.queue_event(FileModifiedEvent(src_path)) for src_path in events.files_created: self.queue_event(FileCreatedEvent(src_path)) for src_path, dest_path in events.files_moved: self.queue_event(FileMovedEvent(src_path, dest_path)) # Directories. for src_path in events.dirs_deleted: self.queue_event(DirDeletedEvent(src_path)) for src_path in events.dirs_modified: self.queue_event(DirModifiedEvent(src_path)) for src_path in events.dirs_created: self.queue_event(DirCreatedEvent(src_path)) for src_path, dest_path in events.dirs_moved: self.queue_event(DirMovedEvent(src_path, dest_path)) class PollingObserver(BaseObserver): """ Platform-independent observer that polls a directory to detect file system changes. 
""" def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout) class PollingObserverVFS(BaseObserver): """ File system independent observer that polls a directory to detect changes. """ def __init__(self, stat, listdir, polling_interval=1): """ :param stat: stat function. See ``os.stat`` for details. :param listdir: listdir function. See ``os.listdir`` for details. :type polling_interval: float :param polling_interval: interval in seconds between polling the file system. """ emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir) BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
4,929
Python
31.866666
88
0.661392
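Because PollingEmitter accepts pluggable stat and listdir callables, PollingObserverVFS can poll arbitrary file-system-like backends. A minimal sketch, here simply passing os.stat and os.scandir so it behaves like the plain PollingObserver with a two-second interval; the watched path is a placeholder.

import os
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver, PollingObserverVFS


class PrintHandler(FileSystemEventHandler):
    def on_created(self, event):
        print("created:", event.src_path)


# PollingObserver() works the same way with the default interval; the VFS
# variant is useful when stat/listdir must target a virtual file system.
observer = PollingObserverVFS(stat=os.stat, listdir=os.scandir, polling_interval=2)
observer.schedule(PrintHandler(), "/tmp/watched", recursive=False)  # placeholder path
observer.start()
try:
    time.sleep(6)  # roughly three polling cycles
finally:
    observer.stop()
    observer.join()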
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.inotify :synopsis: ``inotify(7)`` based emitter implementation. :author: Sebastien Martini <[email protected]> :author: Luke McCarthy <[email protected]> :author: [email protected] (Yesudeep Mangalapilly) :author: Tim Cuthbertson <[email protected]> :platforms: Linux 2.6.13+. .. ADMONITION:: About system requirements Recommended minimum kernel version: 2.6.25. Quote from the inotify(7) man page: "Inotify was merged into the 2.6.13 Linux kernel. The required library interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW, IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)" Therefore, you must ensure the system is running at least these versions of the kernel and the appropriate libraries. .. ADMONITION:: About recursiveness, event order, and event coalescing Quote from the inotify(7) man page: If successive output inotify events produced on the inotify file descriptor are identical (same wd, mask, cookie, and name) then they are coalesced into a single event if the older event has not yet been read (but see BUGS). The events returned by reading from an inotify file descriptor form an ordered queue. Thus, for example, it is guaranteed that when renaming from one directory to another, events will be produced in the correct order on the inotify file descriptor. ... Inotify monitoring of directories is not recursive: to monitor subdirectories under a directory, additional watches must be created. This emitter implementation therefore automatically adds watches for sub-directories if running in recursive mode. Some extremely useful articles and documentation: .. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en .. _intro to inotify: http://www.linuxjournal.com/article/8478 """ from __future__ import with_statement import os import threading from .inotify_buffer import InotifyBuffer from watchdog.observers.api import ( EventEmitter, BaseObserver, DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT ) from watchdog.events import ( DirDeletedEvent, DirModifiedEvent, DirMovedEvent, DirCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileCreatedEvent, generate_sub_moved_events, generate_sub_created_events, ) from watchdog.utils import unicode_paths class InotifyEmitter(EventEmitter): """ inotify(7)-based event emitter. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). 
:type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._lock = threading.Lock() self._inotify = None def on_thread_start(self): path = unicode_paths.encode(self.watch.path) self._inotify = InotifyBuffer(path, self.watch.is_recursive) def on_thread_stop(self): if self._inotify: self._inotify.close() def queue_events(self, timeout, full_events=False): # If "full_events" is true, then the method will report unmatched move events as separate events. # This behavior is by default only used by an InotifyFullEmitter. with self._lock: event = self._inotify.read_event() if event is None: return if isinstance(event, tuple): move_from, move_to = event src_path = self._decode_path(move_from.src_path) dest_path = self._decode_path(move_to.src_path) cls = DirMovedEvent if move_from.is_directory else FileMovedEvent self.queue_event(cls(src_path, dest_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) self.queue_event(DirModifiedEvent(os.path.dirname(dest_path))) if move_from.is_directory and self.watch.is_recursive: for sub_event in generate_sub_moved_events(src_path, dest_path): self.queue_event(sub_event) return src_path = self._decode_path(event.src_path) if event.is_moved_to: if full_events: cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(None, src_path)) else: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) if event.is_directory and self.watch.is_recursive: for sub_event in generate_sub_created_events(src_path): self.queue_event(sub_event) elif event.is_attrib: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(src_path)) elif event.is_modify: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(src_path)) elif event.is_delete or (event.is_moved_from and not full_events): cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_moved_from and full_events: cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(src_path, None)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_create: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) def _decode_path(self, path): """ Decode path only if unicode string was passed to this emitter. """ if isinstance(self.watch.path, bytes): return path return unicode_paths.decode(path) class InotifyFullEmitter(InotifyEmitter): """ inotify(7)-based event emitter. By default this class produces move events even if they are not matched. Such move events will have a ``None`` value for the unmatched part. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). 
:type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): InotifyEmitter.__init__(self, event_queue, watch, timeout) def queue_events(self, timeout, events=True): InotifyEmitter.queue_events(self, timeout, full_events=events) class InotifyObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False): if (generate_full_events): BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout) else: BaseObserver.__init__(self, emitter_class=InotifyEmitter, timeout=timeout)
8,525
Python
37.754545
107
0.658534
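With generate_full_events=True the observer above uses InotifyFullEmitter, so an unmatched half of a move is reported as a move event with None on the missing side rather than as a bare create or delete. A Linux-only sketch with a placeholder path:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.inotify import InotifyObserver


class MoveHandler(FileSystemEventHandler):
    def on_moved(self, event):
        # Either side may be None when the matching half of the move
        # happened outside the watched tree.
        print("moved:", event.src_path, "->", event.dest_path)


observer = InotifyObserver(generate_full_events=True)
observer.schedule(MoveHandler(), "/tmp/watched", recursive=True)  # placeholder path
observer.start()
try:
    time.sleep(10)
finally:
    observer.stop()
    observer.join()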
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/fsevents2.py
# -*- coding: utf-8 -*- # # Copyright 2014 Thomas Amland <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.fsevents2 :synopsis: FSEvents based emitter implementation. :platforms: Mac OS X """ import os import logging import unicodedata from threading import Thread from watchdog.utils.compat import queue from watchdog.events import ( FileDeletedEvent, FileModifiedEvent, FileCreatedEvent, FileMovedEvent, DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, DirMovedEvent ) from watchdog.observers.api import ( BaseObserver, EventEmitter, DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, ) # pyobjc import AppKit from FSEvents import ( FSEventStreamCreate, CFRunLoopGetCurrent, FSEventStreamScheduleWithRunLoop, FSEventStreamStart, CFRunLoopRun, CFRunLoopStop, FSEventStreamStop, FSEventStreamInvalidate, FSEventStreamRelease, ) from FSEvents import ( kCFAllocatorDefault, kCFRunLoopDefaultMode, kFSEventStreamEventIdSinceNow, kFSEventStreamCreateFlagNoDefer, kFSEventStreamCreateFlagFileEvents, kFSEventStreamEventFlagItemCreated, kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemInodeMetaMod, kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemModified, kFSEventStreamEventFlagItemFinderInfoMod, kFSEventStreamEventFlagItemChangeOwner, kFSEventStreamEventFlagItemXattrMod, kFSEventStreamEventFlagItemIsDir, kFSEventStreamEventFlagItemIsSymlink, ) logger = logging.getLogger(__name__) class FSEventsQueue(Thread): """ Low level FSEvents client. """ def __init__(self, path): Thread.__init__(self) self._queue = queue.Queue() self._run_loop = None if isinstance(path, bytes): path = path.decode('utf-8') self._path = unicodedata.normalize('NFC', path) context = None latency = 1.0 self._stream_ref = FSEventStreamCreate( kCFAllocatorDefault, self._callback, context, [self._path], kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents) if self._stream_ref is None: raise IOError("FSEvents. Could not create stream.") def run(self): pool = AppKit.NSAutoreleasePool.alloc().init() self._run_loop = CFRunLoopGetCurrent() FSEventStreamScheduleWithRunLoop( self._stream_ref, self._run_loop, kCFRunLoopDefaultMode) if not FSEventStreamStart(self._stream_ref): FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) raise IOError("FSEvents. Could not start stream.") CFRunLoopRun() FSEventStreamStop(self._stream_ref) FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) del pool # Make sure waiting thread is notified self._queue.put(None) def stop(self): if self._run_loop is not None: CFRunLoopStop(self._run_loop) def _callback(self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs): events = [NativeEvent(path, flags, _id) for path, flags, _id in zip(eventPaths, eventFlags, eventIDs)] logger.debug("FSEvents callback. 
Got %d events:" % numEvents) for e in events: logger.debug(e) self._queue.put(events) def read_events(self): """ Returns a list of one or more events, or None if there are no more events to be read. """ if not self.is_alive(): return None return self._queue.get() class NativeEvent(object): def __init__(self, path, flags, event_id): self.path = path self.flags = flags self.event_id = event_id self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated) self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved) self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed) self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified) self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner) self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod) self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod) self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod) self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink) self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir) @property def _event_type(self): if self.is_created: return "Created" if self.is_removed: return "Removed" if self.is_renamed: return "Renamed" if self.is_modified: return "Modified" if self.is_inode_meta_mod: return "InodeMetaMod" if self.is_xattr_mod: return "XattrMod" return "Unknown" def __repr__(self): s = "<%s: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>" return s % (type(self).__name__, repr(self.path), self._event_type, self.is_directory, hex(self.flags), self.event_id) class FSEventsEmitter(EventEmitter): """ FSEvents based event emitter. Handles conversion of native events. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._fsevents = FSEventsQueue(watch.path) self._fsevents.start() def on_thread_stop(self): self._fsevents.stop() def queue_events(self, timeout): events = self._fsevents.read_events() if events is None: return i = 0 while i < len(events): event = events[i] # For some reason the create and remove flags are sometimes also # set for rename and modify type events, so let those take # precedence. if event.is_renamed: # Internal moves appear to always be consecutive in the same # buffer and have IDs differ by exactly one (while others # don't), making it possible to pair up the two events coming # from a single move operation. (None of this is documented!) # Otherwise, guess whether file was moved in or out. 
# TODO: handle id wrapping if (i + 1 < len(events) and events[i + 1].is_renamed and events[i + 1].event_id == event.event_id + 1): cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(event.path, events[i + 1].path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path))) i += 1 elif os.path.exists(event.path): cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) else: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) # TODO: generate events for tree elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod : cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(event.path)) elif event.is_created: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) elif event.is_removed: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) i += 1 class FSEventsObserver2(BaseObserver): def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout)
9,142
Python
36.016194
100
0.646467
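This backend depends on the pyobjc bindings imported above (AppKit and FSEvents). A macOS-only sketch with a placeholder path; enabling DEBUG logging for this module prints each NativeEvent as the run-loop callback delivers it.

import logging
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.fsevents2 import FSEventsObserver2

logging.basicConfig(level=logging.DEBUG)  # surfaces the NativeEvent debug lines


class ModifyHandler(FileSystemEventHandler):
    def on_modified(self, event):
        print("modified:", event.src_path)


observer = FSEventsObserver2()
observer.schedule(ModifyHandler(), "/tmp/watched")  # placeholder path
observer.start()
try:
    time.sleep(10)
finally:
    observer.stop()
    observer.join()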
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/kqueue.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.kqueue :synopsis: ``kqueue(2)`` based emitter implementation. :author: [email protected] (Yesudeep Mangalapilly) :platforms: Mac OS X and BSD with kqueue(2). .. WARNING:: kqueue is a very heavyweight way to monitor file systems. Each kqueue-detected directory modification triggers a full directory scan. Traversing the entire directory tree and opening file descriptors for all files will create performance problems. We need to find a way to re-scan only those directories which report changes and do a diff between two sub-DirectorySnapshots perhaps. .. ADMONITION:: About OS X performance guidelines Quote from the `Mac OS X File System Performance Guidelines`_: "When you only want to track changes on a file or directory, be sure to open it using the ``O_EVTONLY`` flag. This flag prevents the file or directory from being marked as open or in use. This is important if you are tracking files on a removable volume and the user tries to unmount the volume. With this flag in place, the system knows it can dismiss the volume. If you had opened the files or directories without this flag, the volume would be marked as busy and would not be unmounted." ``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files. More information here: http://www.mlsite.net/blog/?p=2312 Classes ------- .. autoclass:: KqueueEmitter :members: :show-inheritance: Collections and Utility Classes ------------------------------- .. autoclass:: KeventDescriptor :members: :show-inheritance: .. autoclass:: KeventDescriptorSet :members: :show-inheritance: .. _Mac OS X File System Performance Guidelines: http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD """ from __future__ import with_statement from watchdog.utils import platform import threading import errno from stat import S_ISDIR import os import os.path import select from pathtools.path import absolute_path from watchdog.observers.api import ( BaseObserver, EventEmitter, DEFAULT_OBSERVER_TIMEOUT, DEFAULT_EMITTER_TIMEOUT ) from watchdog.utils import stat as default_stat from watchdog.utils.dirsnapshot import DirectorySnapshot from watchdog.events import ( DirMovedEvent, DirDeletedEvent, DirCreatedEvent, DirModifiedEvent, FileMovedEvent, FileDeletedEvent, FileCreatedEvent, FileModifiedEvent, EVENT_TYPE_MOVED, EVENT_TYPE_DELETED, EVENT_TYPE_CREATED, generate_sub_moved_events, ) # Maximum number of events to process. MAX_EVENTS = 4096 # O_EVTONLY value from the header files for OS X only. O_EVTONLY = 0x8000 # Pre-calculated values for the kevent filter, flags, and fflags attributes. 
if platform.is_darwin(): WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY else: WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR WATCHDOG_KQ_FFLAGS = ( select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK | select.KQ_NOTE_RENAME | select.KQ_NOTE_REVOKE ) # Flag tests. def is_deleted(kev): """Determines whether the given kevent represents deletion.""" return kev.fflags & select.KQ_NOTE_DELETE def is_modified(kev): """Determines whether the given kevent represents modification.""" fflags = kev.fflags return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE) def is_attrib_modified(kev): """Determines whether the given kevent represents attribute modification.""" return kev.fflags & select.KQ_NOTE_ATTRIB def is_renamed(kev): """Determines whether the given kevent represents movement.""" return kev.fflags & select.KQ_NOTE_RENAME class KeventDescriptorSet(object): """ Thread-safe kevent descriptor collection. """ def __init__(self): # Set of KeventDescriptor self._descriptors = set() # Descriptor for a given path. self._descriptor_for_path = dict() # Descriptor for a given fd. self._descriptor_for_fd = dict() # List of kevent objects. self._kevents = list() self._lock = threading.Lock() @property def kevents(self): """ List of kevents monitored. """ with self._lock: return self._kevents @property def paths(self): """ List of paths for which kevents have been created. """ with self._lock: return list(self._descriptor_for_path.keys()) def get_for_fd(self, fd): """ Given a file descriptor, returns the kevent descriptor object for it. :param fd: OS file descriptor. :type fd: ``int`` :returns: A :class:`KeventDescriptor` object. """ with self._lock: return self._descriptor_for_fd[fd] def get(self, path): """ Obtains a :class:`KeventDescriptor` object for the specified path. :param path: Path for which the descriptor will be obtained. """ with self._lock: path = absolute_path(path) return self._get(path) def __contains__(self, path): """ Determines whether a :class:`KeventDescriptor has been registered for the specified path. :param path: Path for which the descriptor will be obtained. """ with self._lock: path = absolute_path(path) return self._has_path(path) def add(self, path, is_directory): """ Adds a :class:`KeventDescriptor` to the collection for the given path. :param path: The path for which a :class:`KeventDescriptor` object will be added. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ with self._lock: path = absolute_path(path) if not self._has_path(path): self._add_descriptor(KeventDescriptor(path, is_directory)) def remove(self, path): """ Removes the :class:`KeventDescriptor` object for the given path if it already exists. :param path: Path for which the :class:`KeventDescriptor` object will be removed. """ with self._lock: path = absolute_path(path) if self._has_path(path): self._remove_descriptor(self._get(path)) def clear(self): """ Clears the collection and closes all open descriptors. """ with self._lock: for descriptor in self._descriptors: descriptor.close() self._descriptors.clear() self._descriptor_for_fd.clear() self._descriptor_for_path.clear() self._kevents = [] # Thread-unsafe methods. Locking is provided at a higher level. 
def _get(self, path): """Returns a kevent descriptor for a given path.""" return self._descriptor_for_path[path] def _has_path(self, path): """Determines whether a :class:`KeventDescriptor` for the specified path exists already in the collection.""" return path in self._descriptor_for_path def _add_descriptor(self, descriptor): """ Adds a descriptor to the collection. :param descriptor: An instance of :class:`KeventDescriptor` to be added. """ self._descriptors.add(descriptor) self._kevents.append(descriptor.kevent) self._descriptor_for_path[descriptor.path] = descriptor self._descriptor_for_fd[descriptor.fd] = descriptor def _remove_descriptor(self, descriptor): """ Removes a descriptor from the collection. :param descriptor: An instance of :class:`KeventDescriptor` to be removed. """ self._descriptors.remove(descriptor) del self._descriptor_for_fd[descriptor.fd] del self._descriptor_for_path[descriptor.path] self._kevents.remove(descriptor.kevent) descriptor.close() class KeventDescriptor(object): """ A kevent descriptor convenience data structure to keep together: * kevent * directory status * path * file descriptor :param path: Path string for which a kevent descriptor will be created. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ def __init__(self, path, is_directory): self._path = absolute_path(path) self._is_directory = is_directory self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS) self._kev = select.kevent(self._fd, filter=WATCHDOG_KQ_FILTER, flags=WATCHDOG_KQ_EV_FLAGS, fflags=WATCHDOG_KQ_FFLAGS) @property def fd(self): """OS file descriptor for the kevent descriptor.""" return self._fd @property def path(self): """The path associated with the kevent descriptor.""" return self._path @property def kevent(self): """The kevent object associated with the kevent descriptor.""" return self._kev @property def is_directory(self): """Determines whether the kevent descriptor refers to a directory. :returns: ``True`` or ``False`` """ return self._is_directory def close(self): """ Closes the file descriptor associated with a kevent descriptor. """ try: os.close(self.fd) except OSError: pass @property def key(self): return (self.path, self.is_directory) def __eq__(self, descriptor): return self.key == descriptor.key def __ne__(self, descriptor): return self.key != descriptor.key def __hash__(self): return hash(self.key) def __repr__(self): return "<%s: path=%s, is_directory=%s>"\ % (type(self).__name__, self.path, self.is_directory) class KqueueEmitter(EventEmitter): """ kqueue(2)-based event emitter. .. ADMONITION:: About ``kqueue(2)`` behavior and this implementation ``kqueue(2)`` monitors file system events only for open descriptors, which means, this emitter does a lot of book-keeping behind the scenes to keep track of open descriptors for every entry in the monitored directory tree. This also means the number of maximum open file descriptors on your system must be increased **manually**. Usually, issuing a call to ``ulimit`` should suffice:: ulimit -n 1024 Ensure that you pick a number that is larger than the number of files you expect to be monitored. ``kqueue(2)`` does not provide enough information about the following things: * The destination path of a file or directory that is renamed. * Creation of a file or directory within a directory; in this case, ``kqueue(2)`` only indicates a modified event on the parent directory. 
Therefore, this emitter takes a snapshot of the directory tree when ``kqueue(2)`` detects a change on the file system to be able to determine the above information. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` :param stat: stat function. See ``os.stat`` for details. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, stat=default_stat): EventEmitter.__init__(self, event_queue, watch, timeout) self._kq = select.kqueue() self._lock = threading.RLock() # A collection of KeventDescriptor. self._descriptors = KeventDescriptorSet() def custom_stat(path, self=self): stat_info = stat(path) self._register_kevent(path, S_ISDIR(stat_info.st_mode)) return stat_info self._snapshot = DirectorySnapshot(watch.path, recursive=watch.is_recursive, stat=custom_stat) def _register_kevent(self, path, is_directory): """ Registers a kevent descriptor for the given path. :param path: Path for which a kevent descriptor will be created. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ try: self._descriptors.add(path, is_directory) except OSError as e: if e.errno == errno.ENOENT: # Probably dealing with a temporary file that was created # and then quickly deleted before we could open # a descriptor for it. Therefore, simply queue a sequence # of created and deleted events for the path. # path = absolute_path(path) # if is_directory: # self.queue_event(DirCreatedEvent(path)) # self.queue_event(DirDeletedEvent(path)) # else: # self.queue_event(FileCreatedEvent(path)) # self.queue_event(FileDeletedEvent(path)) # TODO: We could simply ignore these files. # Locked files cause the python process to die with # a bus error when we handle temporary files. # eg. .git/index.lock when running tig operations. # I don't fully understand this at the moment. pass elif e.errno == errno.EOPNOTSUPP: # Probably dealing with the socket or special file # mounted through a file system that does not support # access to it (e.g. NFS). On BSD systems look at # EOPNOTSUPP in man 2 open. pass else: # All other errors are propagated. raise def _unregister_kevent(self, path): """ Convenience function to close the kevent descriptor for a specified kqueue-monitored path. :param path: Path for which the kevent descriptor will be closed. """ self._descriptors.remove(path) def queue_event(self, event): """ Handles queueing a single event object. :param event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass. """ # Handles all the book keeping for queued events. # We do not need to fire moved/deleted events for all subitems in # a directory tree here, because this function is called by kqueue # for all those events anyway. EventEmitter.queue_event(self, event) if event.event_type == EVENT_TYPE_CREATED: self._register_kevent(event.src_path, event.is_directory) elif event.event_type == EVENT_TYPE_MOVED: self._unregister_kevent(event.src_path) self._register_kevent(event.dest_path, event.is_directory) elif event.event_type == EVENT_TYPE_DELETED: self._unregister_kevent(event.src_path) def _gen_kqueue_events(self, kev, ref_snapshot, new_snapshot): """ Generate events from the kevent list returned from the call to :meth:`select.kqueue.control`. .. 
NOTE:: kqueue only tells us about deletions, file modifications, attribute modifications. The other events, namely, file creation, directory modification, file rename, directory rename, directory creation, etc. are determined by comparing directory snapshots. """ descriptor = self._descriptors.get_for_fd(kev.ident) src_path = descriptor.path if is_renamed(kev): # Kqueue does not specify the destination names for renames # to, so we have to process these using a snapshot # of the directory. for event in self._gen_renamed_events(src_path, descriptor.is_directory, ref_snapshot, new_snapshot): yield event elif is_attrib_modified(kev): if descriptor.is_directory: yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_modified(kev): if descriptor.is_directory: if self.watch.is_recursive or self.watch.path == src_path: # When a directory is modified, it may be due to # sub-file/directory renames or new file/directory # creation. We determine all this by comparing # snapshots later. yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_deleted(kev): if descriptor.is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) def _parent_dir_modified(self, src_path): """ Helper to generate a DirModifiedEvent on the parent of src_path. """ return DirModifiedEvent(os.path.dirname(src_path)) def _gen_renamed_events(self, src_path, is_directory, ref_snapshot, new_snapshot): """ Compares information from two directory snapshots (one taken before the rename operation and another taken right after) to determine the destination path of the file system object renamed, and yields the appropriate events to be queued. """ try: f_inode = ref_snapshot.inode(src_path) except KeyError: # Probably caught a temporary file/directory that was renamed # and deleted. Fires a sequence of created and deleted events # for the path. if is_directory: yield DirCreatedEvent(src_path) yield DirDeletedEvent(src_path) else: yield FileCreatedEvent(src_path) yield FileDeletedEvent(src_path) # We don't process any further and bail out assuming # the event represents deletion/creation instead of movement. return dest_path = new_snapshot.path(f_inode) if dest_path is not None: dest_path = absolute_path(dest_path) if is_directory: event = DirMovedEvent(src_path, dest_path) yield event else: yield FileMovedEvent(src_path, dest_path) yield self._parent_dir_modified(src_path) yield self._parent_dir_modified(dest_path) if is_directory: # TODO: Do we need to fire moved events for the items # inside the directory tree? Does kqueue do this # all by itself? Check this and then enable this code # only if it doesn't already. # A: It doesn't. So I've enabled this block. if self.watch.is_recursive: for sub_event in generate_sub_moved_events(src_path, dest_path): yield sub_event else: # If the new snapshot does not have an inode for the # old path, we haven't found the new name. Therefore, # we mark it as deleted and unregister the path. if is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) yield self._parent_dir_modified(src_path) def _read_events(self, timeout=None): """ Reads events from a call to the blocking :meth:`select.kqueue.control()` method. :param timeout: Blocking timeout for reading events. 
:type timeout: ``float`` (seconds) """ return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout) def queue_events(self, timeout): """ Queues events by reading them from a call to the blocking :meth:`select.kqueue.control()` method. :param timeout: Blocking timeout for reading events. :type timeout: ``float`` (seconds) """ with self._lock: try: event_list = self._read_events(timeout) # TODO: investigate why order appears to be reversed event_list.reverse() # Take a fresh snapshot of the directory and update the # saved snapshot. new_snapshot = DirectorySnapshot(self.watch.path, self.watch.is_recursive) ref_snapshot = self._snapshot self._snapshot = new_snapshot diff_events = new_snapshot - ref_snapshot # Process events for directory_created in diff_events.dirs_created: self.queue_event(DirCreatedEvent(directory_created)) for file_created in diff_events.files_created: self.queue_event(FileCreatedEvent(file_created)) for file_modified in diff_events.files_modified: self.queue_event(FileModifiedEvent(file_modified)) for kev in event_list: for event in self._gen_kqueue_events(kev, ref_snapshot, new_snapshot): self.queue_event(event) except OSError as e: if e.errno != errno.EBADF: raise def on_thread_stop(self): # Clean up. with self._lock: self._descriptors.clear() self._kq.close() class KqueueObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=KqueueEmitter, timeout=timeout)
24,355
Python
33.449788
159
0.585424
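Since this emitter keeps one open descriptor per monitored entry, the docstring's ulimit advice matters in practice. A BSD/macOS sketch that raises the soft RLIMIT_NOFILE from Python before scheduling a watch; the limit value and the watched path are illustrative assumptions.

import resource
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.kqueue import KqueueObserver

# Raise the soft file-descriptor limit toward the hard limit, mirroring
# the `ulimit -n 1024` advice in the docstring above.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (min(4096, hard), hard))


class DeleteHandler(FileSystemEventHandler):
    def on_deleted(self, event):
        print("deleted:", event.src_path)


observer = KqueueObserver()
observer.schedule(DeleteHandler(), "/tmp/watched", recursive=True)  # placeholder path
observer.start()
try:
    time.sleep(10)
finally:
    observer.stop()
    observer.join()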
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers :synopsis: Observer that picks a native implementation if available. :author: [email protected] (Yesudeep Mangalapilly) Classes ======= .. autoclass:: Observer :members: :show-inheritance: :inherited-members: Observer thread that schedules watching directories and dispatches calls to event handlers. You can also import platform specific classes directly and use it instead of :class:`Observer`. Here is a list of implemented observer classes.: ============== ================================ ============================== Class Platforms Note ============== ================================ ============================== |Inotify| Linux 2.6.13+ ``inotify(7)`` based observer |FSEvents| Mac OS X FSEvents based observer |Kqueue| Mac OS X and BSD with kqueue(2) ``kqueue(2)`` based observer |WinApi| MS Windows Windows API-based observer |Polling| Any fallback implementation ============== ================================ ============================== .. |Inotify| replace:: :class:`.inotify.InotifyObserver` .. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver` .. |Kqueue| replace:: :class:`.kqueue.KqueueObserver` .. |WinApi| replace:: :class:`.read_directory_changes.WindowsApiObserver` .. |WinApiAsync| replace:: :class:`.read_directory_changes_async.WindowsApiAsyncObserver` .. |Polling| replace:: :class:`.polling.PollingObserver` """ import warnings from watchdog.utils import platform from watchdog.utils import UnsupportedLibc if platform.is_linux(): try: from .inotify import InotifyObserver as Observer except UnsupportedLibc: from .polling import PollingObserver as Observer elif platform.is_darwin(): try: from .fsevents import FSEventsObserver as Observer except Exception: try: from .kqueue import KqueueObserver as Observer warnings.warn("Failed to import fsevents. Fall back to kqueue") except Exception: from .polling import PollingObserver as Observer warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.") elif platform.is_bsd(): from .kqueue import KqueueObserver as Observer elif platform.is_windows(): # TODO: find a reliable way of checking Windows version and import # polling explicitly for Windows XP try: from .read_directory_changes import WindowsApiObserver as Observer except Exception: from .polling import PollingObserver as Observer warnings.warn("Failed to import read_directory_changes. Fall back to polling.") else: from .polling import PollingObserver as Observer __all__ = ["Observer"]
3,528
Python
36.542553
89
0.649093
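Because the module above re-exports whichever native observer imported successfully as Observer, consumer code stays platform-agnostic. A short usage sketch (the watched path and handler are illustrative):

import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintingHandler(FileSystemEventHandler):
    # A real handler would usually override on_created/on_modified/etc.
    def on_any_event(self, event):
        print(event.event_type, event.src_path)

observer = Observer()  # inotify, FSEvents, kqueue, WinAPI, or polling
observer.schedule(PrintingHandler(), '/tmp/watched', recursive=True)
observer.start()
try:
    time.sleep(10)  # watch for a while
finally:
    observer.stop()
    observer.join()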
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/read_directory_changes.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import os.path import time from watchdog.events import ( DirCreatedEvent, DirMovedEvent, DirModifiedEvent, FileCreatedEvent, FileDeletedEvent, FileMovedEvent, FileModifiedEvent, generate_sub_moved_events, generate_sub_created_events, ) from watchdog.observers.api import ( EventEmitter, BaseObserver, DEFAULT_OBSERVER_TIMEOUT, DEFAULT_EMITTER_TIMEOUT ) from watchdog.observers.winapi import ( read_events, get_directory_handle, close_directory_handle, ) # HACK: WATCHDOG_TRAVERSE_MOVED_DIR_DELAY = 1 # seconds class WindowsApiEmitter(EventEmitter): """ Windows API-based emitter that uses ReadDirectoryChangesW to detect file system changes for a watch. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._lock = threading.Lock() self._handle = None def on_thread_start(self): self._handle = get_directory_handle(self.watch.path) def on_thread_stop(self): if self._handle: close_directory_handle(self._handle) def _read_events(self): return read_events(self._handle, self.watch.path, self.watch.is_recursive) def queue_events(self, timeout): winapi_events = self._read_events() with self._lock: last_renamed_src_path = "" for winapi_event in winapi_events: src_path = os.path.join(self.watch.path, winapi_event.src_path) if winapi_event.is_renamed_old: last_renamed_src_path = src_path elif winapi_event.is_renamed_new: dest_path = src_path src_path = last_renamed_src_path if os.path.isdir(dest_path): event = DirMovedEvent(src_path, dest_path) if self.watch.is_recursive: # HACK: We introduce a forced delay before # traversing the moved directory. This will read # only file movement that finishes within this # delay time. time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) # The following block of code may not # obtain moved events for the entire tree if # the I/O is not completed within the above # delay time. So, it's not guaranteed to work. # TODO: Come up with a better solution, possibly # a way to wait for I/O to complete before # queuing events. 
for sub_moved_event in generate_sub_moved_events(src_path, dest_path): self.queue_event(sub_moved_event) self.queue_event(event) else: self.queue_event(FileMovedEvent(src_path, dest_path)) elif winapi_event.is_modified: cls = DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent self.queue_event(cls(src_path)) elif winapi_event.is_added: isdir = os.path.isdir(src_path) cls = DirCreatedEvent if isdir else FileCreatedEvent self.queue_event(cls(src_path)) if isdir and self.watch.is_recursive: # If a directory is moved from outside the watched folder to inside it # we only get a created directory event out of it, not any events for its children # so use the same hack as for file moves to get the child events time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) sub_events = generate_sub_created_events(src_path) for sub_created_event in sub_events: self.queue_event(sub_created_event) elif winapi_event.is_removed: self.queue_event(FileDeletedEvent(src_path)) elif winapi_event.is_removed_self: self.stop() class WindowsApiObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=WindowsApiEmitter, timeout=timeout)
5,381
Python
38.284671
106
0.586508
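ReadDirectoryChangesW reports a move as two consecutive records, an old-name record followed by a new-name record, which is why the emitter above carries last_renamed_src_path across loop iterations. The pairing logic in isolation, sketched over hypothetical event objects that expose is_renamed_old, is_renamed_new, and path:

# Sketch of the rename pairing done in WindowsApiEmitter.queue_events.
def pair_renames(events):
    last_renamed_src_path = None
    for event in events:
        if event.is_renamed_old:
            last_renamed_src_path = event.path  # remember the source half
        elif event.is_renamed_new:
            yield (last_renamed_src_path, event.path)  # (src, dest) move
            last_renamed_src_path = None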
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/api.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import threading from watchdog.utils import BaseThread from watchdog.utils.compat import queue from watchdog.utils.bricks import SkipRepeatsQueue try: from pathlib import Path as _PATH_CLASSES except ImportError: _PATH_CLASSES = () DEFAULT_EMITTER_TIMEOUT = 1 # in seconds. DEFAULT_OBSERVER_TIMEOUT = 1 # in seconds. # Collection classes class EventQueue(SkipRepeatsQueue): """Thread-safe event queue based on a special queue that skips adding the same event (:class:`FileSystemEvent`) multiple times consecutively. This avoids dispatching multiple event handling calls when multiple identical events are produced more quickly than an observer can consume them. """ class ObservedWatch(object): """A scheduled watch. :param path: Path string. :param recursive: ``True`` if watch is recursive; ``False`` otherwise. """ def __init__(self, path, recursive): if isinstance(path, _PATH_CLASSES): self._path = str(path) else: self._path = path self._is_recursive = recursive @property def path(self): """The path that this watch monitors.""" return self._path @property def is_recursive(self): """Determines whether subdirectories are watched for the path.""" return self._is_recursive @property def key(self): return self.path, self.is_recursive def __eq__(self, watch): return self.key == watch.key def __ne__(self, watch): return self.key != watch.key def __hash__(self): return hash(self.key) def __repr__(self): return "<%s: path=%s, is_recursive=%s>" % ( type(self).__name__, self.path, self.is_recursive) # Observer classes class EventEmitter(BaseThread): """ Producer thread base class subclassed by event emitters that generate events and populate a queue with them. :param event_queue: The event queue to populate with generated events. :type event_queue: :class:`watchdog.events.EventQueue` :param watch: The watch to observe and produce events for. :type watch: :class:`ObservedWatch` :param timeout: Timeout (in seconds) between successive attempts at reading events. :type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): BaseThread.__init__(self) self._event_queue = event_queue self._watch = watch self._timeout = timeout @property def timeout(self): """ Blocking timeout for reading events. """ return self._timeout @property def watch(self): """ The watch associated with this emitter. """ return self._watch def queue_event(self, event): """ Queues a single event. :param event: Event to be queued. :type event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass. """ self._event_queue.put((event, self.watch)) def queue_events(self, timeout): """Override this method to populate the event queue with events per interval period. :param timeout: Timeout (in seconds) between successive attempts at reading events.
:type timeout: ``float`` """ def run(self): while self.should_keep_running(): self.queue_events(self.timeout) class EventDispatcher(BaseThread): """ Consumer thread base class subclassed by event observer threads that dispatch events from an event queue to appropriate event handlers. :param timeout: Event queue blocking timeout (in seconds). :type timeout: ``float`` """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseThread.__init__(self) self._event_queue = EventQueue() self._timeout = timeout @property def timeout(self): """Event queue block timeout.""" return self._timeout @property def event_queue(self): """The event queue which is populated with file system events by emitters and from which events are dispatched by a dispatcher thread.""" return self._event_queue def dispatch_events(self, event_queue, timeout): """Override this method to consume events from an event queue, blocking on the queue for the specified timeout before raising :class:`queue.Empty`. :param event_queue: Event queue to populate with one set of events. :type event_queue: :class:`EventQueue` :param timeout: Interval period (in seconds) to wait before timing out on the event queue. :type timeout: ``float`` :raises: :class:`queue.Empty` """ def run(self): while self.should_keep_running(): try: self.dispatch_events(self.event_queue, self.timeout) except queue.Empty: continue class BaseObserver(EventDispatcher): """Base observer.""" def __init__(self, emitter_class, timeout=DEFAULT_OBSERVER_TIMEOUT): EventDispatcher.__init__(self, timeout) self._emitter_class = emitter_class self._lock = threading.RLock() self._watches = set() self._handlers = dict() self._emitters = set() self._emitter_for_watch = dict() def _add_emitter(self, emitter): self._emitter_for_watch[emitter.watch] = emitter self._emitters.add(emitter) def _remove_emitter(self, emitter): del self._emitter_for_watch[emitter.watch] self._emitters.remove(emitter) emitter.stop() try: emitter.join() except RuntimeError: pass def _clear_emitters(self): for emitter in self._emitters: emitter.stop() for emitter in self._emitters: try: emitter.join() except RuntimeError: pass self._emitters.clear() self._emitter_for_watch.clear() def _add_handler_for_watch(self, event_handler, watch): if watch not in self._handlers: self._handlers[watch] = set() self._handlers[watch].add(event_handler) def _remove_handlers_for_watch(self, watch): del self._handlers[watch] @property def emitters(self): """Returns the event emitters created by this observer.""" return self._emitters def start(self): for emitter in self._emitters.copy(): try: emitter.start() except Exception: self._remove_emitter(emitter) raise super(BaseObserver, self).start() def schedule(self, event_handler, path, recursive=False): """ Schedules watching a path and calls appropriate methods specified in the given event handler in response to file system events. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param path: Directory path that will be monitored. :type path: ``str`` :param recursive: ``True`` if events will be emitted for sub-directories traversed recursively; ``False`` otherwise. :type recursive: ``bool`` :return: An :class:`ObservedWatch` object instance representing a watch.
""" with self._lock: watch = ObservedWatch(path, recursive) self._add_handler_for_watch(event_handler, watch) # If we don't have an emitter for this watch already, create it. if self._emitter_for_watch.get(watch) is None: emitter = self._emitter_class(event_queue=self.event_queue, watch=watch, timeout=self.timeout) self._add_emitter(emitter) if self.is_alive(): emitter.start() self._watches.add(watch) return watch def add_handler_for_watch(self, event_handler, watch): """Adds a handler for the given watch. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param watch: The watch to add a handler for. :type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: self._add_handler_for_watch(event_handler, watch) def remove_handler_for_watch(self, event_handler, watch): """Removes a handler for the given watch. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param watch: The watch to remove a handler for. :type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: self._handlers[watch].remove(event_handler) def unschedule(self, watch): """Unschedules a watch. :param watch: The watch to unschedule. :type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: emitter = self._emitter_for_watch[watch] del self._handlers[watch] self._remove_emitter(emitter) self._watches.remove(watch) def unschedule_all(self): """Unschedules all watches and detaches all associated event handlers.""" with self._lock: self._handlers.clear() self._clear_emitters() self._watches.clear() def on_thread_stop(self): self.unschedule_all() def dispatch_events(self, event_queue, timeout): event, watch = event_queue.get(block=True, timeout=timeout) with self._lock: # To allow unschedule/stop and safe removal of event handlers # within event handlers themselves, check if the handler is still # registered after every dispatch. for handler in list(self._handlers.get(watch, [])): if handler in self._handlers.get(watch, []): handler.dispatch(event) event_queue.task_done()
11,992
Python
30.727513
83
0.5999
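The producer/consumer split above means a new back end only has to override EventEmitter.queue_events; threading, the shared queue, and handler dispatch all come from the base classes. A minimal sketch of a custom emitter wired into BaseObserver (the heartbeat event it emits is illustrative, not a real change detector):

import os
import time
from watchdog.events import FileModifiedEvent
from watchdog.observers.api import BaseObserver, EventEmitter, DEFAULT_OBSERVER_TIMEOUT

class HeartbeatEmitter(EventEmitter):
    def queue_events(self, timeout):
        # EventEmitter.run() calls this in a loop; a real emitter would
        # block on an OS facility instead of sleeping.
        time.sleep(timeout)
        if os.path.exists(self.watch.path):
            self.queue_event(FileModifiedEvent(self.watch.path))

class HeartbeatObserver(BaseObserver):
    def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
        BaseObserver.__init__(self, emitter_class=HeartbeatEmitter, timeout=timeout)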
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify_c.py
# -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import os import errno import struct import threading import ctypes import ctypes.util from functools import reduce from ctypes import c_int, c_char_p, c_uint32 from watchdog.utils import has_attribute from watchdog.utils import UnsupportedLibc from watchdog.utils.unicode_paths import decode def _load_libc(): libc_path = None try: libc_path = ctypes.util.find_library('c') except (OSError, IOError, RuntimeError): # Note: find_library will on some platforms raise these undocumented # errors, e.g. on Android an IOError "No usable temporary directory found" # will be raised. pass if libc_path is not None: return ctypes.CDLL(libc_path) # Fallbacks try: return ctypes.CDLL('libc.so') except (OSError, IOError): pass try: return ctypes.CDLL('libc.so.6') except (OSError, IOError): pass # uClibc try: return ctypes.CDLL('libc.so.0') except (OSError, IOError) as err: raise err libc = _load_libc() if not has_attribute(libc, 'inotify_init') or \ not has_attribute(libc, 'inotify_add_watch') or \ not has_attribute(libc, 'inotify_rm_watch'): raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name) inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)( ("inotify_add_watch", libc)) inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)( ("inotify_rm_watch", libc)) inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)( ("inotify_init", libc)) class InotifyConstants(object): # User-space events IN_ACCESS = 0x00000001 # File was accessed. IN_MODIFY = 0x00000002 # File was modified. IN_ATTRIB = 0x00000004 # Meta-data changed. IN_CLOSE_WRITE = 0x00000008 # Writable file was closed. IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed. IN_OPEN = 0x00000020 # File was opened. IN_MOVED_FROM = 0x00000040 # File was moved from X. IN_MOVED_TO = 0x00000080 # File was moved to Y. IN_CREATE = 0x00000100 # Subfile was created. IN_DELETE = 0x00000200 # Subfile was deleted. IN_DELETE_SELF = 0x00000400 # Self was deleted. IN_MOVE_SELF = 0x00000800 # Self was moved. # Helper user-space events. IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close. IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves. # Events sent by the kernel to a watch. IN_UNMOUNT = 0x00002000 # Backing file system was unmounted. IN_Q_OVERFLOW = 0x00004000 # Event queue overflowed. IN_IGNORED = 0x00008000 # File was ignored. # Special flags. IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory. IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link. IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch. IN_ISDIR = 0x40000000 # Event occurred against directory. IN_ONESHOT = 0x80000000 # Only send event once. # All user-space events.
IN_ALL_EVENTS = reduce( lambda x, y: x | y, [ IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_DELETE, IN_CREATE, IN_DELETE_SELF, IN_MOVE_SELF, ]) # Flags for ``inotify_init1`` IN_CLOEXEC = 0x02000000 IN_NONBLOCK = 0x00004000 # Watchdog's API cares only about these events. WATCHDOG_ALL_EVENTS = reduce( lambda x, y: x | y, [ InotifyConstants.IN_MODIFY, InotifyConstants.IN_ATTRIB, InotifyConstants.IN_MOVED_FROM, InotifyConstants.IN_MOVED_TO, InotifyConstants.IN_CREATE, InotifyConstants.IN_DELETE, InotifyConstants.IN_DELETE_SELF, InotifyConstants.IN_DONT_FOLLOW, ]) class inotify_event_struct(ctypes.Structure): """ Structure representation of the inotify_event structure (used in buffer size calculations):: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; """ _fields_ = [('wd', c_int), ('mask', c_uint32), ('cookie', c_uint32), ('len', c_uint32), ('name', c_char_p)] EVENT_SIZE = ctypes.sizeof(inotify_event_struct) DEFAULT_NUM_EVENTS = 2048 DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16) class Inotify(object): """ Linux inotify(7) API wrapper class. :param path: The directory path for which we want an inotify object. :type path: :class:`bytes` :param recursive: ``True`` if subdirectories should be monitored; ``False`` otherwise. """ def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS): # The file descriptor associated with the inotify instance. inotify_fd = inotify_init() if inotify_fd == -1: Inotify._raise_error() self._inotify_fd = inotify_fd self._lock = threading.Lock() # Stores the watch descriptor for a given path. self._wd_for_path = dict() self._path_for_wd = dict() self._path = path self._event_mask = event_mask self._is_recursive = recursive if os.path.isdir(path): self._add_dir_watch(path, recursive, event_mask) else: self._add_watch(path, event_mask) self._moved_from_events = dict() @property def event_mask(self): """The event mask for this inotify instance.""" return self._event_mask @property def path(self): """The path associated with the inotify instance.""" return self._path @property def is_recursive(self): """Whether we are watching directories recursively.""" return self._is_recursive @property def fd(self): """The file descriptor associated with the inotify instance.""" return self._inotify_fd def clear_move_records(self): """Clear cached records of MOVED_FROM events""" self._moved_from_events = dict() def source_for_move(self, destination_event): """ The source path corresponding to the given MOVED_TO event. If the source path is outside the monitored directories, None is returned instead. """ if destination_event.cookie in self._moved_from_events: return self._moved_from_events[destination_event.cookie].src_path else: return None def remember_move_from_event(self, event): """ Save this event as the source event for future MOVED_TO events to reference. """ self._moved_from_events[event.cookie] = event def add_watch(self, path): """ Adds a watch for the given path. :param path: Path to begin monitoring. """ with self._lock: self._add_watch(path, self._event_mask) def remove_watch(self, path): """ Removes a watch for the given path. :param path: Path string for which the watch will be removed. 
""" with self._lock: wd = self._wd_for_path.pop(path) del self._path_for_wd[wd] if inotify_rm_watch(self._inotify_fd, wd) == -1: Inotify._raise_error() def close(self): """ Closes the inotify instance and removes all associated watches. """ with self._lock: if self._path in self._wd_for_path: wd = self._wd_for_path[self._path] inotify_rm_watch(self._inotify_fd, wd) os.close(self._inotify_fd) def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE): """ Reads events from inotify and yields them. """ # HACK: We need to traverse the directory path # recursively and simulate events for newly # created subdirectories/files. This will handle # mkdir -p foobar/blah/bar; touch foobar/afile def _recursive_simulate(src_path): events = [] for root, dirnames, filenames in os.walk(src_path): for dirname in dirnames: try: full_path = os.path.join(root, dirname) wd_dir = self._add_watch(full_path, self._event_mask) e = InotifyEvent( wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path) events.append(e) except OSError: pass for filename in filenames: full_path = os.path.join(root, filename) wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)] e = InotifyEvent( wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path) events.append(e) return events event_buffer = None while True: try: event_buffer = os.read(self._inotify_fd, event_buffer_size) except OSError as e: if e.errno == errno.EINTR: continue break with self._lock: event_list = [] for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer): if wd == -1: continue wd_path = self._path_for_wd[wd] src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) if inotify_event.is_moved_from: self.remember_move_from_event(inotify_event) elif inotify_event.is_moved_to: move_src_path = self.source_for_move(inotify_event) if move_src_path in self._wd_for_path: moved_wd = self._wd_for_path[move_src_path] del self._wd_for_path[move_src_path] self._wd_for_path[inotify_event.src_path] = moved_wd self._path_for_wd[moved_wd] = inotify_event.src_path if self.is_recursive: for _path, _wd in self._wd_for_path.copy().items(): if _path.startswith(move_src_path + os.path.sep.encode()): moved_wd = self._wd_for_path.pop(_path) _move_to_path = _path.replace(move_src_path, inotify_event.src_path) self._wd_for_path[_move_to_path] = moved_wd self._path_for_wd[moved_wd] = _move_to_path src_path = os.path.join(wd_path, name) inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) if inotify_event.is_ignored: # Clean up book-keeping for deleted watches. path = self._path_for_wd.pop(wd) if self._wd_for_path[path] == wd: del self._wd_for_path[path] continue event_list.append(inotify_event) if (self.is_recursive and inotify_event.is_directory and inotify_event.is_create): # TODO: When a directory from another part of the # filesystem is moved into a watched directory, this # will not generate events for the directory tree. # We need to coalesce IN_MOVED_TO events and those # IN_MOVED_TO events which don't pair up with # IN_MOVED_FROM events should be marked IN_CREATE # instead relative to this directory. try: self._add_watch(src_path, self._event_mask) except OSError: continue event_list.extend(_recursive_simulate(src_path)) return event_list # Non-synchronized methods. 
def _add_dir_watch(self, path, recursive, mask): """ Adds a watch (optionally recursively) for the given directory path to monitor events specified by the mask. :param path: Path to monitor :param recursive: ``True`` to monitor recursively. :param mask: Event bit mask. """ if not os.path.isdir(path): raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path) self._add_watch(path, mask) if recursive: for root, dirnames, _ in os.walk(path): for dirname in dirnames: full_path = os.path.join(root, dirname) if os.path.islink(full_path): continue self._add_watch(full_path, mask) def _add_watch(self, path, mask): """ Adds a watch for the given path to monitor events specified by the mask. :param path: Path to monitor :param mask: Event bit mask. """ wd = inotify_add_watch(self._inotify_fd, path, mask) if wd == -1: Inotify._raise_error() self._wd_for_path[path] = wd self._path_for_wd[wd] = path return wd @staticmethod def _raise_error(): """ Raises errors for inotify failures. """ err = ctypes.get_errno() if err == errno.ENOSPC: raise OSError(errno.ENOSPC, "inotify watch limit reached") elif err == errno.EMFILE: raise OSError(errno.EMFILE, "inotify instance limit reached") elif err == errno.EACCES: # Prevent raising an exception when a file with no permissions # changes pass else: raise OSError(err, os.strerror(err)) @staticmethod def _parse_event_buffer(event_buffer): """ Parses an event buffer of ``inotify_event`` structs returned by inotify:: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; The ``cookie`` member of this struct is used to pair two related events, for example, it pairs an IN_MOVED_FROM event with an IN_MOVED_TO event. """ i = 0 while i + 16 <= len(event_buffer): wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i) name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0') i += 16 + length yield wd, mask, cookie, name class InotifyEvent(object): """ Inotify event struct wrapper. :param wd: Watch descriptor :param mask: Event mask :param cookie: Event cookie :param name: Base name of the event source path. :param src_path: Full event source path.
""" def __init__(self, wd, mask, cookie, name, src_path): self._wd = wd self._mask = mask self._cookie = cookie self._name = name self._src_path = src_path @property def src_path(self): return self._src_path @property def wd(self): return self._wd @property def mask(self): return self._mask @property def cookie(self): return self._cookie @property def name(self): return self._name @property def is_modify(self): return self._mask & InotifyConstants.IN_MODIFY > 0 @property def is_close_write(self): return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0 @property def is_close_nowrite(self): return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0 @property def is_access(self): return self._mask & InotifyConstants.IN_ACCESS > 0 @property def is_delete(self): return self._mask & InotifyConstants.IN_DELETE > 0 @property def is_delete_self(self): return self._mask & InotifyConstants.IN_DELETE_SELF > 0 @property def is_create(self): return self._mask & InotifyConstants.IN_CREATE > 0 @property def is_moved_from(self): return self._mask & InotifyConstants.IN_MOVED_FROM > 0 @property def is_moved_to(self): return self._mask & InotifyConstants.IN_MOVED_TO > 0 @property def is_move(self): return self._mask & InotifyConstants.IN_MOVE > 0 @property def is_move_self(self): return self._mask & InotifyConstants.IN_MOVE_SELF > 0 @property def is_attrib(self): return self._mask & InotifyConstants.IN_ATTRIB > 0 @property def is_ignored(self): return self._mask & InotifyConstants.IN_IGNORED > 0 @property def is_directory(self): # It looks like the kernel does not provide this information for # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir. # See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897 return (self.is_delete_self or self.is_move_self or self._mask & InotifyConstants.IN_ISDIR > 0) @property def key(self): return self._src_path, self._wd, self._mask, self._cookie, self._name def __eq__(self, inotify_event): return self.key == inotify_event.key def __ne__(self, inotify_event): return self.key != inotify_event.key def __hash__(self): return hash(self.key) @staticmethod def _get_mask_string(mask): masks = [] for c in dir(InotifyConstants): if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']: c_val = getattr(InotifyConstants, c) if mask & c_val: masks.append(c) mask_string = '|'.join(masks) return mask_string def __repr__(self): mask_string = self._get_mask_string(self.mask) s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>' return s % (type(self).__name__, self.src_path, self.wd, mask_string, self.cookie, decode(self.name))
19,875
Python
32.574324
114
0.56478
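The buffer that os.read returns from an inotify descriptor is a sequence of variable-length records: a fixed 16-byte (wd, mask, cookie, len) header followed by len bytes of NUL-padded name. A standalone decoder equivalent to Inotify._parse_event_buffer above:

import struct

# Sketch: decode a raw buffer read from an inotify file descriptor,
# mirroring Inotify._parse_event_buffer.
def parse_inotify_buffer(event_buffer):
    offset = 0
    while offset + 16 <= len(event_buffer):
        wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, offset)
        name = event_buffer[offset + 16:offset + 16 + length].rstrip(b'\0')
        offset += 16 + length
        yield wd, mask, cookie, name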
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify_buffer.py
# -*- coding: utf-8 -*- # # Copyright 2014 Thomas Amland <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from watchdog.utils import BaseThread from watchdog.utils.delayed_queue import DelayedQueue from watchdog.observers.inotify_c import Inotify logger = logging.getLogger(__name__) class InotifyBuffer(BaseThread): """A wrapper for `Inotify` that holds events for `delay` seconds. During this time, IN_MOVED_FROM and IN_MOVED_TO events are paired. """ delay = 0.5 def __init__(self, path, recursive=False): BaseThread.__init__(self) self._queue = DelayedQueue(self.delay) self._inotify = Inotify(path, recursive) self.start() def read_event(self): """Returns a single event or a tuple of from/to events in case of a paired move event. If this buffer has been closed, None is returned immediately. """ return self._queue.get() def on_thread_stop(self): self._inotify.close() self._queue.close() def close(self): self.stop() self.join() def _group_events(self, event_list): """Group any matching move events""" grouped = [] for inotify_event in event_list: logger.debug("in-event %s", inotify_event) def matching_from_event(event): return (not isinstance(event, tuple) and event.is_moved_from and event.cookie == inotify_event.cookie) if inotify_event.is_moved_to: # Check if move_from is already in the buffer for index, event in enumerate(grouped): if matching_from_event(event): grouped[index] = (event, inotify_event) break else: # Check if move_from is already in the delay queue from_event = self._queue.remove(matching_from_event) if from_event is not None: grouped.append((from_event, inotify_event)) else: logger.debug("could not find matching move_from event") grouped.append(inotify_event) else: grouped.append(inotify_event) return grouped def run(self): """Read events from `inotify` and add them to `queue`. When reading an IN_MOVED_TO event, remove the previously added matching IN_MOVED_FROM event and add them back to the queue as a tuple. """ deleted_self = False while self.should_keep_running() and not deleted_self: inotify_events = self._inotify.read_events() grouped_events = self._group_events(inotify_events) for inotify_event in grouped_events: # Only add delay for unmatched move_from events delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from self._queue.put(inotify_event, delay) if not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and \ inotify_event.src_path == self._inotify.path: # Deleted the watched directory, stop watching for events deleted_self = True
3,833
Python
37.727272
94
0.606314
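The kernel ties the two halves of a move together with a shared cookie, which is all _group_events needs; the DelayedQueue exists only to give a late IN_MOVED_TO a window in which to arrive. The pairing reduced to a dictionary, as a simplified sketch that drops the ordering and delay handling of the real implementation:

# Sketch: cookie-based pairing of IN_MOVED_FROM / IN_MOVED_TO events.
# `events` holds objects exposing is_moved_from, is_moved_to and cookie,
# like InotifyEvent above.
def group_moves(events):
    pending = {}  # cookie -> unmatched IN_MOVED_FROM event
    grouped = []
    for event in events:
        if event.is_moved_from:
            pending[event.cookie] = event
        elif event.is_moved_to and event.cookie in pending:
            grouped.append((pending.pop(event.cookie), event))  # paired move
        else:
            grouped.append(event)
    # Leftovers moved out of the watched tree (or their pair has not
    # arrived yet); the real buffer delays these rather than flushing.
    grouped.extend(pending.values())
    return grouped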
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/__init__.py
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. This module is deprecated. Users are directed to :mod:`importlib.resources`, :mod:`importlib.metadata` and :pypi:`packaging` instead. """ import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import operator import platform import collections import plistlib import email.parser import errno import tempfile import textwrap import inspect import ntpath import posixpath import importlib from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp try: FileExistsError except NameError: FileExistsError = OSError # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split try: import importlib.machinery as importlib_machinery # access attribute to force import under delayed import mechanisms. importlib_machinery.__name__ except ImportError: importlib_machinery = None from pkg_resources.extern.jaraco.text import ( yield_lines, drop_comment, join_continuation, ) from pkg_resources.extern import platformdirs from pkg_resources.extern import packaging __import__('pkg_resources.extern.packaging.version') __import__('pkg_resources.extern.packaging.specifiers') __import__('pkg_resources.extern.packaging.requirements') __import__('pkg_resources.extern.packaging.markers') __import__('pkg_resources.extern.packaging.utils') if sys.version_info < (3, 5): raise RuntimeError("Python 3.5 or later is required") # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None add_activation_listener = None resources_stream = None cleanup_resources = None resource_dir = None resource_stream = None set_extraction_path = None resource_isdir = None resource_string = None iter_entry_points = None resource_listdir = None resource_filename = None resource_exists = None _distribution_finders = None _namespace_handlers = None _namespace_packages = None warnings.warn( "pkg_resources is deprecated as an API. " "See https://setuptools.pypa.io/en/latest/pkg_resources.html", DeprecationWarning, stacklevel=2 ) _PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I) class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" parse_version = packaging.version.Version _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_' + v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_' + _state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of macOS that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of macOS that we are *running*. To allow usage of packages that explicitly require a newer version of macOS, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) except ValueError: # not macOS pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Warnings 'PkgResourcesDeprecationWarning', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__ + repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. 
""" _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. """ _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ( "The '{self.req}' distribution was not found " "and is required by {self.requirers_str}" ) @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = '{}.{}'.format(*sys.version_info) EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macos_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macos_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and macOS. 
""" from sysconfig import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macos_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % ( int(version[0]), int(version[1]), _macos_arch(machine), ) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided == required: # easy case return True # macOS special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macOS designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if ( dversion == 7 and macosversion >= "10.3" or dversion == 8 and macosversion >= "10.4" ): return True # egg isn't macOS or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, str): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet: """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.normalized_to_canonical_keys = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. 
""" dist = self.by_key.get(req.key) if dist is None: canonical_key = self.normalized_to_canonical_keys.get(req.key) if canonical_key is not None: req.key = canonical_key dist = self.by_key.get(canonical_key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ return ( entry for dist in self for entry in dist.get_entry_map(group).values() if name is None or name == entry.name ) def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key] = 1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry, replace=replace) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry, []) keys2 = self.entry_keys.setdefault(dist.location, []) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist normalized_name = packaging.utils.canonicalize_name(dist.key) self.normalized_to_canonical_keys[normalized_name] = dist.key if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve( self, requirements, env=None, installer=None, replace_conflicting=False, extras=None, ): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. `extras` is a list of the extras to be used with these requirements. This is important because extra requirements may look like `my_req; extra = "my_extra"`, which would otherwise be interpreted as a purely optional requirement. 
Instead, we want to be able to assert that these requirements are truly required. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] req_extras = _ReqExtras() # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue if not req_extras.markers_pass(req, extras): continue dist = self._resolve_dist( req, best, replace_conflicting, env, installer, required_by, to_activate ) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) req_extras[new_requirement] = req.extras processed[req] = True # return list of distros to activate return to_activate def _resolve_dist( self, req, best, replace_conflicting, env, installer, required_by, to_activate ): dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match( req, ws, installer, replace_conflicting=replace_conflicting ) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) return dist def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` that contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance.
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback, existing=True): """Invoke `callback` for all distributions If `existing=True` (default), call on all existing ones, as well. """ if callback in self.callbacks: return self.callbacks.append(callback) if not existing: return for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.normalized_to_canonical_keys.copy(), self.callbacks[:], ) def __setstate__(self, e_k_b_n_c): entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy() self.callbacks = callbacks[:] class _ReqExtras(dict): """ Map each requirement to the extras that demanded it. """ def markers_pass(self, req, extras=None): """ Evaluate markers for req against each extra that demanded it. Return False if the req has a marker and fails evaluation. Otherwise, return True. """ extra_evals = ( req.marker.evaluate({'extra': extra}) for extra in self.get(req, ()) + (extras or (None,)) ) return not req.marker or any(extra_evals) class Environment: """Searchable snapshot of distributions on a search path""" def __init__( self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR ): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.6'``); it defaults to the current version. 
You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. """ py_compat = ( self.python is None or dist.py_version is None or dist.py_version == self.python ) return py_compat and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ try: dist = working_set.find(req) except VersionConflict: if not replace_conflicting: raise dist = None if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
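        For example, a subclass might try a custom fetch step before the
        default behavior (a minimal sketch; ``fetch_dist`` is a hypothetical
        helper, not part of this module)::

            class FetchingEnvironment(Environment):
                def obtain(self, requirement, installer=None):
                    dist = fetch_dist(requirement)  # hypothetical helper
                    return dist or super().obtain(requirement, installer)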
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir(resource_name) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir(resource_name) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() tmpl = textwrap.dedent( """ Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """ ).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
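        For example (egg name and subpath parts are illustrative)::

            path = manager.get_cache_path(
                'mypkg-1.0-py3.11.egg', ['subdir', 'data.bin']
            )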
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name + '-tmp', *names) try: _bypass_ensure_directory(target_path) except Exception: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ( "Extraction path is writable by group/others " "and vulnerable to attack when " "used with get_resource_filename ({path}). " "Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." ).format(**locals()) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
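        A typical pattern with a process-private temporary directory (a
        hedged sketch; assumes the standard ``tempfile`` module)::

            manager = ResourceManager()
            manager.set_extraction_path(tempfile.mkdtemp())
            try:
                ...  # extract and use resources here
            finally:
                manager.cleanup_resources()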
""" if self.cached_files: raise ValueError("Can't change extraction path, files already extracted") self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """ Return the ``PYTHON_EGG_CACHE`` environment variable or a platform-relevant user cache dir for an app named "Python-Eggs". """ return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir( appname='Python-Eggs' ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) def _forgiving_version(version): """Fallback when ``safe_version`` is not safe enough >>> parse_version(_forgiving_version('0.23ubuntu1')) <Version('0.23.dev0+sanitized.ubuntu1')> >>> parse_version(_forgiving_version('0.23-')) <Version('0.23.dev0+sanitized')> >>> parse_version(_forgiving_version('0.-_')) <Version('0.dev0+sanitized')> >>> parse_version(_forgiving_version('42.+?1')) <Version('42.dev0+sanitized.1')> >>> parse_version(_forgiving_version('hello world')) <Version('0.dev0+sanitized.hello.world')> """ version = version.replace(' ', '.') match = _PEP440_FALLBACK.search(version) if match: safe = match["safe"] rest = version[len(safe):] else: safe = "0" rest = version local = f"sanitized.{_safe_segment(rest)}".strip(".") return f"{safe}.dev0+{local}" def _safe_segment(segment): """Convert an arbitrary string into a safe segment""" segment = re.sub('[^A-Za-z0-9.]+', '-', segment) segment = re.sub('-[^A-Za-z0-9]+', '-', segment) return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-") def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-', '_') def invalid_marker(text): """ Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. """ try: evaluate_marker(text) except SyntaxError as e: e.filename = None e.lineno = None return e return False def evaluate_marker(text, extra=None): """ Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'pyparsing' module. 
""" try: marker = packaging.markers.Marker(text) return marker.evaluate() except packaging.markers.InvalidMarker as e: raise SyntaxError(e) from e class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def _get_metadata_path(self, name): return self._fn(self.egg_info, name) def has_metadata(self, name): if not self.egg_info: return self.egg_info path = self._get_metadata_path(name) return self._has(path) def get_metadata(self, name): if not self.egg_info: return "" path = self._get_metadata_path(name) value = self._get(path) try: return value.decode('utf-8') except UnicodeDecodeError as exc: # Include the path in the error message to simplify # troubleshooting, and without changing the exception type. exc.reason += ' in {} file at path: {}'.format(name, path) raise def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( "Script {script!r} not found in metadata at {self.egg_info!r}".format( **locals() ), ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): with open(script_filename) as fid: source = fid.read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename, ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): self._validate_resource_path(resource_name) if resource_name: return os.path.join(base, *resource_name.split('/')) return base @staticmethod def _validate_resource_path(path): """ Validate the resource paths according to the docs. 
https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access >>> warned = getfixture('recwarn') >>> warnings.simplefilter('always') >>> vrp = NullProvider._validate_resource_path >>> vrp('foo/bar.txt') >>> bool(warned) False >>> vrp('../foo/bar.txt') >>> bool(warned) True >>> warned.clear() >>> vrp('/foo/bar.txt') >>> bool(warned) True >>> vrp('foo/../../bar.txt') >>> bool(warned) True >>> warned.clear() >>> vrp('foo/f../bar.txt') >>> bool(warned) False Windows path separators are straight-up disallowed. >>> vrp(r'\\foo/bar.txt') Traceback (most recent call last): ... ValueError: Use of .. or absolute path in a resource path \ is not allowed. >>> vrp(r'C:\\foo/bar.txt') Traceback (most recent call last): ... ValueError: Use of .. or absolute path in a resource path \ is not allowed. Blank values are allowed >>> vrp('') >>> bool(warned) False Non-string values are not. >>> vrp(None) Traceback (most recent call last): ... AttributeError: ... """ invalid = ( os.path.pardir in path.split(posixpath.sep) or posixpath.isabs(path) or ntpath.isabs(path) ) if not invalid: return msg = "Use of .. or absolute path in a resource path is not allowed." # Aggressively disallow Windows absolute paths if ntpath.isabs(path) and not posixpath.isabs(path): raise ValueError(msg) # for compatibility, warn; in future # raise ValueError(msg) issue_warning( msg[:-1] + " and will raise exceptions in a future release.", DeprecationWarning, ) def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) def _parents(path): """ yield all parents of path including path """ last = None while path != last: yield path last = path path, _ = os.path.split(path) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): super().__init__(module) self._setup_prefix() def _setup_prefix(self): # Assume that metadata may be nested inside a "basket" # of multiple eggs and use module_path instead of .archive. eggs = filter(_is_egg_path, _parents(self.module_path)) egg = next(eggs, None) egg and self._set_egg(egg) def _set_egg(self, path): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() @classmethod def _register(cls): loader_names = ( 'SourceFileLoader', 'SourcelessFileLoader', ) for name in loader_names: loader_cls = getattr(importlib_machinery, name, type(None)) register_loader_type(loader_cls, cls) DefaultProvider._register() class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" module_path = None _isdir = _has = lambda self, path: False def _get(self, path): return '' def _listdir(self, path): return [] def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. 
Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with zipfile.ZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. """ path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): super().__init__(module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive fspath = fspath.rstrip(os.sep) if fspath == self.loader.archive: return '' if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre) :] raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre)) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): return fspath[len(self.egg_root) + 1 :].split(os.sep) raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root)) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size # FIXME: 'ZipProvider._extract_resource' is too complex (12) def _extract_resource(self, manager, zip_path): # noqa: C901 if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource(manager, os.path.join(zip_path, name)) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError( '"os.rename" and "os.unlink" are not supported ' 'on this platform' ) try: real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp( ".$extract", dir=os.path.dirname(real_path), ) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
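    A hedged sketch of pairing it with a ``Distribution`` (the path and
    project name are illustrative)::

        metadata = FileMetadata("/path/to/PKG-INFO")  # illustrative path
        dist = Distribution.from_location(
            "/path/to", "ProjectName", metadata=metadata
        )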
""" def __init__(self, path): self.path = path def _get_metadata_path(self, name): return self.path def has_metadata(self, name): return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): if name != 'PKG-INFO': raise KeyError("No metadata except PKG-INFO is available") with io.open(self.path, encoding='utf-8', errors="replace") as f: metadata = f.read() self._warn_on_replacement(metadata) return metadata def _warn_on_replacement(self, metadata): replacement_char = '�' if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" msg = tmpl.format(**locals()) warnings.warn(msg) def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders={}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir(''): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) for dist in dists: yield dist elif subitem.lower().endswith(('.dist-info', '.egg-info')): subpath = os.path.join(path_item, subitem) submeta = EggMetadata(zipimport.zipimporter(subpath)) submeta.egg_info = subpath yield Distribution.from_location(path_item, subitem, submeta) register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')), ) return entries = (os.path.join(path_item, child) for child in safe_listdir(path_item)) # scan for .egg and .egg-info in directory for entry in sorted(entries): fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): yield dist def dist_factory(path_item, entry, only): """Return a dist_factory for the given entry.""" lower = entry.lower() is_egg_info = lower.endswith('.egg-info') is_dist_info = lower.endswith('.dist-info') and os.path.isdir( os.path.join(path_item, entry) ) is_meta = is_egg_info or is_dist_info return ( distributions_from_metadata if is_meta else find_distributions if not only and _is_egg_path(entry) else resolve_egg_link if not only and lower.endswith('.egg-link') else NoDists() ) class NoDists: """ >>> bool(NoDists()) False >>> list(NoDists()('anything')) [] """ def __bool__(self): return False def __call__(self, fullpath): return iter(()) def safe_listdir(path): """ Attempt to list contents of path, but suppress some exceptions. """ try: return os.listdir(path) except (PermissionError, NotADirectoryError): pass except OSError as e: # Ignore the directory if does not exist, not a directory or # permission denied if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): raise return () def distributions_from_metadata(path): root = os.path.dirname(path) if os.path.isdir(path): if len(os.listdir(path)) == 0: # empty metadata dir; skip return metadata = PathMetadata(root, path) else: metadata = FileMetadata(path) entry = os.path.basename(path) yield Distribution.from_location( root, entry, metadata, precedence=DEVELOP_DIST, ) def non_empty_lines(path): """ Yield non-empty lines from file at path """ with open(path) as f: for line in f: line = line.strip() if line: yield line def resolve_egg_link(path): """ Given a path to an .egg-link, resolve distributions present in the referenced path. 
""" referenced_paths = non_empty_lines(path) resolved_paths = ( os.path.join(os.path.dirname(path), ref) for ref in referenced_paths ) dist_groups = map(find_distributions, resolved_paths) return next(dist_groups, ()) if hasattr(pkgutil, 'ImpImporter'): register_finder(pkgutil.ImpImporter, find_on_path) register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. """ _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None # use find_spec (PEP 451) and fall-back to find_module (PEP 302) try: spec = importer.find_spec(packageName) except AttributeError: # capture warnings due to #1111 with warnings.catch_warnings(): warnings.simplefilter("ignore") loader = importer.find_module(packageName) else: loader = spec.loader if spec else None if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) importlib.import_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath def _rebuild_mod_path(orig_path, package_name, module): """ Rebuild module.__path__ ensuring that all entries are ordered corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] def safe_sys_path_index(entry): """ Workaround for #520 and #513. """ try: return sys_path.index(entry) except ValueError: return float('inf') def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path """ path_parts = path.split(os.sep) module_parts = package_name.count('.') + 1 parts = path_parts[:-module_parts] return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) new_path = sorted(orig_path, key=position_in_sys_path) new_path = [_normalize_cached(p) for p in new_path] if isinstance(module.__path__, list): module.__path__[:] = new_path else: module.__path__ = new_path def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" msg = ( f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n" "Implementing implicit namespace packages (as specified in PEP 420) " "is preferred to `pkg_resources.declare_namespace`. 
" "See https://setuptools.pypa.io/en/latest/references/" "keywords.html#keyword-namespace-packages" ) warnings.warn(msg, DeprecationWarning, stacklevel=2) _imp.acquire_lock() try: if packageName in _namespace_packages: return path = sys.path parent, _, _ = packageName.rpartition('.') if parent: declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError as e: raise TypeError("Not a package:", parent) from e # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent or None, []).append(packageName) _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there return subpath if hasattr(pkgutil, 'ImpImporter'): register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) def _cygwin_patch(filename): # pragma: nocover """ Contrary to POSIX 2008, on Cygwin, getcwd (3) contains symlink components. Using os.path.abspath() works around this limitation. A fix in os.getcwd() would probably better, in Cygwin even more so, except that this seems to be by design... """ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _is_egg_path(path): """ Determine if given path appears to be an egg. """ return _is_zip_egg(path) or _is_unpacked_egg(path) def _is_zip_egg(path): return ( path.lower().endswith('.egg') and os.path.isfile(path) and zipfile.is_zipfile(path) ) def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ return path.lower().endswith('.egg') and os.path.isfile( os.path.join(path, 'EGG-INFO', 'PKG-INFO') ) def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? 
""", re.VERBOSE | re.IGNORECASE, ).match class EntryPoint: """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = tuple(extras) self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", PkgResourcesDeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) from exc def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) # Get the requirements for this entry point with all its extras and # then resolve them. We have to pass `extras` along when resolving so # that the working set knows what extras we want. Otherwise, for # dist-info distributions, the working set will assume that the # requirements for that extra are purely optional and skip over them. reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer, extras=self.extras) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name] = ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, 
dist) return maps def _version_from_file(lines): """ Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. """ def is_version_line(line): return line.lower().startswith('version:') version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') return safe_version(value.strip()) or None class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__( self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST, ): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None, **kw): project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name', 'ver', 'pyver', 'plat' ) return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw, )._reload_version() def _reload_version(self): return self @property def hashcmp(self): return ( self._forgiving_parsed_version, self.precedence, self.key, self.location, self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): try: self._parsed_version = parse_version(self.version) except packaging.version.InvalidVersion as ex: info = f"(package: {self.project_name})" if hasattr(ex, "add_note"): ex.add_note(info) # PEP 678 raise raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None return self._parsed_version @property def _forgiving_parsed_version(self): try: return self.parsed_version except packaging.version.InvalidVersion as ex: self._parsed_version = parse_version(_forgiving_version(self.version)) notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678 msg = f"""!!\n\n ************************************************************************* {str(ex)}\n{notes} This is a long overdue deprecation. For the time being, `pkg_resources` will use `{self._parsed_version}` as a replacement to avoid breaking existing environments, but no future compatibility is guaranteed. 
If you maintain package {self.project_name} you should implement
            the relevant changes to bring the project into compliance with
            PEP 440 immediately.
            *************************************************************************
            \n\n!!
            """
            warnings.warn(msg, DeprecationWarning)
            return self._parsed_version

    @property
    def version(self):
        try:
            return self._version
        except AttributeError as e:
            version = self._get_version()
            if version is None:
                path = self._get_metadata_path_for_display(self.PKG_INFO)
                msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
                    self.PKG_INFO, path
                )
                raise ValueError(msg, self) from e

            return version

    @property
    def _dep_map(self):
        """
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        """
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
        return self.__dep_map

    @staticmethod
    def _filter_extras(dm):
        """
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        """
        for extra in list(filter(None, dm)):
            reqs = dm.pop(extra)
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (
                invalid_marker(marker) or not evaluate_marker(marker)
            )
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None
            dm.setdefault(new_extra, []).extend(reqs)
        return dm

    def _build_dep_map(self):
        dm = {}
        for name in 'requires.txt', 'depends.txt':
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError as e:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                ) from e
        return deps

    def _get_metadata_path_for_display(self, name):
        """
        Return the path to the given metadata file, if available.
        """
        try:
            # We need to access _get_metadata_path() on the provider object
            # directly rather than through this class's __getattr__()
            # since _get_metadata_path() is marked private.
            path = self._provider._get_metadata_path(name)

            # Handle exceptions e.g. in case the distribution's metadata
            # provider doesn't support _get_metadata_path().
except Exception: return '[could not detect]' return path def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def _get_version(self): lines = self._get_metadata(self.PKG_INFO) version = _version_from_file(lines) return version def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path, replace=replace) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR, ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) def __dir__(self): return list( set(super(Distribution, self).__dir__()) | set(attr for attr in self._provider.__dir__() if not attr.startswith('_')) ) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) # FIXME: 'Distribution.insert_on' is too complex (13) def insert_on(self, path, loc=None, replace=False): # noqa: C901 """Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. 
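        A hedged sketch of the common case (the path is illustrative)::

            dist = Distribution.from_filename('/plugins/Foo-1.0-py3.11.egg')
            dist.insert_on(sys.path)  # egg placed ahead of '/plugins' if present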
""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if ( modname not in sys.modules or modname in nsp or modname in _namespace_packages ): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and ( normalize_path(fn).startswith(loc) or fn.startswith(self.location) ): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False except SystemError: # TODO: remove this except clause when python/cpython#103632 is fixed. return False return True def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class EggInfoDistribution(Distribution): def _reload_version(self): """ Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. """ md_version = self._get_version() if md_version: self._version = md_version return self class DistInfoDistribution(Distribution): """ Wrap an actual or potential sys.path entry w/metadata, .dist-info style. 
""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _compute_dependencies(self): """Recompute this distribution's dependencies.""" dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: reqs.extend(parse_requirements(req)) def reqs_for_extra(extra): for req in reqs: if not req.marker or req.marker.evaluate({'extra': extra}): yield req common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] return dm _distributionImpl = { '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args, **kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) def parse_requirements(strs): """ Yield ``Requirement`` objects for each specification in `strs`. `strs` must be a string, or a (possibly-nested) iterable thereof. """ return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) class RequirementParseError(packaging.requirements.InvalidRequirement): "Compatibility wrapper for InvalidRequirement" class Requirement(packaging.requirements.Requirement): def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" super(Requirement, self).__init__(requirement_string) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() self.specs = [(spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, self.url, self.specifier, frozenset(self.extras), str(self.marker) if self.marker else None, ) self.__hash = hash(self.hashCmp) def __eq__(self, other): return isinstance(other, Requirement) and self.hashCmp == other.hashCmp def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): (req,) = parse_requirements(s) return req def _always_object(classes): """ Ensure object appears in the mro even for old-style classes. 
""" if object not in classes: return classes + (object,) return classes def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) for t in types: if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) os.makedirs(dirname, exist_ok=True) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) try: mkdir(dirname, 0o755) except FileExistsError: pass def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager g.update( (name, getattr(manager, name)) for name in dir(manager) if not name.startswith('_') ) class PkgResourcesDeprecationWarning(Warning): """ Base class for warning about deprecations in ``pkg_resources`` This class is not derived from ``DeprecationWarning``, and as such is visible by default. """ @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path with replace=False and # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). 
    tuple(dist.activate(replace=False) for dist in working_set)
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    globals().update(locals())
109,425
Python
31.547888
88
0.601462
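# --- Illustrative usage sketch (appended commentary; not part of the dataset
# --- record above). A minimal demonstration of the requirement-parsing
# --- helpers defined in the pkg_resources listing, assuming pkg_resources is
# --- importable; the requirement strings and project names are hypothetical.
from pkg_resources import Requirement, parse_requirements, split_sections

req = Requirement.parse("requests[security]>=2.8.1,==2.8.*")
assert req.key == "requests"          # lower-cased, normalized project name
assert set(req.extras) == {"security"}
assert "2.8.5" in req                 # __contains__ checks the specifier set
assert "2.9.0" not in req             # fails the ==2.8.* clause

# parse_requirements() accepts a string or a (possibly nested) iterable:
keys = [r.key for r in parse_requirements(["foo>=1.0", "bar[extra]<2"])]
assert keys == ["foo", "bar"]

# split_sections() yields (section, lines) pairs from an INI-like listing;
# lines seen before the first "[header]" land in a section of None.
pairs = list(split_sections(["top-level line", "[extras]", "security"]))
assert pairs == [(None, ["top-level line"]), ("extras", ["security"])]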
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/zipp.py
import io import posixpath import zipfile import itertools import contextlib import sys import pathlib if sys.version_info < (3, 7): from collections import OrderedDict else: OrderedDict = dict __all__ = ['Path'] def _parents(path): """ Given a path with elements separated by posixpath.sep, generate all parents of that path. >>> list(_parents('b/d')) ['b'] >>> list(_parents('/b/d/')) ['/b'] >>> list(_parents('b/d/f/')) ['b/d', 'b'] >>> list(_parents('b')) [] >>> list(_parents('')) [] """ return itertools.islice(_ancestry(path), 1, None) def _ancestry(path): """ Given a path with elements separated by posixpath.sep, generate all elements of that path >>> list(_ancestry('b/d')) ['b/d', 'b'] >>> list(_ancestry('/b/d/')) ['/b/d', '/b'] >>> list(_ancestry('b/d/f/')) ['b/d/f', 'b/d', 'b'] >>> list(_ancestry('b')) ['b'] >>> list(_ancestry('')) [] """ path = path.rstrip(posixpath.sep) while path and path != posixpath.sep: yield path path, tail = posixpath.split(path) _dedupe = OrderedDict.fromkeys """Deduplicate an iterable in original order""" def _difference(minuend, subtrahend): """ Return items in minuend not in subtrahend, retaining order with O(1) lookup. """ return itertools.filterfalse(set(subtrahend).__contains__, minuend) class CompleteDirs(zipfile.ZipFile): """ A ZipFile subclass that ensures that implied directories are always included in the namelist. """ @staticmethod def _implied_dirs(names): parents = itertools.chain.from_iterable(map(_parents, names)) as_dirs = (p + posixpath.sep for p in parents) return _dedupe(_difference(as_dirs, names)) def namelist(self): names = super(CompleteDirs, self).namelist() return names + list(self._implied_dirs(names)) def _name_set(self): return set(self.namelist()) def resolve_dir(self, name): """ If the name represents a directory, return that name as a directory (with the trailing slash). """ names = self._name_set() dirname = name + '/' dir_match = name not in names and dirname in names return dirname if dir_match else name @classmethod def make(cls, source): """ Given a source (filename or zipfile), return an appropriate CompleteDirs subclass. """ if isinstance(source, CompleteDirs): return source if not isinstance(source, zipfile.ZipFile): return cls(_pathlib_compat(source)) # Only allow for FastLookup when supplied zipfile is read-only if 'r' not in source.mode: cls = CompleteDirs source.__class__ = cls return source class FastLookup(CompleteDirs): """ ZipFile subclass to ensure implicit dirs exist and are resolved rapidly. """ def namelist(self): with contextlib.suppress(AttributeError): return self.__names self.__names = super(FastLookup, self).namelist() return self.__names def _name_set(self): with contextlib.suppress(AttributeError): return self.__lookup self.__lookup = super(FastLookup, self)._name_set() return self.__lookup def _pathlib_compat(path): """ For path-like objects, convert to a filename for compatibility on Python 3.6.1 and earlier. """ try: return path.__fspath__() except AttributeError: return str(path) class Path: """ A pathlib-compatible interface for zip files. Consider a zip file with this structure:: . ├── a.txt └── b ├── c.txt └── d └── e.txt >>> data = io.BytesIO() >>> zf = zipfile.ZipFile(data, 'w') >>> zf.writestr('a.txt', 'content of a') >>> zf.writestr('b/c.txt', 'content of c') >>> zf.writestr('b/d/e.txt', 'content of e') >>> zf.filename = 'mem/abcde.zip' Path accepts the zipfile object itself or a filename >>> root = Path(zf) From there, several path operations are available. 
Directory iteration (including the zip file itself): >>> a, b = root.iterdir() >>> a Path('mem/abcde.zip', 'a.txt') >>> b Path('mem/abcde.zip', 'b/') name property: >>> b.name 'b' join with divide operator: >>> c = b / 'c.txt' >>> c Path('mem/abcde.zip', 'b/c.txt') >>> c.name 'c.txt' Read text: >>> c.read_text() 'content of c' existence: >>> c.exists() True >>> (b / 'missing.txt').exists() False Coercion to string: >>> import os >>> str(c).replace(os.sep, posixpath.sep) 'mem/abcde.zip/b/c.txt' At the root, ``name``, ``filename``, and ``parent`` resolve to the zipfile. Note these attributes are not valid and will raise a ``ValueError`` if the zipfile has no filename. >>> root.name 'abcde.zip' >>> str(root.filename).replace(os.sep, posixpath.sep) 'mem/abcde.zip' >>> str(root.parent) 'mem' """ __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" def __init__(self, root, at=""): """ Construct a Path from a ZipFile or filename. Note: When the source is an existing ZipFile object, its type (__class__) will be mutated to a specialized type. If the caller wishes to retain the original type, the caller should either create a separate ZipFile object or pass a filename. """ self.root = FastLookup.make(root) self.at = at def open(self, mode='r', *args, pwd=None, **kwargs): """ Open this entry as text or binary following the semantics of ``pathlib.Path.open()`` by passing arguments through to io.TextIOWrapper(). """ if self.is_dir(): raise IsADirectoryError(self) zip_mode = mode[0] if not self.exists() and zip_mode == 'r': raise FileNotFoundError(self) stream = self.root.open(self.at, zip_mode, pwd=pwd) if 'b' in mode: if args or kwargs: raise ValueError("encoding args invalid for binary operation") return stream return io.TextIOWrapper(stream, *args, **kwargs) @property def name(self): return pathlib.Path(self.at).name or self.filename.name @property def suffix(self): return pathlib.Path(self.at).suffix or self.filename.suffix @property def suffixes(self): return pathlib.Path(self.at).suffixes or self.filename.suffixes @property def stem(self): return pathlib.Path(self.at).stem or self.filename.stem @property def filename(self): return pathlib.Path(self.root.filename).joinpath(self.at) def read_text(self, *args, **kwargs): with self.open('r', *args, **kwargs) as strm: return strm.read() def read_bytes(self): with self.open('rb') as strm: return strm.read() def _is_child(self, path): return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") def _next(self, at): return self.__class__(self.root, at) def is_dir(self): return not self.at or self.at.endswith("/") def is_file(self): return self.exists() and not self.is_dir() def exists(self): return self.at in self.root._name_set() def iterdir(self): if not self.is_dir(): raise ValueError("Can't listdir a file") subs = map(self._next, self.root.namelist()) return filter(self._is_child, subs) def __str__(self): return posixpath.join(self.root.filename, self.at) def __repr__(self): return self.__repr.format(self=self) def joinpath(self, *other): next = posixpath.join(self.at, *map(_pathlib_compat, other)) return self._next(self.root.resolve_dir(next)) __truediv__ = joinpath @property def parent(self): if not self.at: return self.filename.parent parent_at = posixpath.dirname(self.at.rstrip('/')) if parent_at: parent_at += '/' return self._next(parent_at)
8,395
Python
24.442424
78
0.579869
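# --- Illustrative usage sketch (appended commentary; not part of the dataset
# --- record above). Exercises the pathlib-style zipp.Path wrapper defined in
# --- the listing, assuming the vendored module is importable as `zipp`; the
# --- archive contents here are hypothetical.
import io
import zipfile
import zipp

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("a.txt", "content of a")
    zf.writestr("b/c.txt", "content of c")

root = zipp.Path(zipfile.ZipFile(buf))       # re-open read-only from memory
a, b = sorted(root.iterdir(), key=lambda p: p.at)
assert a.is_file() and a.read_text() == "content of a"
assert b.is_dir()                            # 'b/' is an *implied* directory
assert (b / "c.txt").exists() and (b / "c.txt").name == "c.txt"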
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/typing_extensions.py
import abc import collections import collections.abc import functools import operator import sys import types as _types import typing __all__ = [ # Super-special typing primitives. 'Any', 'ClassVar', 'Concatenate', 'Final', 'LiteralString', 'ParamSpec', 'ParamSpecArgs', 'ParamSpecKwargs', 'Self', 'Type', 'TypeVar', 'TypeVarTuple', 'Unpack', # ABCs (from collections.abc). 'Awaitable', 'AsyncIterator', 'AsyncIterable', 'Coroutine', 'AsyncGenerator', 'AsyncContextManager', 'ChainMap', # Concrete collection types. 'ContextManager', 'Counter', 'Deque', 'DefaultDict', 'NamedTuple', 'OrderedDict', 'TypedDict', # Structural checks, a.k.a. protocols. 'SupportsIndex', # One-off things. 'Annotated', 'assert_never', 'assert_type', 'clear_overloads', 'dataclass_transform', 'get_overloads', 'final', 'get_args', 'get_origin', 'get_type_hints', 'IntVar', 'is_typeddict', 'Literal', 'NewType', 'overload', 'override', 'Protocol', 'reveal_type', 'runtime', 'runtime_checkable', 'Text', 'TypeAlias', 'TypeGuard', 'TYPE_CHECKING', 'Never', 'NoReturn', 'Required', 'NotRequired', ] # for backward compatibility PEP_560 = True GenericMeta = type # The functions below are modified copies of typing internal helpers. # They are needed by _ProtocolMeta and they provide support for PEP 646. _marker = object() def _check_generic(cls, parameters, elen=_marker): """Check correct count for parameters of a generic cls (internal helper). This gives a nice error message in case of count mismatch. """ if not elen: raise TypeError(f"{cls} is not a generic class") if elen is _marker: if not hasattr(cls, "__parameters__") or not cls.__parameters__: raise TypeError(f"{cls} is not a generic class") elen = len(cls.__parameters__) alen = len(parameters) if alen != elen: if hasattr(cls, "__parameters__"): parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): return raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" f" actual {alen}, expected {elen}") if sys.version_info >= (3, 10): def _should_collect_from_parameters(t): return isinstance( t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) ) elif sys.version_info >= (3, 9): def _should_collect_from_parameters(t): return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) else: def _should_collect_from_parameters(t): return isinstance(t, typing._GenericAlias) and not t._special def _collect_type_vars(types, typevar_types=None): """Collect all type variable contained in types in order of first appearance (lexicographic order). For example:: _collect_type_vars((T, List[S, T])) == (T, S) """ if typevar_types is None: typevar_types = typing.TypeVar tvars = [] for t in types: if ( isinstance(t, typevar_types) and t not in tvars and not _is_unpack(t) ): tvars.append(t) if _should_collect_from_parameters(t): tvars.extend([t for t in t.__parameters__ if t not in tvars]) return tuple(tvars) NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) T = typing.TypeVar('T') # Any type. KT = typing.TypeVar('KT') # Key type. VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. 
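# --- Illustrative sketch (commentary added for this listing; not part of the
# --- vendored module). The private helpers above can be exercised like this
# --- once the module has been fully imported. They reference names such as
# --- _is_unpack that are defined later in the file, so the calls are shown
# --- here only as comments rather than executable statements:
#
#     import typing
#     import typing_extensions as te
#     S = typing.TypeVar('S')
#     # First-appearance order, duplicates dropped:
#     assert te._collect_type_vars((te.T, typing.List[S], S)) == (te.T, S)
#     # A matching parameter count passes silently; a mismatch raises TypeError:
#     te._check_generic(typing.List, (int,), elen=1)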
if sys.version_info >= (3, 11): from typing import Any else: class _AnyMeta(type): def __instancecheck__(self, obj): if self is Any: raise TypeError("typing_extensions.Any cannot be used with isinstance()") return super().__instancecheck__(obj) def __repr__(self): if self is Any: return "typing_extensions.Any" return super().__repr__() class Any(metaclass=_AnyMeta): """Special type indicating an unconstrained type. - Any is compatible with every type. - Any assumed to have all methods. - All values assumed to be instances of Any. Note that all the above statements are true from the point of view of static type checkers. At runtime, Any should not be used with instance checks. """ def __new__(cls, *args, **kwargs): if cls is Any: raise TypeError("Any cannot be instantiated") return super().__new__(cls, *args, **kwargs) ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". # 3.8+ if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 else: class _FinalForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', doc="""A special typing construct to indicate that a name cannot be re-assigned or overridden in a subclass. For example: MAX_SIZE: Final = 9000 MAX_SIZE += 1 # Error reported by type checker class Connection: TIMEOUT: Final[int] = 10 class FastConnector(Connection): TIMEOUT = 1 # Error reported by type checker There is no runtime checking of these properties.""") if sys.version_info >= (3, 11): final = typing.final else: # @final exists in 3.8+, but we backport it for all versions # before 3.11 to keep support for the __final__ attribute. # See https://bugs.python.org/issue46342 def final(f): """This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class cannot be subclassed. For example: class Base: @final def done(self) -> None: ... class Sub(Base): def done(self) -> None: # Error reported by type checker ... @final class Leaf: ... class Other(Leaf): # Error reported by type checker ... There is no runtime checking of these properties. The decorator sets the ``__final__`` attribute to ``True`` on the decorated object to allow runtime introspection. """ try: f.__final__ = True except (AttributeError, TypeError): # Skip the attribute silently if it is not writable. # AttributeError happens if the object has __slots__ or a # read-only property, TypeError if it's a builtin class. pass return f def IntVar(name): return typing.TypeVar(name) # 3.8+: if hasattr(typing, 'Literal'): Literal = typing.Literal # 3.7: else: class _LiteralForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): return typing._GenericAlias(self, parameters) Literal = _LiteralForm('Literal', doc="""A type that can be used to indicate to type checkers that the corresponding value has a value literally equivalent to the provided parameter. For example: var: Literal[4] = 4 The type checker understands that 'var' is literally equal to the value 4 and no other value. Literal[...] cannot be subclassed. 
There is no runtime checking verifying that the parameter is actually a value instead of a type.""") _overload_dummy = typing._overload_dummy # noqa if hasattr(typing, "get_overloads"): # 3.11+ overload = typing.overload get_overloads = typing.get_overloads clear_overloads = typing.clear_overloads else: # {module: {qualname: {firstlineno: func}}} _overload_registry = collections.defaultdict( functools.partial(collections.defaultdict, dict) ) def overload(func): """Decorator for overloaded functions/methods. In a stub file, place two or more stub definitions for the same function in a row, each decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... In a non-stub file (i.e. a regular .py file), do the same but follow it with an implementation. The implementation should *not* be decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... def utf8(value): # implementation goes here The overloads for a function can be retrieved at runtime using the get_overloads() function. """ # classmethod and staticmethod f = getattr(func, "__func__", func) try: _overload_registry[f.__module__][f.__qualname__][ f.__code__.co_firstlineno ] = func except AttributeError: # Not a normal function; ignore. pass return _overload_dummy def get_overloads(func): """Return all defined overloads for *func* as a sequence.""" # classmethod and staticmethod f = getattr(func, "__func__", func) if f.__module__ not in _overload_registry: return [] mod_dict = _overload_registry[f.__module__] if f.__qualname__ not in mod_dict: return [] return list(mod_dict[f.__qualname__].values()) def clear_overloads(): """Clear all overloads in the registry.""" _overload_registry.clear() # This is not a real generic class. Don't use outside annotations. Type = typing.Type # Various ABCs mimicking those in collections.abc. # A few are simply re-exported for completeness. 
Awaitable = typing.Awaitable Coroutine = typing.Coroutine AsyncIterable = typing.AsyncIterable AsyncIterator = typing.AsyncIterator Deque = typing.Deque ContextManager = typing.ContextManager AsyncContextManager = typing.AsyncContextManager DefaultDict = typing.DefaultDict # 3.7.2+ if hasattr(typing, 'OrderedDict'): OrderedDict = typing.OrderedDict # 3.7.0-3.7.2 else: OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) Counter = typing.Counter ChainMap = typing.ChainMap AsyncGenerator = typing.AsyncGenerator NewType = typing.NewType Text = typing.Text TYPE_CHECKING = typing.TYPE_CHECKING _PROTO_WHITELIST = ['Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'ContextManager', 'AsyncContextManager'] def _get_protocol_attrs(cls): attrs = set() for base in cls.__mro__[:-1]: # without object if base.__name__ in ('Protocol', 'Generic'): continue annotations = getattr(base, '__annotations__', {}) for attr in list(base.__dict__.keys()) + list(annotations.keys()): if (not attr.startswith('_abc_') and attr not in ( '__abstractmethods__', '__annotations__', '__weakref__', '_is_protocol', '_is_runtime_protocol', '__dict__', '__args__', '__slots__', '__next_in_mro__', '__parameters__', '__origin__', '__orig_bases__', '__extra__', '__tree_hash__', '__doc__', '__subclasshook__', '__init__', '__new__', '__module__', '_MutableMapping__marker', '_gorg')): attrs.add(attr) return attrs def _is_callable_members_only(cls): return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) def _maybe_adjust_parameters(cls): """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. The contents of this function are very similar to logic found in typing.Generic.__init_subclass__ on the CPython main branch. """ tvars = [] if '__orig_bases__' in cls.__dict__: tvars = typing._collect_type_vars(cls.__orig_bases__) # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. # If found, tvars must be a subset of it. # If not found, tvars is it. # Also check for and reject plain Generic, # and reject multiple Generic[...] and/or Protocol[...]. gvars = None for base in cls.__orig_bases__: if (isinstance(base, typing._GenericAlias) and base.__origin__ in (typing.Generic, Protocol)): # for error messages the_base = base.__origin__.__name__ if gvars is not None: raise TypeError( "Cannot inherit from Generic[...]" " and/or Protocol[...] multiple types.") gvars = base.__parameters__ if gvars is None: gvars = tvars else: tvarset = set(tvars) gvarset = set(gvars) if not tvarset <= gvarset: s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) s_args = ', '.join(str(g) for g in gvars) raise TypeError(f"Some type variables ({s_vars}) are" f" not listed in {the_base}[{s_args}]") tvars = gvars cls.__parameters__ = tuple(tvars) # 3.8+ if hasattr(typing, 'Protocol'): Protocol = typing.Protocol # 3.7 else: def _no_init(self, *args, **kwargs): if type(self)._is_protocol: raise TypeError('Protocols cannot be instantiated') class _ProtocolMeta(abc.ABCMeta): # noqa: B024 # This metaclass is a bit unfortunate and exists only because of the lack # of __instancehook__. def __instancecheck__(cls, instance): # We need this method for situations where attributes are # assigned in __init__. 
if ((not getattr(cls, '_is_protocol', False) or _is_callable_members_only(cls)) and issubclass(instance.__class__, cls)): return True if cls._is_protocol: if all(hasattr(instance, attr) and (not callable(getattr(cls, attr, None)) or getattr(instance, attr) is not None) for attr in _get_protocol_attrs(cls)): return True return super().__instancecheck__(instance) class Protocol(metaclass=_ProtocolMeta): # There is quite a lot of overlapping code with typing.Generic. # Unfortunately it is hard to avoid this while these live in two different # modules. The duplicated code will be removed when Protocol is moved to typing. """Base class for protocol classes. Protocol classes are defined as:: class Proto(Protocol): def meth(self) -> int: ... Such classes are primarily used with static type checkers that recognize structural subtyping (static duck-typing), for example:: class C: def meth(self) -> int: return 0 def func(x: Proto) -> int: return x.meth() func(C()) # Passes static type check See PEP 544 for details. Protocol classes decorated with @typing_extensions.runtime act as simple-minded runtime protocol that checks only the presence of given attributes, ignoring their type signatures. Protocol classes can be generic, they are defined as:: class GenProto(Protocol[T]): def meth(self) -> T: ... """ __slots__ = () _is_protocol = True def __new__(cls, *args, **kwds): if cls is Protocol: raise TypeError("Type Protocol cannot be instantiated; " "it can only be used as a base class") return super().__new__(cls) @typing._tp_cache def __class_getitem__(cls, params): if not isinstance(params, tuple): params = (params,) if not params and cls is not typing.Tuple: raise TypeError( f"Parameter list to {cls.__qualname__}[...] cannot be empty") msg = "Parameters to generic types must be types." params = tuple(typing._type_check(p, msg) for p in params) # noqa if cls is Protocol: # Generic can only be subscripted with unique type variables. if not all(isinstance(p, typing.TypeVar) for p in params): i = 0 while isinstance(params[i], typing.TypeVar): i += 1 raise TypeError( "Parameters to Protocol[...] must all be type variables." f" Parameter {i + 1} is {params[i]}") if len(set(params)) != len(params): raise TypeError( "Parameters to Protocol[...] must all be unique") else: # Subscripting a regular Generic subclass. _check_generic(cls, params, len(cls.__parameters__)) return typing._GenericAlias(cls, params) def __init_subclass__(cls, *args, **kwargs): if '__orig_bases__' in cls.__dict__: error = typing.Generic in cls.__orig_bases__ else: error = typing.Generic in cls.__bases__ if error: raise TypeError("Cannot inherit from plain Generic") _maybe_adjust_parameters(cls) # Determine if this is a protocol or a concrete subclass. if not cls.__dict__.get('_is_protocol', None): cls._is_protocol = any(b is Protocol for b in cls.__bases__) # Set (or override) the protocol subclass hook. 
def _proto_hook(other): if not cls.__dict__.get('_is_protocol', None): return NotImplemented if not getattr(cls, '_is_runtime_protocol', False): if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: return NotImplemented raise TypeError("Instance and class checks can only be used with" " @runtime protocols") if not _is_callable_members_only(cls): if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: return NotImplemented raise TypeError("Protocols with non-method members" " don't support issubclass()") if not isinstance(other, type): # Same error as for issubclass(1, int) raise TypeError('issubclass() arg 1 must be a class') for attr in _get_protocol_attrs(cls): for base in other.__mro__: if attr in base.__dict__: if base.__dict__[attr] is None: return NotImplemented break annotations = getattr(base, '__annotations__', {}) if (isinstance(annotations, typing.Mapping) and attr in annotations and isinstance(other, _ProtocolMeta) and other._is_protocol): break else: return NotImplemented return True if '__subclasshook__' not in cls.__dict__: cls.__subclasshook__ = _proto_hook # We have nothing more to do for non-protocols. if not cls._is_protocol: return # Check consistency of bases. for base in cls.__bases__: if not (base in (object, typing.Generic) or base.__module__ == 'collections.abc' and base.__name__ in _PROTO_WHITELIST or isinstance(base, _ProtocolMeta) and base._is_protocol): raise TypeError('Protocols can only inherit from other' f' protocols, got {repr(base)}') cls.__init__ = _no_init # 3.8+ if hasattr(typing, 'runtime_checkable'): runtime_checkable = typing.runtime_checkable # 3.7 else: def runtime_checkable(cls): """Mark a protocol class as a runtime protocol, so that it can be used with isinstance() and issubclass(). Raise TypeError if applied to a non-protocol class. This allows a simple-minded structural check very similar to the one-offs in collections.abc such as Hashable. """ if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: raise TypeError('@runtime_checkable can be only applied to protocol classes,' f' got {cls!r}') cls._is_runtime_protocol = True return cls # Exists for backwards compatibility. runtime = runtime_checkable # 3.8+ if hasattr(typing, 'SupportsIndex'): SupportsIndex = typing.SupportsIndex # 3.7 else: @runtime_checkable class SupportsIndex(Protocol): __slots__ = () @abc.abstractmethod def __index__(self) -> int: pass if hasattr(typing, "Required"): # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 # The standard library TypedDict below Python 3.11 does not store runtime # information about optional and required keys when using Required or NotRequired. # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. TypedDict = typing.TypedDict _TypedDictMeta = typing._TypedDictMeta is_typeddict = typing.is_typeddict else: def _check_fails(cls, other): try: if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools', 'typing']: # Typed dicts are only for static structural subtyping. 
raise TypeError('TypedDict does not support instance and class checks') except (AttributeError, ValueError): pass return False def _dict_new(*args, **kwargs): if not args: raise TypeError('TypedDict.__new__(): not enough arguments') _, args = args[0], args[1:] # allow the "cls" keyword be passed return dict(*args, **kwargs) _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' def _typeddict_new(*args, total=True, **kwargs): if not args: raise TypeError('TypedDict.__new__(): not enough arguments') _, args = args[0], args[1:] # allow the "cls" keyword be passed if args: typename, args = args[0], args[1:] # allow the "_typename" keyword be passed elif '_typename' in kwargs: typename = kwargs.pop('_typename') import warnings warnings.warn("Passing '_typename' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: raise TypeError("TypedDict.__new__() missing 1 required positional " "argument: '_typename'") if args: try: fields, = args # allow the "_fields" keyword be passed except ValueError: raise TypeError('TypedDict.__new__() takes from 2 to 3 ' f'positional arguments but {len(args) + 2} ' 'were given') elif '_fields' in kwargs and len(kwargs) == 1: fields = kwargs.pop('_fields') import warnings warnings.warn("Passing '_fields' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: fields = None if fields is None: fields = kwargs elif kwargs: raise TypeError("TypedDict takes either a dict or keyword arguments," " but not both") ns = {'__annotations__': dict(fields)} try: # Setting correct module is necessary to make typed dict classes pickleable. ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass return _TypedDictMeta(typename, (), ns, total=total) _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' ' /, *, total=True, **kwargs)') class _TypedDictMeta(type): def __init__(cls, name, bases, ns, total=True): super().__init__(name, bases, ns) def __new__(cls, name, bases, ns, total=True): # Create new typed dict class object. # This method is called directly when TypedDict is subclassed, # or via _typeddict_new when TypedDict is instantiated. This way # TypedDict supports all three syntaxes described in its docstring. # Subclasses and instances of TypedDict return actual dictionaries # via _dict_new. ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new # Don't insert typing.Generic into __bases__ here, # or Generic.__init_subclass__ will raise TypeError # in the super().__new__() call. # Instead, monkey-patch __bases__ onto the class after it's been created. 
tp_dict = super().__new__(cls, name, (dict,), ns) if any(issubclass(base, typing.Generic) for base in bases): tp_dict.__bases__ = (typing.Generic, dict) _maybe_adjust_parameters(tp_dict) annotations = {} own_annotations = ns.get('__annotations__', {}) msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" own_annotations = { n: typing._type_check(tp, msg) for n, tp in own_annotations.items() } required_keys = set() optional_keys = set() for base in bases: annotations.update(base.__dict__.get('__annotations__', {})) required_keys.update(base.__dict__.get('__required_keys__', ())) optional_keys.update(base.__dict__.get('__optional_keys__', ())) annotations.update(own_annotations) for annotation_key, annotation_type in own_annotations.items(): annotation_origin = get_origin(annotation_type) if annotation_origin is Annotated: annotation_args = get_args(annotation_type) if annotation_args: annotation_type = annotation_args[0] annotation_origin = get_origin(annotation_type) if annotation_origin is Required: required_keys.add(annotation_key) elif annotation_origin is NotRequired: optional_keys.add(annotation_key) elif total: required_keys.add(annotation_key) else: optional_keys.add(annotation_key) tp_dict.__annotations__ = annotations tp_dict.__required_keys__ = frozenset(required_keys) tp_dict.__optional_keys__ = frozenset(optional_keys) if not hasattr(tp_dict, '__total__'): tp_dict.__total__ = total return tp_dict __instancecheck__ = __subclasscheck__ = _check_fails TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) TypedDict.__module__ = __name__ TypedDict.__doc__ = \ """A simple typed name space. At runtime it is equivalent to a plain dict. TypedDict creates a dictionary type that expects all of its instances to have a certain set of keys, with each key associated with a value of a consistent type. This expectation is not checked at runtime but is only enforced by type checkers. Usage:: class Point2D(TypedDict): x: int y: int label: str a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') The type info can be accessed via the Point2D.__annotations__ dict, and the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. TypedDict supports two additional equivalent forms:: Point2D = TypedDict('Point2D', x=int, y=int, label=str) Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) The class syntax is only supported in Python 3.6+, while two other syntax forms work for Python 2.7 and 3.2+ """ if hasattr(typing, "_TypedDictMeta"): _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) else: _TYPEDDICT_TYPES = (_TypedDictMeta,) def is_typeddict(tp): """Check if an annotation is a TypedDict class For example:: class Film(TypedDict): title: str year: int is_typeddict(Film) # => True is_typeddict(Union[list, str]) # => False """ return isinstance(tp, tuple(_TYPEDDICT_TYPES)) if hasattr(typing, "assert_type"): assert_type = typing.assert_type else: def assert_type(__val, __typ): """Assert (to the type checker) that the value is of the given type. When the type checker encounters a call to assert_type(), it emits an error if the value is not of the specified type:: def greet(name: str) -> None: assert_type(name, str) # ok assert_type(name, int) # type checker error At runtime this returns the first argument unchanged and otherwise does nothing. 
""" return __val if hasattr(typing, "Required"): get_type_hints = typing.get_type_hints else: import functools import types # replaces _strip_annotations() def _strip_extras(t): """Strips Annotated, Required and NotRequired from a given type.""" if isinstance(t, _AnnotatedAlias): return _strip_extras(t.__origin__) if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): return _strip_extras(t.__args__[0]) if isinstance(t, typing._GenericAlias): stripped_args = tuple(_strip_extras(a) for a in t.__args__) if stripped_args == t.__args__: return t return t.copy_with(stripped_args) if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): stripped_args = tuple(_strip_extras(a) for a in t.__args__) if stripped_args == t.__args__: return t return types.GenericAlias(t.__origin__, stripped_args) if hasattr(types, "UnionType") and isinstance(t, types.UnionType): stripped_args = tuple(_strip_extras(a) for a in t.__args__) if stripped_args == t.__args__: return t return functools.reduce(operator.or_, stripped_args) return t def get_type_hints(obj, globalns=None, localns=None, include_extras=False): """Return type hints for an object. This is often the same as obj.__annotations__, but it handles forward references encoded as string literals, adds Optional[t] if a default value equal to None is set and recursively replaces all 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' (unless 'include_extras=True'). The argument may be a module, class, method, or function. The annotations are returned as a dictionary. For classes, annotations include also inherited members. TypeError is raised if the argument is not of a type that can contain annotations, and an empty dictionary is returned if no annotations are present. BEWARE -- the behavior of globalns and localns is counterintuitive (unless you are familiar with how eval() and exec() work). The search order is locals first, then globals. - If no dict arguments are passed, an attempt is made to use the globals from obj (or the respective module's globals for classes), and these are also used as the locals. If the object does not appear to have globals, an empty dictionary is used. - If one dict argument is passed, it is used for both globals and locals. - If two dict arguments are passed, they specify globals and locals, respectively. """ if hasattr(typing, "Annotated"): hint = typing.get_type_hints( obj, globalns=globalns, localns=localns, include_extras=True ) else: hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) if include_extras: return hint return {k: _strip_extras(t) for k, t in hint.items()} # Python 3.9+ has PEP 593 (Annotated) if hasattr(typing, 'Annotated'): Annotated = typing.Annotated # Not exported and not a public API, but needed for get_origin() and get_args() # to work. _AnnotatedAlias = typing._AnnotatedAlias # 3.7-3.8 else: class _AnnotatedAlias(typing._GenericAlias, _root=True): """Runtime representation of an annotated type. At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' with extra annotations. The alias behaves like a normal typing alias, instantiating is the same as instantiating the underlying type, binding it to types is also the same. 
""" def __init__(self, origin, metadata): if isinstance(origin, _AnnotatedAlias): metadata = origin.__metadata__ + metadata origin = origin.__origin__ super().__init__(origin, origin) self.__metadata__ = metadata def copy_with(self, params): assert len(params) == 1 new_type = params[0] return _AnnotatedAlias(new_type, self.__metadata__) def __repr__(self): return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " f"{', '.join(repr(a) for a in self.__metadata__)}]") def __reduce__(self): return operator.getitem, ( Annotated, (self.__origin__,) + self.__metadata__ ) def __eq__(self, other): if not isinstance(other, _AnnotatedAlias): return NotImplemented if self.__origin__ != other.__origin__: return False return self.__metadata__ == other.__metadata__ def __hash__(self): return hash((self.__origin__, self.__metadata__)) class Annotated: """Add context specific metadata to a type. Example: Annotated[int, runtime_check.Unsigned] indicates to the hypothetical runtime_check module that this type is an unsigned int. Every other consumer of this type can ignore this metadata and treat this type as int. The first argument to Annotated must be a valid type (and will be in the __origin__ field), the remaining arguments are kept as a tuple in the __extra__ field. Details: - It's an error to call `Annotated` with less than two arguments. - Nested Annotated are flattened:: Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - Instantiating an annotated type is equivalent to instantiating the underlying type:: Annotated[C, Ann1](5) == C(5) - Annotated can be used as a generic type alias:: Optimized = Annotated[T, runtime.Optimize()] Optimized[int] == Annotated[int, runtime.Optimize()] OptimizedList = Annotated[List[T], runtime.Optimize()] OptimizedList[int] == Annotated[List[int], runtime.Optimize()] """ __slots__ = () def __new__(cls, *args, **kwargs): raise TypeError("Type Annotated cannot be instantiated.") @typing._tp_cache def __class_getitem__(cls, params): if not isinstance(params, tuple) or len(params) < 2: raise TypeError("Annotated[...] should be used " "with at least two arguments (a type and an " "annotation).") allowed_special_forms = (ClassVar, Final) if get_origin(params[0]) in allowed_special_forms: origin = params[0] else: msg = "Annotated[t, ...]: t must be a type." origin = typing._type_check(params[0], msg) metadata = tuple(params[1:]) return _AnnotatedAlias(origin, metadata) def __init_subclass__(cls, *args, **kwargs): raise TypeError( f"Cannot subclass {cls.__module__}.Annotated" ) # Python 3.8 has get_origin() and get_args() but those implementations aren't # Annotated-aware, so we can't use those. Python 3.9's versions don't support # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. if sys.version_info[:2] >= (3, 10): get_origin = typing.get_origin get_args = typing.get_args # 3.7-3.9 else: try: # 3.9+ from typing import _BaseGenericAlias except ImportError: _BaseGenericAlias = typing._GenericAlias try: # 3.9+ from typing import GenericAlias as _typing_GenericAlias except ImportError: _typing_GenericAlias = typing._GenericAlias def get_origin(tp): """Get the unsubscripted version of a type. This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar and Annotated. Return None for unsupported types. 
Examples:: get_origin(Literal[42]) is Literal get_origin(int) is None get_origin(ClassVar[int]) is ClassVar get_origin(Generic) is Generic get_origin(Generic[T]) is Generic get_origin(Union[T, int]) is Union get_origin(List[Tuple[T, T]][int]) == list get_origin(P.args) is P """ if isinstance(tp, _AnnotatedAlias): return Annotated if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, ParamSpecArgs, ParamSpecKwargs)): return tp.__origin__ if tp is typing.Generic: return typing.Generic return None def get_args(tp): """Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. Examples:: get_args(Dict[str, int]) == (str, int) get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) get_args(Callable[[], T][int]) == ([], int) """ if isinstance(tp, _AnnotatedAlias): return (tp.__origin__,) + tp.__metadata__ if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): if getattr(tp, "_special", False): return () res = tp.__args__ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: res = (list(res[:-1]), res[-1]) return res return () # 3.10+ if hasattr(typing, 'TypeAlias'): TypeAlias = typing.TypeAlias # 3.9 elif sys.version_info[:2] >= (3, 9): class _TypeAliasForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @_TypeAliasForm def TypeAlias(self, parameters): """Special marker indicating that an assignment should be recognized as a proper type alias definition by type checkers. For example:: Predicate: TypeAlias = Callable[..., bool] It's invalid when used anywhere except as in the example above. """ raise TypeError(f"{self} is not subscriptable") # 3.7-3.8 else: class _TypeAliasForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name TypeAlias = _TypeAliasForm('TypeAlias', doc="""Special marker indicating that an assignment should be recognized as a proper type alias definition by type checkers. 
For example:: Predicate: TypeAlias = Callable[..., bool] It's invalid when used anywhere except as in the example above.""") class _DefaultMixin: """Mixin for TypeVarLike defaults.""" __slots__ = () def __init__(self, default): if isinstance(default, (tuple, list)): self.__default__ = tuple((typing._type_check(d, "Default must be a type") for d in default)) elif default: self.__default__ = typing._type_check(default, "Default must be a type") else: self.__default__ = None # Add default and infer_variance parameters from PEP 696 and 695 class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): """Type variable.""" __module__ = 'typing' def __init__(self, name, *constraints, bound=None, covariant=False, contravariant=False, default=None, infer_variance=False): super().__init__(name, *constraints, bound=bound, covariant=covariant, contravariant=contravariant) _DefaultMixin.__init__(self, default) self.__infer_variance__ = infer_variance # for pickling: try: def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): def_mod = None if def_mod != 'typing_extensions': self.__module__ = def_mod # Python 3.10+ has PEP 612 if hasattr(typing, 'ParamSpecArgs'): ParamSpecArgs = typing.ParamSpecArgs ParamSpecKwargs = typing.ParamSpecKwargs # 3.7-3.9 else: class _Immutable: """Mixin to indicate that object should not be copied.""" __slots__ = () def __copy__(self): return self def __deepcopy__(self, memo): return self class ParamSpecArgs(_Immutable): """The args for a ParamSpec object. Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. ParamSpecArgs objects have a reference back to their ParamSpec: P.args.__origin__ is P This type is meant for runtime introspection and has no special meaning to static type checkers. """ def __init__(self, origin): self.__origin__ = origin def __repr__(self): return f"{self.__origin__.__name__}.args" def __eq__(self, other): if not isinstance(other, ParamSpecArgs): return NotImplemented return self.__origin__ == other.__origin__ class ParamSpecKwargs(_Immutable): """The kwargs for a ParamSpec object. Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. ParamSpecKwargs objects have a reference back to their ParamSpec: P.kwargs.__origin__ is P This type is meant for runtime introspection and has no special meaning to static type checkers. """ def __init__(self, origin): self.__origin__ = origin def __repr__(self): return f"{self.__origin__.__name__}.kwargs" def __eq__(self, other): if not isinstance(other, ParamSpecKwargs): return NotImplemented return self.__origin__ == other.__origin__ # 3.10+ if hasattr(typing, 'ParamSpec'): # Add default Parameter - PEP 696 class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): """Parameter specification variable.""" __module__ = 'typing' def __init__(self, name, *, bound=None, covariant=False, contravariant=False, default=None): super().__init__(name, bound=bound, covariant=covariant, contravariant=contravariant) _DefaultMixin.__init__(self, default) # for pickling: try: def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): def_mod = None if def_mod != 'typing_extensions': self.__module__ = def_mod # 3.7-3.9 else: # Inherits from list as a workaround for Callable checks in Python < 3.9.2. class ParamSpec(list, _DefaultMixin): """Parameter specification variable. Usage:: P = ParamSpec('P') Parameter specification variables exist primarily for the benefit of static type checkers. 
They are used to forward the parameter types of one callable to another callable, a pattern commonly found in higher order functions and decorators. They are only valid when used in ``Concatenate``, or s the first argument to ``Callable``. In Python 3.10 and higher, they are also supported in user-defined Generics at runtime. See class Generic for more information on generic types. An example for annotating a decorator:: T = TypeVar('T') P = ParamSpec('P') def add_logging(f: Callable[P, T]) -> Callable[P, T]: '''A type-safe decorator to add logging to a function.''' def inner(*args: P.args, **kwargs: P.kwargs) -> T: logging.info(f'{f.__name__} was called') return f(*args, **kwargs) return inner @add_logging def add_two(x: float, y: float) -> float: '''Add two numbers together.''' return x + y Parameter specification variables defined with covariant=True or contravariant=True can be used to declare covariant or contravariant generic types. These keyword arguments are valid, but their actual semantics are yet to be decided. See PEP 612 for details. Parameter specification variables can be introspected. e.g.: P.__name__ == 'T' P.__bound__ == None P.__covariant__ == False P.__contravariant__ == False Note that only parameter specification variables defined in global scope can be pickled. """ # Trick Generic __parameters__. __class__ = typing.TypeVar @property def args(self): return ParamSpecArgs(self) @property def kwargs(self): return ParamSpecKwargs(self) def __init__(self, name, *, bound=None, covariant=False, contravariant=False, default=None): super().__init__([self]) self.__name__ = name self.__covariant__ = bool(covariant) self.__contravariant__ = bool(contravariant) if bound: self.__bound__ = typing._type_check(bound, 'Bound must be a type.') else: self.__bound__ = None _DefaultMixin.__init__(self, default) # for pickling: try: def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): def_mod = None if def_mod != 'typing_extensions': self.__module__ = def_mod def __repr__(self): if self.__covariant__: prefix = '+' elif self.__contravariant__: prefix = '-' else: prefix = '~' return prefix + self.__name__ def __hash__(self): return object.__hash__(self) def __eq__(self, other): return self is other def __reduce__(self): return self.__name__ # Hack to get typing._type_check to pass. def __call__(self, *args, **kwargs): pass # 3.7-3.9 if not hasattr(typing, 'Concatenate'): # Inherits from list as a workaround for Callable checks in Python < 3.9.2. class _ConcatenateGenericAlias(list): # Trick Generic into looking into this for __parameters__. __class__ = typing._GenericAlias # Flag in 3.8. _special = False def __init__(self, origin, args): super().__init__(args) self.__origin__ = origin self.__args__ = args def __repr__(self): _type_repr = typing._type_repr return (f'{_type_repr(self.__origin__)}' f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') def __hash__(self): return hash((self.__origin__, self.__args__)) # Hack to get typing._type_check to pass in Generic. 
def __call__(self, *args, **kwargs): pass @property def __parameters__(self): return tuple( tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) ) # 3.7-3.9 @typing._tp_cache def _concatenate_getitem(self, parameters): if parameters == (): raise TypeError("Cannot take a Concatenate of no types.") if not isinstance(parameters, tuple): parameters = (parameters,) if not isinstance(parameters[-1], ParamSpec): raise TypeError("The last parameter to Concatenate should be a " "ParamSpec variable.") msg = "Concatenate[arg, ...]: each arg must be a type." parameters = tuple(typing._type_check(p, msg) for p in parameters) return _ConcatenateGenericAlias(self, parameters) # 3.10+ if hasattr(typing, 'Concatenate'): Concatenate = typing.Concatenate _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa # 3.9 elif sys.version_info[:2] >= (3, 9): @_TypeAliasForm def Concatenate(self, parameters): """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information. """ return _concatenate_getitem(self, parameters) # 3.7-8 else: class _ConcatenateForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): return _concatenate_getitem(self, parameters) Concatenate = _ConcatenateForm( 'Concatenate', doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information. """) # 3.10+ if hasattr(typing, 'TypeGuard'): TypeGuard = typing.TypeGuard # 3.9 elif sys.version_info[:2] >= (3, 9): class _TypeGuardForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @_TypeGuardForm def TypeGuard(self, parameters): """Special typing form used to annotate the return type of a user-defined type guard function. ``TypeGuard`` only accepts a single type argument. At runtime, functions marked this way should return a boolean. ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static type checkers to determine a more precise type of an expression within a program's code flow. Usually type narrowing is done by analyzing conditional code flow and applying the narrowing to a block of code. The conditional expression here is sometimes referred to as a "type guard". Sometimes it would be convenient to use a user-defined boolean function as a type guard. Such a function should use ``TypeGuard[...]`` as its return type to alert static type checkers to this intention. Using ``-> TypeGuard`` tells the static type checker that for a given function: 1. The return value is a boolean. 2. If the return value is ``True``, the type of its argument is the type inside ``TypeGuard``. For example:: def is_str(val: Union[str, float]): # "isinstance" type guard if isinstance(val, str): # Type of ``val`` is narrowed to ``str`` ... else: # Else, type of ``val`` is narrowed to ``float``. ... Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower form of ``TypeA`` (it can even be a wider form) and this may lead to type-unsafe results. The main reason is to allow for things like narrowing ``List[object]`` to ``List[str]`` even though the latter is not a subtype of the former, since ``List`` is invariant. 
The responsibility of writing type-safe type guards is left to the user. ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """ item = typing._type_check(parameters, f'{self} accepts only a single type.') return typing._GenericAlias(self, (item,)) # 3.7-3.8 else: class _TypeGuardForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only a single type') return typing._GenericAlias(self, (item,)) TypeGuard = _TypeGuardForm( 'TypeGuard', doc="""Special typing form used to annotate the return type of a user-defined type guard function. ``TypeGuard`` only accepts a single type argument. At runtime, functions marked this way should return a boolean. ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static type checkers to determine a more precise type of an expression within a program's code flow. Usually type narrowing is done by analyzing conditional code flow and applying the narrowing to a block of code. The conditional expression here is sometimes referred to as a "type guard". Sometimes it would be convenient to use a user-defined boolean function as a type guard. Such a function should use ``TypeGuard[...]`` as its return type to alert static type checkers to this intention. Using ``-> TypeGuard`` tells the static type checker that for a given function: 1. The return value is a boolean. 2. If the return value is ``True``, the type of its argument is the type inside ``TypeGuard``. For example:: def is_str(val: Union[str, float]): # "isinstance" type guard if isinstance(val, str): # Type of ``val`` is narrowed to ``str`` ... else: # Else, type of ``val`` is narrowed to ``float``. ... Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower form of ``TypeA`` (it can even be a wider form) and this may lead to type-unsafe results. The main reason is to allow for things like narrowing ``List[object]`` to ``List[str]`` even though the latter is not a subtype of the former, since ``List`` is invariant. The responsibility of writing type-safe type guards is left to the user. ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """) # Vendored from cpython typing._SpecialFrom class _SpecialForm(typing._Final, _root=True): __slots__ = ('_name', '__doc__', '_getitem') def __init__(self, getitem): self._getitem = getitem self._name = getitem.__name__ self.__doc__ = getitem.__doc__ def __getattr__(self, item): if item in {'__name__', '__qualname__'}: return self._name raise AttributeError(item) def __mro_entries__(self, bases): raise TypeError(f"Cannot subclass {self!r}") def __repr__(self): return f'typing_extensions.{self._name}' def __reduce__(self): return self._name def __call__(self, *args, **kwds): raise TypeError(f"Cannot instantiate {self!r}") def __or__(self, other): return typing.Union[self, other] def __ror__(self, other): return typing.Union[other, self] def __instancecheck__(self, obj): raise TypeError(f"{self} cannot be used with isinstance()") def __subclasscheck__(self, cls): raise TypeError(f"{self} cannot be used with issubclass()") @typing._tp_cache def __getitem__(self, parameters): return self._getitem(self, parameters) if hasattr(typing, "LiteralString"): LiteralString = typing.LiteralString else: @_SpecialForm def LiteralString(self, params): """Represents an arbitrary literal string. 
Example:: from typing_extensions import LiteralString def query(sql: LiteralString) -> ...: ... query("SELECT * FROM table") # ok query(f"SELECT * FROM {input()}") # not ok See PEP 675 for details. """ raise TypeError(f"{self} is not subscriptable") if hasattr(typing, "Self"): Self = typing.Self else: @_SpecialForm def Self(self, params): """Used to spell the type of "self" in classes. Example:: from typing import Self class ReturnsSelf: def parse(self, data: bytes) -> Self: ... return self """ raise TypeError(f"{self} is not subscriptable") if hasattr(typing, "Never"): Never = typing.Never else: @_SpecialForm def Never(self, params): """The bottom type, a type that has no members. This can be used to define a function that should never be called, or a function that never returns:: from typing_extensions import Never def never_call_me(arg: Never) -> None: pass def int_or_str(arg: int | str) -> None: never_call_me(arg) # type checker error match arg: case int(): print("It's an int") case str(): print("It's a str") case _: never_call_me(arg) # ok, arg is of type Never """ raise TypeError(f"{self} is not subscriptable") if hasattr(typing, 'Required'): Required = typing.Required NotRequired = typing.NotRequired elif sys.version_info[:2] >= (3, 9): class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @_ExtensionsSpecialForm def Required(self, parameters): """A special typing construct to mark a key of a total=False TypedDict as required. For example: class Movie(TypedDict, total=False): title: Required[str] year: int m = Movie( title='The Matrix', # typechecker error if key is omitted year=1999, ) There is no runtime checking that a required key is actually provided when instantiating a related TypedDict. """ item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) @_ExtensionsSpecialForm def NotRequired(self, parameters): """A special typing construct to mark a key of a TypedDict as potentially missing. For example: class Movie(TypedDict): title: str year: NotRequired[int] m = Movie( title='The Matrix', # typechecker error if key is omitted year=1999, ) """ item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) else: class _RequiredForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Required = _RequiredForm( 'Required', doc="""A special typing construct to mark a key of a total=False TypedDict as required. For example: class Movie(TypedDict, total=False): title: Required[str] year: int m = Movie( title='The Matrix', # typechecker error if key is omitted year=1999, ) There is no runtime checking that a required key is actually provided when instantiating a related TypedDict. """) NotRequired = _RequiredForm( 'NotRequired', doc="""A special typing construct to mark a key of a TypedDict as potentially missing. For example: class Movie(TypedDict): title: str year: NotRequired[int] m = Movie( title='The Matrix', # typechecker error if key is omitted year=1999, ) """) if hasattr(typing, "Unpack"): # 3.11+ Unpack = typing.Unpack elif sys.version_info[:2] >= (3, 9): class _UnpackSpecialForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' 
+ self._name class _UnpackAlias(typing._GenericAlias, _root=True): __class__ = typing.TypeVar @_UnpackSpecialForm def Unpack(self, parameters): """A special typing construct to unpack a variadic type. For example: Shape = TypeVarTuple('Shape') Batch = NewType('Batch', int) def add_batch_axis( x: Array[Unpack[Shape]] ) -> Array[Batch, Unpack[Shape]]: ... """ item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return _UnpackAlias(self, (item,)) def _is_unpack(obj): return isinstance(obj, _UnpackAlias) else: class _UnpackAlias(typing._GenericAlias, _root=True): __class__ = typing.TypeVar class _UnpackForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return _UnpackAlias(self, (item,)) Unpack = _UnpackForm( 'Unpack', doc="""A special typing construct to unpack a variadic type. For example: Shape = TypeVarTuple('Shape') Batch = NewType('Batch', int) def add_batch_axis( x: Array[Unpack[Shape]] ) -> Array[Batch, Unpack[Shape]]: ... """) def _is_unpack(obj): return isinstance(obj, _UnpackAlias) if hasattr(typing, "TypeVarTuple"): # 3.11+ # Add default Parameter - PEP 696 class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True): """Type variable tuple.""" def __init__(self, name, *, default=None): super().__init__(name) _DefaultMixin.__init__(self, default) # for pickling: try: def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): def_mod = None if def_mod != 'typing_extensions': self.__module__ = def_mod else: class TypeVarTuple(_DefaultMixin): """Type variable tuple. Usage:: Ts = TypeVarTuple('Ts') In the same way that a normal type variable is a stand-in for a single type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* type such as ``Tuple[int, str]``. Type variable tuples can be used in ``Generic`` declarations. Consider the following example:: class Array(Generic[*Ts]): ... The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, where ``T1`` and ``T2`` are type variables. To use these type variables as type parameters of ``Array``, we must *unpack* the type variable tuple using the star operator: ``*Ts``. The signature of ``Array`` then behaves as if we had simply written ``class Array(Generic[T1, T2]): ...``. In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows us to parameterise the class with an *arbitrary* number of type parameters. Type variable tuples can be used anywhere a normal ``TypeVar`` can. This includes class definitions, as shown above, as well as function signatures and variable annotations:: class Array(Generic[*Ts]): def __init__(self, shape: Tuple[*Ts]): self._shape: Tuple[*Ts] = shape def get_shape(self) -> Tuple[*Ts]: return self._shape shape = (Height(480), Width(640)) x: Array[Height, Width] = Array(shape) y = abs(x) # Inferred type is Array[Height, Width] z = x + x # ... is Array[Height, Width] x.get_shape() # ... is tuple[Height, Width] """ # Trick Generic __parameters__. 
__class__ = typing.TypeVar def __iter__(self): yield self.__unpacked__ def __init__(self, name, *, default=None): self.__name__ = name _DefaultMixin.__init__(self, default) # for pickling: try: def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): def_mod = None if def_mod != 'typing_extensions': self.__module__ = def_mod self.__unpacked__ = Unpack[self] def __repr__(self): return self.__name__ def __hash__(self): return object.__hash__(self) def __eq__(self, other): return self is other def __reduce__(self): return self.__name__ def __init_subclass__(self, *args, **kwds): if '_root' not in kwds: raise TypeError("Cannot subclass special typing classes") if hasattr(typing, "reveal_type"): reveal_type = typing.reveal_type else: def reveal_type(__obj: T) -> T: """Reveal the inferred type of a variable. When a static type checker encounters a call to ``reveal_type()``, it will emit the inferred type of the argument:: x: int = 1 reveal_type(x) Running a static type checker (e.g., ``mypy``) on this example will produce output similar to 'Revealed type is "builtins.int"'. At runtime, the function prints the runtime type of the argument and returns it unchanged. """ print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) return __obj if hasattr(typing, "assert_never"): assert_never = typing.assert_never else: def assert_never(__arg: Never) -> Never: """Assert to the type checker that a line of code is unreachable. Example:: def int_or_str(arg: int | str) -> None: match arg: case int(): print("It's an int") case str(): print("It's a str") case _: assert_never(arg) If a type checker finds that a call to assert_never() is reachable, it will emit an error. At runtime, this throws an exception when called. """ raise AssertionError("Expected code to be unreachable") if hasattr(typing, 'dataclass_transform'): dataclass_transform = typing.dataclass_transform else: def dataclass_transform( *, eq_default: bool = True, order_default: bool = False, kw_only_default: bool = False, field_specifiers: typing.Tuple[ typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], ... ] = (), **kwargs: typing.Any, ) -> typing.Callable[[T], T]: """Decorator that marks a function, class, or metaclass as providing dataclass-like behavior. Example: from typing_extensions import dataclass_transform _T = TypeVar("_T") # Used on a decorator function @dataclass_transform() def create_model(cls: type[_T]) -> type[_T]: ... return cls @create_model class CustomerModel: id: int name: str # Used on a base class @dataclass_transform() class ModelBase: ... class CustomerModel(ModelBase): id: int name: str # Used on a metaclass @dataclass_transform() class ModelMeta(type): ... class ModelBase(metaclass=ModelMeta): ... class CustomerModel(ModelBase): id: int name: str Each of the ``CustomerModel`` classes defined in this example will now behave similarly to a dataclass created with the ``@dataclasses.dataclass`` decorator. For example, the type checker will synthesize an ``__init__`` method. The arguments to this decorator can be used to customize this behavior: - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be True or False if it is omitted by the caller. - ``order_default`` indicates whether the ``order`` parameter is assumed to be True or False if it is omitted by the caller. - ``kw_only_default`` indicates whether the ``kw_only`` parameter is assumed to be True or False if it is omitted by the caller. 
- ``field_specifiers`` specifies a static list of supported classes or functions that describe fields, similar to ``dataclasses.field()``. At runtime, this decorator records its arguments in the ``__dataclass_transform__`` attribute on the decorated object. See PEP 681 for details. """ def decorator(cls_or_fn): cls_or_fn.__dataclass_transform__ = { "eq_default": eq_default, "order_default": order_default, "kw_only_default": kw_only_default, "field_specifiers": field_specifiers, "kwargs": kwargs, } return cls_or_fn return decorator if hasattr(typing, "override"): override = typing.override else: _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) def override(__arg: _F) -> _F: """Indicate that a method is intended to override a method in a base class. Usage: class Base: def method(self) -> None: ... pass class Child(Base): @override def method(self) -> None: super().method() When this decorator is applied to a method, the type checker will validate that it overrides a method with the same name on a base class. This helps prevent bugs that may occur when a base class is changed without an equivalent change to a child class. See PEP 698 for details. """ return __arg # We have to do some monkey patching to deal with the dual nature of # Unpack/TypeVarTuple: # - We want Unpack to be a kind of TypeVar so it gets accepted in # Generic[Unpack[Ts]] # - We want it to *not* be treated as a TypeVar for the purposes of # counting generic parameters, so that when we subscript a generic, # the runtime doesn't try to substitute the Unpack with the subscripted type. if not hasattr(typing, "TypeVarTuple"): typing._collect_type_vars = _collect_type_vars typing._check_generic = _check_generic # Backport typing.NamedTuple as it exists in Python 3.11. # In 3.11, the ability to define generic `NamedTuple`s was supported. # This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. 
if sys.version_info >= (3, 11): NamedTuple = typing.NamedTuple else: def _caller(): try: return sys._getframe(2).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): # For platforms without _getframe() return None def _make_nmtuple(name, types, module, defaults=()): fields = [n for n, t in types] annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") for n, t in types} nm_tpl = collections.namedtuple(name, fields, defaults=defaults, module=module) nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations # The `_field_types` attribute was removed in 3.9; # in earlier versions, it is the same as the `__annotations__` attribute if sys.version_info < (3, 9): nm_tpl._field_types = annotations return nm_tpl _prohibited_namedtuple_fields = typing._prohibited _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) class _NamedTupleMeta(type): def __new__(cls, typename, bases, ns): assert _NamedTuple in bases for base in bases: if base is not _NamedTuple and base is not typing.Generic: raise TypeError( 'can only inherit from a NamedTuple type and Generic') bases = tuple(tuple if base is _NamedTuple else base for base in bases) types = ns.get('__annotations__', {}) default_names = [] for field_name in types: if field_name in ns: default_names.append(field_name) elif default_names: raise TypeError(f"Non-default namedtuple field {field_name} " f"cannot follow default field" f"{'s' if len(default_names) > 1 else ''} " f"{', '.join(default_names)}") nm_tpl = _make_nmtuple( typename, types.items(), defaults=[ns[n] for n in default_names], module=ns['__module__'] ) nm_tpl.__bases__ = bases if typing.Generic in bases: class_getitem = typing.Generic.__class_getitem__.__func__ nm_tpl.__class_getitem__ = classmethod(class_getitem) # update from user namespace without overriding special namedtuple attributes for key in ns: if key in _prohibited_namedtuple_fields: raise AttributeError("Cannot overwrite NamedTuple attribute " + key) elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: setattr(nm_tpl, key, ns[key]) if typing.Generic in bases: nm_tpl.__init_subclass__() return nm_tpl def NamedTuple(__typename, __fields=None, **kwargs): if __fields is None: __fields = kwargs.items() elif kwargs: raise TypeError("Either list of fields or keywords" " can be provided to NamedTuple, not both") return _make_nmtuple(__typename, __fields, module=_caller()) NamedTuple.__doc__ = typing.NamedTuple.__doc__ _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) # On 3.8+, alter the signature so that it matches typing.NamedTuple. # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7, # so just leave the signature as it is on 3.7. if sys.version_info >= (3, 8): NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)' def _namedtuple_mro_entries(bases): assert NamedTuple in bases return (_NamedTuple,) NamedTuple.__mro_entries__ = _namedtuple_mro_entries
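# A minimal usage sketch (not part of the vendored file) for the backports
# defined above, assuming `typing_extensions` is importable on this Python.
# `Point`, `is_str_list`, and `describe` are illustrative names, not library API.
from typing import List, Union

from typing_extensions import NamedTuple, TypeGuard, assert_never


class Point(NamedTuple):
    # The backport above makes this class syntax work on 3.7-3.10 as well.
    x: int
    y: int = 0  # fields with defaults must follow fields without


def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
    # At runtime this just returns a bool; a static checker narrows
    # List[object] to List[str] on the True branch.
    return all(isinstance(v, str) for v in val)


def describe(arg: Union[int, str]) -> str:
    if isinstance(arg, int):
        return 'int'
    if isinstance(arg, str):
        return 'str'
    assert_never(arg)  # a checker flags this call if it is reachable


print(Point(1))            # Point(x=1, y=0)
print(is_str_list(['a']))  # True
print(describe(3))         # int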
80,078
Python
35.234842
90
0.562614
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/recipes.py
"""Imported from the recipes section of the itertools documentation. All functions taken from the recipes section of the itertools library docs [1]_. Some backward-compatible usability improvements have been made. .. [1] http://docs.python.org/library/itertools.html#recipes """ import math import operator import warnings from collections import deque from collections.abc import Sized from functools import reduce from itertools import ( chain, combinations, compress, count, cycle, groupby, islice, product, repeat, starmap, tee, zip_longest, ) from random import randrange, sample, choice from sys import hexversion __all__ = [ 'all_equal', 'batched', 'before_and_after', 'consume', 'convolve', 'dotproduct', 'first_true', 'factor', 'flatten', 'grouper', 'iter_except', 'iter_index', 'matmul', 'ncycles', 'nth', 'nth_combination', 'padnone', 'pad_none', 'pairwise', 'partition', 'polynomial_from_roots', 'powerset', 'prepend', 'quantify', 'random_combination_with_replacement', 'random_combination', 'random_permutation', 'random_product', 'repeatfunc', 'roundrobin', 'sieve', 'sliding_window', 'subslices', 'tabulate', 'tail', 'take', 'transpose', 'triplewise', 'unique_everseen', 'unique_justseen', ] _marker = object() def take(n, iterable): """Return first *n* items of the iterable as a list. >>> take(3, range(10)) [0, 1, 2] If there are fewer than *n* items in the iterable, all of them are returned. >>> take(10, range(3)) [0, 1, 2] """ return list(islice(iterable, n)) def tabulate(function, start=0): """Return an iterator over the results of ``func(start)``, ``func(start + 1)``, ``func(start + 2)``... *func* should be a function that accepts one integer argument. If *start* is not specified it defaults to 0. It will be incremented each time the iterator is advanced. >>> square = lambda x: x ** 2 >>> iterator = tabulate(square, -3) >>> take(4, iterator) [9, 4, 1, 0] """ return map(function, count(start)) def tail(n, iterable): """Return an iterator over the last *n* items of *iterable*. >>> t = tail(3, 'ABCDEFG') >>> list(t) ['E', 'F', 'G'] """ # If the given iterable has a length, then we can use islice to get its # final elements. Note that if the iterable is not actually Iterable, # either islice or deque will throw a TypeError. This is why we don't # check if it is Iterable. if isinstance(iterable, Sized): yield from islice(iterable, max(0, len(iterable) - n), None) else: yield from iter(deque(iterable, maxlen=n)) def consume(iterator, n=None): """Advance *iterable* by *n* steps. If *n* is ``None``, consume it entirely. Efficiently exhausts an iterator without returning values. Defaults to consuming the whole iterator, but an optional second argument may be provided to limit consumption. >>> i = (x for x in range(10)) >>> next(i) 0 >>> consume(i, 3) >>> next(i) 4 >>> consume(i) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration If the iterator has fewer items remaining than the provided limit, the whole iterator will be consumed. >>> i = (x for x in range(3)) >>> consume(i, 5) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration """ # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(islice(iterator, n, n), None) def nth(iterable, n, default=None): """Returns the nth item or a default value. 
>>> l = range(10) >>> nth(l, 3) 3 >>> nth(l, 20, "zebra") 'zebra' """ return next(islice(iterable, n, None), default) def all_equal(iterable): """ Returns ``True`` if all the elements are equal to each other. >>> all_equal('aaaa') True >>> all_equal('aaab') False """ g = groupby(iterable) return next(g, True) and not next(g, False) def quantify(iterable, pred=bool): """Return the how many times the predicate is true. >>> quantify([True, False, True]) 2 """ return sum(map(pred, iterable)) def pad_none(iterable): """Returns the sequence of elements and then returns ``None`` indefinitely. >>> take(5, pad_none(range(3))) [0, 1, 2, None, None] Useful for emulating the behavior of the built-in :func:`map` function. See also :func:`padded`. """ return chain(iterable, repeat(None)) padnone = pad_none def ncycles(iterable, n): """Returns the sequence elements *n* times >>> list(ncycles(["a", "b"], 3)) ['a', 'b', 'a', 'b', 'a', 'b'] """ return chain.from_iterable(repeat(tuple(iterable), n)) def dotproduct(vec1, vec2): """Returns the dot product of the two iterables. >>> dotproduct([10, 10], [20, 20]) 400 """ return sum(map(operator.mul, vec1, vec2)) def flatten(listOfLists): """Return an iterator flattening one level of nesting in a list of lists. >>> list(flatten([[0, 1], [2, 3]])) [0, 1, 2, 3] See also :func:`collapse`, which can flatten multiple levels of nesting. """ return chain.from_iterable(listOfLists) def repeatfunc(func, times=None, *args): """Call *func* with *args* repeatedly, returning an iterable over the results. If *times* is specified, the iterable will terminate after that many repetitions: >>> from operator import add >>> times = 4 >>> args = 3, 5 >>> list(repeatfunc(add, times, *args)) [8, 8, 8, 8] If *times* is ``None`` the iterable will not terminate: >>> from random import randrange >>> times = None >>> args = 1, 11 >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP [2, 4, 8, 1, 8, 4] """ if times is None: return starmap(func, repeat(args)) return starmap(func, repeat(args, times)) def _pairwise(iterable): """Returns an iterator of paired items, overlapping, from the original >>> take(4, pairwise(count())) [(0, 1), (1, 2), (2, 3), (3, 4)] On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. """ a, b = tee(iterable) next(b, None) yield from zip(a, b) try: from itertools import pairwise as itertools_pairwise except ImportError: pairwise = _pairwise else: def pairwise(iterable): yield from itertools_pairwise(iterable) pairwise.__doc__ = _pairwise.__doc__ class UnequalIterablesError(ValueError): def __init__(self, details=None): msg = 'Iterables have different lengths' if details is not None: msg += (': index 0 has length {}; index {} has length {}').format( *details ) super().__init__(msg) def _zip_equal_generator(iterables): for combo in zip_longest(*iterables, fillvalue=_marker): for val in combo: if val is _marker: raise UnequalIterablesError() yield combo def _zip_equal(*iterables): # Check whether the iterables are all the same size. try: first_size = len(iterables[0]) for i, it in enumerate(iterables[1:], 1): size = len(it) if size != first_size: break else: # If we didn't break out, we can use the built-in zip. return zip(*iterables) # If we did break out, there was a mismatch. raise UnequalIterablesError(details=(first_size, i, size)) # If any one of the iterables didn't have a length, start reading # them until one runs out. 
except TypeError: return _zip_equal_generator(iterables) def grouper(iterable, n, incomplete='fill', fillvalue=None): """Group elements from *iterable* into fixed-length groups of length *n*. >>> list(grouper('ABCDEF', 3)) [('A', 'B', 'C'), ('D', 'E', 'F')] The keyword arguments *incomplete* and *fillvalue* control what happens for iterables whose length is not a multiple of *n*. When *incomplete* is `'fill'`, the last group will contain instances of *fillvalue*. >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] When *incomplete* is `'ignore'`, the last group will not be emitted. >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x')) [('A', 'B', 'C'), ('D', 'E', 'F')] When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. >>> it = grouper('ABCDEFG', 3, incomplete='strict') >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnequalIterablesError """ args = [iter(iterable)] * n if incomplete == 'fill': return zip_longest(*args, fillvalue=fillvalue) if incomplete == 'strict': return _zip_equal(*args) if incomplete == 'ignore': return zip(*args) else: raise ValueError('Expected fill, strict, or ignore') def roundrobin(*iterables): """Yields an item from each iterable, alternating between them. >>> list(roundrobin('ABC', 'D', 'EF')) ['A', 'D', 'E', 'B', 'F', 'C'] This function produces the same output as :func:`interleave_longest`, but may perform better for some inputs (in particular when the number of iterables is small). """ # Recipe credited to George Sakkis pending = len(iterables) nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending)) def partition(pred, iterable): """ Returns a 2-tuple of iterables derived from the input iterable. The first yields the items that have ``pred(item) == False``. The second yields the items that have ``pred(item) == True``. >>> is_odd = lambda x: x % 2 != 0 >>> iterable = range(10) >>> even_items, odd_items = partition(is_odd, iterable) >>> list(even_items), list(odd_items) ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) If *pred* is None, :func:`bool` is used. >>> iterable = [0, 1, False, True, '', ' '] >>> false_items, true_items = partition(None, iterable) >>> list(false_items), list(true_items) ([0, False, ''], [1, True, ' ']) """ if pred is None: pred = bool evaluations = ((pred(x), x) for x in iterable) t1, t2 = tee(evaluations) return ( (x for (cond, x) in t1 if not cond), (x for (cond, x) in t2 if cond), ) def powerset(iterable): """Yields all possible subsets of the iterable. >>> list(powerset([1, 2, 3])) [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] :func:`powerset` will operate on iterables that aren't :class:`set` instances, so repeated elements in the input will produce repeated elements in the output. Use :func:`unique_everseen` on the input to avoid generating duplicates: >>> seq = [1, 1, 0] >>> list(powerset(seq)) [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] >>> from more_itertools import unique_everseen >>> list(powerset(unique_everseen(seq))) [(), (1,), (0,), (1, 0)] """ s = list(iterable) return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) def unique_everseen(iterable, key=None): """ Yield unique elements, preserving order. 
>>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] Sequences with a mix of hashable and unhashable items can be used. The function will be slower (i.e., `O(n^2)`) for unhashable items. Remember that ``list`` objects are unhashable - you can use the *key* parameter to transform the list to a tuple (which is hashable) to avoid a slowdown. >>> iterable = ([1, 2], [2, 3], [1, 2]) >>> list(unique_everseen(iterable)) # Slow [[1, 2], [2, 3]] >>> list(unique_everseen(iterable, key=tuple)) # Faster [[1, 2], [2, 3]] Similary, you may want to convert unhashable ``set`` objects with ``key=frozenset``. For ``dict`` objects, ``key=lambda x: frozenset(x.items())`` can be used. """ seenset = set() seenset_add = seenset.add seenlist = [] seenlist_add = seenlist.append use_key = key is not None for element in iterable: k = key(element) if use_key else element try: if k not in seenset: seenset_add(k) yield element except TypeError: if k not in seenlist: seenlist_add(k) yield element def unique_justseen(iterable, key=None): """Yields elements in order, ignoring serial duplicates >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ return map(next, map(operator.itemgetter(1), groupby(iterable, key))) def iter_except(func, exception, first=None): """Yields results from a function repeatedly until an exception is raised. Converts a call-until-exception interface to an iterator interface. Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel to end the loop. >>> l = [0, 1, 2] >>> list(iter_except(l.pop, IndexError)) [2, 1, 0] Multiple exceptions can be specified as a stopping condition: >>> l = [1, 2, 3, '...', 4, 5, 6] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [7, 6, 5] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [4, 3, 2] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [] """ try: if first is not None: yield first() while 1: yield func() except exception: pass def first_true(iterable, default=None, pred=None): """ Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which ``pred(item) == True`` . >>> first_true(range(10)) 1 >>> first_true(range(10), pred=lambda x: x > 5) 6 >>> first_true(range(10), default='missing', pred=lambda x: x > 9) 'missing' """ return next(filter(pred, iterable), default) def random_product(*args, repeat=1): """Draw an item at random from each of the input iterables. >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP ('c', 3, 'Z') If *repeat* is provided as a keyword argument, that many items will be drawn from each iterable. >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP ('a', 2, 'd', 3) This equivalent to taking a random selection from ``itertools.product(*args, **kwarg)``. """ pools = [tuple(pool) for pool in args] * repeat return tuple(choice(pool) for pool in pools) def random_permutation(iterable, r=None): """Return a random *r* length permutation of the elements in *iterable*. If *r* is not specified or is ``None``, then *r* defaults to the length of *iterable*. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) This equivalent to taking a random selection from ``itertools.permutations(iterable, r)``. 
""" pool = tuple(iterable) r = len(pool) if r is None else r return tuple(sample(pool, r)) def random_combination(iterable, r): """Return a random *r* length subsequence of the elements in *iterable*. >>> random_combination(range(5), 3) # doctest:+SKIP (2, 3, 4) This equivalent to taking a random selection from ``itertools.combinations(iterable, r)``. """ pool = tuple(iterable) n = len(pool) indices = sorted(sample(range(n), r)) return tuple(pool[i] for i in indices) def random_combination_with_replacement(iterable, r): """Return a random *r* length subsequence of elements in *iterable*, allowing individual elements to be repeated. >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP (0, 0, 1, 2, 2) This equivalent to taking a random selection from ``itertools.combinations_with_replacement(iterable, r)``. """ pool = tuple(iterable) n = len(pool) indices = sorted(randrange(n) for i in range(r)) return tuple(pool[i] for i in indices) def nth_combination(iterable, r, index): """Equivalent to ``list(combinations(iterable, r))[index]``. The subsequences of *iterable* that are of length *r* can be ordered lexicographically. :func:`nth_combination` computes the subsequence at sort position *index* directly, without computing the previous subsequences. >>> nth_combination(range(5), 3, 5) (0, 3, 4) ``ValueError`` will be raised If *r* is negative or greater than the length of *iterable*. ``IndexError`` will be raised if the given *index* is invalid. """ pool = tuple(iterable) n = len(pool) if (r < 0) or (r > n): raise ValueError c = 1 k = min(r, n - r) for i in range(1, k + 1): c = c * (n - k + i) // i if index < 0: index += c if (index < 0) or (index >= c): raise IndexError result = [] while r: c, n, r = c * r // n, n - 1, r - 1 while index >= c: index -= c c, n = c * (n - r) // n, n - 1 result.append(pool[-1 - n]) return tuple(result) def prepend(value, iterator): """Yield *value*, followed by the elements in *iterator*. >>> value = '0' >>> iterator = ['1', '2', '3'] >>> list(prepend(value, iterator)) ['0', '1', '2', '3'] To prepend multiple values, see :func:`itertools.chain` or :func:`value_chain`. """ return chain([value], iterator) def convolve(signal, kernel): """Convolve the iterable *signal* with the iterable *kernel*. >>> signal = (1, 2, 3, 4, 5) >>> kernel = [3, 2, 1] >>> list(convolve(signal, kernel)) [3, 8, 14, 20, 26, 14, 5] Note: the input arguments are not interchangeable, as the *kernel* is immediately consumed and stored. """ kernel = tuple(kernel)[::-1] n = len(kernel) window = deque([0], maxlen=n) * n for x in chain(signal, repeat(0, n - 1)): window.append(x) yield sum(map(operator.mul, kernel, window)) def before_and_after(predicate, it): """A variant of :func:`takewhile` that allows complete access to the remainder of the iterator. >>> it = iter('ABCdEfGhI') >>> all_upper, remainder = before_and_after(str.isupper, it) >>> ''.join(all_upper) 'ABC' >>> ''.join(remainder) # takewhile() would lose the 'd' 'dEfGhI' Note that the first iterator must be fully consumed before the second iterator can generate valid results. """ it = iter(it) transition = [] def true_iterator(): for elem in it: if predicate(elem): yield elem else: transition.append(elem) return # Note: this is different from itertools recipes to allow nesting # before_and_after remainders into before_and_after again. See tests # for an example. remainder_iterator = chain(transition, it) return true_iterator(), remainder_iterator def triplewise(iterable): """Return overlapping triplets from *iterable*. 
>>> list(triplewise('ABCDE')) [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] """ for (a, _), (b, c) in pairwise(pairwise(iterable)): yield a, b, c def sliding_window(iterable, n): """Return a sliding window of width *n* over *iterable*. >>> list(sliding_window(range(6), 4)) [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] If *iterable* has fewer than *n* items, then nothing is yielded: >>> list(sliding_window(range(3), 4)) [] For a variant with more features, see :func:`windowed`. """ it = iter(iterable) window = deque(islice(it, n), maxlen=n) if len(window) == n: yield tuple(window) for x in it: window.append(x) yield tuple(window) def subslices(iterable): """Return all contiguous non-empty subslices of *iterable*. >>> list(subslices('ABC')) [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']] This is similar to :func:`substrings`, but emits items in a different order. """ seq = list(iterable) slices = starmap(slice, combinations(range(len(seq) + 1), 2)) return map(operator.getitem, repeat(seq), slices) def polynomial_from_roots(roots): """Compute a polynomial's coefficients from its roots. >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 [1, -4, -17, 60] """ # Use math.prod for Python 3.8+, prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1)) roots = list(map(operator.neg, roots)) return [ sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1) ] def iter_index(iterable, value, start=0): """Yield the index of each place in *iterable* that *value* occurs, beginning with index *start*. See :func:`locate` for a more general means of finding the indexes associated with particular values. >>> list(iter_index('AABCADEAF', 'A')) [0, 1, 4, 7] """ try: seq_index = iterable.index except AttributeError: # Slow path for general iterables it = islice(iterable, start, None) for i, element in enumerate(it, start): if element is value or element == value: yield i else: # Fast path for sequences i = start - 1 try: while True: i = seq_index(value, i + 1) yield i except ValueError: pass def sieve(n): """Yield the primes less than n. >>> list(sieve(30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] """ isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) data = bytearray((0, 1)) * (n // 2) data[:3] = 0, 0, 0 limit = isqrt(n) + 1 for p in compress(range(limit), data): data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p))) data[2] = 1 return iter_index(data, 1) if n > 2 else iter([]) def batched(iterable, n): """Batch data into lists of length *n*. The last batch may be shorter. >>> list(batched('ABCDEFG', 3)) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] This recipe is from the ``itertools`` docs. This library also provides :func:`chunked`, which has a different implementation. """ if hexversion >= 0x30C00A0: # Python 3.12.0a0 warnings.warn( ( 'batched will be removed in a future version of ' 'more-itertools. Use the standard library ' 'itertools.batched function instead' ), DeprecationWarning, ) it = iter(iterable) while True: batch = list(islice(it, n)) if not batch: break yield batch def transpose(it): """Swap the rows and columns of the input. >>> list(transpose([(1, 2, 3), (11, 22, 33)])) [(1, 11), (2, 22), (3, 33)] The caller should ensure that the dimensions of the input are compatible. """ # TODO: when 3.9 goes end-of-life, add stric=True to this. return zip(*it) def matmul(m1, m2): """Multiply two matrices. 
    >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
    [[49, 80], [41, 60]]

    The caller should ensure that the dimensions of the input matrices are
    compatible with each other.
    """
    n = len(m2[0])
    return batched(starmap(dotproduct, product(m1, transpose(m2))), n)


def factor(n):
    """Yield the prime factors of n.

    >>> list(factor(360))
    [2, 2, 2, 3, 3, 5]
    """
    isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
    for prime in sieve(isqrt(n) + 1):
        while True:
            quotient, remainder = divmod(n, prime)
            if remainder:
                break
            yield prime
            n = quotient
        if n == 1:
            return
    if n >= 2:
        yield n
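# A short usage sketch (not part of the vendored file) exercising a few of
# the recipes above; the expected outputs follow each call, per the doctests.
from more_itertools.recipes import factor, nth_combination, sieve, sliding_window

print(list(sliding_window(range(6), 4)))  # [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
print(list(sieve(30)))                    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(list(factor(360)))                  # [2, 2, 2, 3, 3, 5]
print(nth_combination(range(5), 3, 5))    # (0, 3, 4), found without enumerating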
25,416
Python
26.300752
79
0.572553
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/__init__.py
"""More routines for operating on iterables, beyond itertools""" from .more import * # noqa from .recipes import * # noqa __version__ = '9.1.0'
148
Python
20.285711
64
0.668919
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/more.py
import warnings from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence from functools import partial, reduce, wraps from heapq import heapify, heapreplace, heappop from itertools import ( chain, compress, count, cycle, dropwhile, groupby, islice, repeat, starmap, takewhile, tee, zip_longest, ) from math import exp, factorial, floor, log from queue import Empty, Queue from random import random, randrange, uniform from operator import itemgetter, mul, sub, gt, lt, ge, le from sys import hexversion, maxsize from time import monotonic from .recipes import ( _marker, _zip_equal, UnequalIterablesError, consume, flatten, pairwise, powerset, take, unique_everseen, all_equal, ) __all__ = [ 'AbortThread', 'SequenceView', 'UnequalIterablesError', 'adjacent', 'all_unique', 'always_iterable', 'always_reversible', 'bucket', 'callback_iter', 'chunked', 'chunked_even', 'circular_shifts', 'collapse', 'combination_index', 'consecutive_groups', 'constrained_batches', 'consumer', 'count_cycle', 'countable', 'difference', 'distinct_combinations', 'distinct_permutations', 'distribute', 'divide', 'duplicates_everseen', 'duplicates_justseen', 'exactly_n', 'filter_except', 'first', 'gray_product', 'groupby_transform', 'ichunked', 'iequals', 'ilen', 'interleave', 'interleave_evenly', 'interleave_longest', 'intersperse', 'is_sorted', 'islice_extended', 'iterate', 'last', 'locate', 'longest_common_prefix', 'lstrip', 'make_decorator', 'map_except', 'map_if', 'map_reduce', 'mark_ends', 'minmax', 'nth_or_last', 'nth_permutation', 'nth_product', 'numeric_range', 'one', 'only', 'padded', 'partitions', 'peekable', 'permutation_index', 'product_index', 'raise_', 'repeat_each', 'repeat_last', 'replace', 'rlocate', 'rstrip', 'run_length', 'sample', 'seekable', 'set_partitions', 'side_effect', 'sliced', 'sort_together', 'split_after', 'split_at', 'split_before', 'split_into', 'split_when', 'spy', 'stagger', 'strip', 'strictly_n', 'substrings', 'substrings_indexes', 'time_limited', 'unique_in_window', 'unique_to_each', 'unzip', 'value_chain', 'windowed', 'windowed_complete', 'with_iter', 'zip_broadcast', 'zip_equal', 'zip_offset', ] def chunked(iterable, n, strict=False): """Break *iterable* into lists of length *n*: >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) [[1, 2, 3], [4, 5, 6]] By the default, the last yielded list will have fewer than *n* elements if the length of *iterable* is not divisible by *n*: >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) [[1, 2, 3], [4, 5, 6], [7, 8]] To use a fill-in value instead, see the :func:`grouper` recipe. If the length of *iterable* is not divisible by *n* and *strict* is ``True``, then ``ValueError`` will be raised before the last list is yielded. """ iterator = iter(partial(take, n, iter(iterable)), []) if strict: if n is None: raise ValueError('n must not be None when using strict mode.') def ret(): for chunk in iterator: if len(chunk) != n: raise ValueError('iterable is not divisible by n.') yield chunk return iter(ret()) else: return iterator def first(iterable, default=_marker): """Return the first item of *iterable*, or *default* if *iterable* is empty. >>> first([0, 1, 2, 3]) 0 >>> first([], 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. :func:`first` is useful when you have a generator of expensive-to-retrieve values and want any arbitrary one. It is marginally shorter than ``next(iter(iterable), default)``. 
""" try: return next(iter(iterable)) except StopIteration as e: if default is _marker: raise ValueError( 'first() was called on an empty iterable, and no ' 'default value was provided.' ) from e return default def last(iterable, default=_marker): """Return the last item of *iterable*, or *default* if *iterable* is empty. >>> last([0, 1, 2, 3]) 3 >>> last([], 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. """ try: if isinstance(iterable, Sequence): return iterable[-1] # Work around https://bugs.python.org/issue38525 elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): return next(reversed(iterable)) else: return deque(iterable, maxlen=1)[-1] except (IndexError, TypeError, StopIteration): if default is _marker: raise ValueError( 'last() was called on an empty iterable, and no default was ' 'provided.' ) return default def nth_or_last(iterable, n, default=_marker): """Return the nth or the last item of *iterable*, or *default* if *iterable* is empty. >>> nth_or_last([0, 1, 2, 3], 2) 2 >>> nth_or_last([0, 1], 2) 1 >>> nth_or_last([], 0, 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. """ return last(islice(iterable, n + 1), default=default) class peekable: """Wrap an iterator to allow lookahead and prepending elements. Call :meth:`peek` on the result to get the value that will be returned by :func:`next`. This won't advance the iterator: >>> p = peekable(['a', 'b']) >>> p.peek() 'a' >>> next(p) 'a' Pass :meth:`peek` a default value to return that instead of raising ``StopIteration`` when the iterator is exhausted. >>> p = peekable([]) >>> p.peek('hi') 'hi' peekables also offer a :meth:`prepend` method, which "inserts" items at the head of the iterable: >>> p = peekable([1, 2, 3]) >>> p.prepend(10, 11, 12) >>> next(p) 10 >>> p.peek() 11 >>> list(p) [11, 12, 1, 2, 3] peekables can be indexed. Index 0 is the item that will be returned by :func:`next`, index 1 is the item after that, and so on: The values up to the given index will be cached. >>> p = peekable(['a', 'b', 'c', 'd']) >>> p[0] 'a' >>> p[1] 'b' >>> next(p) 'a' Negative indexes are supported, but be aware that they will cache the remaining items in the source iterator, which may require significant storage. To check whether a peekable is exhausted, check its truth value: >>> p = peekable(['a', 'b']) >>> if p: # peekable has items ... list(p) ['a', 'b'] >>> if not p: # peekable is exhausted ... list(p) [] """ def __init__(self, iterable): self._it = iter(iterable) self._cache = deque() def __iter__(self): return self def __bool__(self): try: self.peek() except StopIteration: return False return True def peek(self, default=_marker): """Return the item that will be next returned from ``next()``. Return ``default`` if there are no items left. If ``default`` is not provided, raise ``StopIteration``. """ if not self._cache: try: self._cache.append(next(self._it)) except StopIteration: if default is _marker: raise return default return self._cache[0] def prepend(self, *items): """Stack up items to be the next ones returned from ``next()`` or ``self.peek()``. The items will be returned in first in, first out order:: >>> p = peekable([1, 2, 3]) >>> p.prepend(10, 11, 12) >>> next(p) 10 >>> list(p) [11, 12, 1, 2, 3] It is possible, by prepending items, to "resurrect" a peekable that previously raised ``StopIteration``. 
>>> p = peekable([]) >>> next(p) Traceback (most recent call last): ... StopIteration >>> p.prepend(1) >>> next(p) 1 >>> next(p) Traceback (most recent call last): ... StopIteration """ self._cache.extendleft(reversed(items)) def __next__(self): if self._cache: return self._cache.popleft() return next(self._it) def _get_slice(self, index): # Normalize the slice's arguments step = 1 if (index.step is None) else index.step if step > 0: start = 0 if (index.start is None) else index.start stop = maxsize if (index.stop is None) else index.stop elif step < 0: start = -1 if (index.start is None) else index.start stop = (-maxsize - 1) if (index.stop is None) else index.stop else: raise ValueError('slice step cannot be zero') # If either the start or stop index is negative, we'll need to cache # the rest of the iterable in order to slice from the right side. if (start < 0) or (stop < 0): self._cache.extend(self._it) # Otherwise we'll need to find the rightmost index and cache to that # point. else: n = min(max(start, stop) + 1, maxsize) cache_len = len(self._cache) if n >= cache_len: self._cache.extend(islice(self._it, n - cache_len)) return list(self._cache)[index] def __getitem__(self, index): if isinstance(index, slice): return self._get_slice(index) cache_len = len(self._cache) if index < 0: self._cache.extend(self._it) elif index >= cache_len: self._cache.extend(islice(self._it, index + 1 - cache_len)) return self._cache[index] def consumer(func): """Decorator that automatically advances a PEP-342-style "reverse iterator" to its first yield point so you don't have to call ``next()`` on it manually. >>> @consumer ... def tally(): ... i = 0 ... while True: ... print('Thing number %s is %s.' % (i, (yield))) ... i += 1 ... >>> t = tally() >>> t.send('red') Thing number 0 is red. >>> t.send('fish') Thing number 1 is fish. Without the decorator, you would have to call ``next(t)`` before ``t.send()`` could be used. """ @wraps(func) def wrapper(*args, **kwargs): gen = func(*args, **kwargs) next(gen) return gen return wrapper def ilen(iterable): """Return the number of items in *iterable*. >>> ilen(x for x in range(1000000) if x % 3 == 0) 333334 This consumes the iterable, so handle with care. """ # This approach was selected because benchmarks showed it's likely the # fastest of the known implementations at the time of writing. # See GitHub tracker: #236, #230. counter = count() deque(zip(iterable, counter), maxlen=0) return next(counter) def iterate(func, start): """Return ``start``, ``func(start)``, ``func(func(start))``, ... >>> from itertools import islice >>> list(islice(iterate(lambda x: 2*x, 1), 10)) [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] """ while True: yield start start = func(start) def with_iter(context_manager): """Wrap an iterable in a ``with`` statement, so it closes once exhausted. For example, this will close the file when the iterator is exhausted:: upper_lines = (line.upper() for line in with_iter(open('foo'))) Any context manager which returns an iterable is a candidate for ``with_iter``. """ with context_manager as iterable: yield from iterable def one(iterable, too_short=None, too_long=None): """Return the first item from *iterable*, which is expected to contain only that item. Raise an exception if *iterable* is empty or has more than one item. :func:`one` is useful for ensuring that an iterable contains only one item. For example, it can be used to retrieve the result of a database query that is expected to return a single row. 
    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
    different exception with the *too_short* keyword:

        >>> it = []
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too few items in iterable (expected 1)

        >>> too_short = IndexError('too few items')
        >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        IndexError: too few items

    Similarly, if *iterable* contains more than one item, ``ValueError`` will
    be raised. You may specify a different exception with the *too_long*
    keyword:

        >>> it = ['too', 'many']
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: Expected exactly one item in iterable, but got 'too',
        'many', and perhaps more.

        >>> too_long = RuntimeError
        >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        RuntimeError

    Note that :func:`one` attempts to advance *iterable* twice to ensure there
    is only one item. See :func:`spy` or :func:`peekable` to check iterable
    contents less destructively.

    """
    it = iter(iterable)

    try:
        first_value = next(it)
    except StopIteration as e:
        raise (
            too_short or ValueError('too few items in iterable (expected 1)')
        ) from e

    try:
        second_value = next(it)
    except StopIteration:
        pass
    else:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)

    return first_value


def raise_(exception, *args):
    raise exception(*args)


def strictly_n(iterable, n, too_short=None, too_long=None):
    """Validate that *iterable* has exactly *n* items and return them if
    it does. If it has fewer than *n* items, call function *too_short*
    with those items. If it has more than *n* items, call function
    *too_long* with the first ``n + 1`` items.

        >>> iterable = ['a', 'b', 'c', 'd']
        >>> n = 4
        >>> list(strictly_n(iterable, n))
        ['a', 'b', 'c', 'd']

    By default, *too_short* and *too_long* are functions that raise
    ``ValueError``.

        >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too few items in iterable (got 2)

        >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too many items in iterable (got at least 3)

    You can instead supply functions that do something else.
    *too_short* will be called with the number of items in *iterable*.
    *too_long* will be called with `n + 1`.

        >>> def too_short(item_count):
        ...     raise RuntimeError
        >>> it = strictly_n('abcd', 6, too_short=too_short)
        >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        RuntimeError

        >>> def too_long(item_count):
        ...     print('The boss is going to hear about this')
        >>> it = strictly_n('abcdef', 4, too_long=too_long)
        >>> list(it)
        The boss is going to hear about this
        ['a', 'b', 'c', 'd']

    """
    if too_short is None:
        too_short = lambda item_count: raise_(
            ValueError,
            'Too few items in iterable (got {})'.format(item_count),
        )

    if too_long is None:
        too_long = lambda item_count: raise_(
            ValueError,
            'Too many items in iterable (got at least {})'.format(item_count),
        )

    it = iter(iterable)
    for i in range(n):
        try:
            item = next(it)
        except StopIteration:
            too_short(i)
            return
        else:
            yield item

    try:
        next(it)
    except StopIteration:
        pass
    else:
        too_long(n + 1)


def distinct_permutations(iterable, r=None):
    """Yield successive distinct permutations of the elements in *iterable*.
    >>> sorted(distinct_permutations([1, 0, 1]))
    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]

    Equivalent to ``set(permutations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.

    Duplicate permutations arise when there are duplicated elements in the
    input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
    items input, and each `x_i` is the count of a distinct item in the
    input sequence.

    If *r* is given, only the *r*-length permutations are yielded.

        >>> sorted(distinct_permutations([1, 0, 1], r=2))
        [(0, 1), (1, 0), (1, 1)]
        >>> sorted(distinct_permutations(range(3), r=2))
        [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]

    """
    # Algorithm: https://w.wiki/Qai
    def _full(A):
        while True:
            # Yield the permutation we have
            yield tuple(A)

            # Find the largest index i such that A[i] < A[i + 1]
            for i in range(size - 2, -1, -1):
                if A[i] < A[i + 1]:
                    break
            # If no such index exists, this permutation is the last one
            else:
                return

            # Find the largest index j greater than i such that A[i] < A[j]
            for j in range(size - 1, i, -1):
                if A[i] < A[j]:
                    break

            # Swap the value of A[i] with that of A[j], then reverse the
            # sequence from A[i + 1] to form the new permutation
            A[i], A[j] = A[j], A[i]
            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]

    # Algorithm: modified from the above
    def _partial(A, r):
        # Split A into the first r items and the last r items
        head, tail = A[:r], A[r:]
        right_head_indexes = range(r - 1, -1, -1)
        left_tail_indexes = range(len(tail))

        while True:
            # Yield the permutation we have
            yield tuple(head)

            # Starting from the right, find the first index of the head with
            # value smaller than the maximum value of the tail - call it i.
            pivot = tail[-1]
            for i in right_head_indexes:
                if head[i] < pivot:
                    break
                pivot = head[i]
            else:
                return

            # Starting from the left, find the first value of the tail
            # with a value greater than head[i] and swap.
            for j in left_tail_indexes:
                if tail[j] > head[i]:
                    head[i], tail[j] = tail[j], head[i]
                    break
            # If we didn't find one, start from the right and find the first
            # index of the head with a value greater than head[i] and swap.
            else:
                for j in right_head_indexes:
                    if head[j] > head[i]:
                        head[i], head[j] = head[j], head[i]
                        break

            # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
            tail += head[: i - r : -1]  # head[i + 1:][::-1]
            i += 1
            head[i:], tail[:] = tail[: r - i], tail[r - i :]

    items = sorted(iterable)

    size = len(items)
    if r is None:
        r = size

    if 0 < r <= size:
        return _full(items) if (r == size) else _partial(items, r)

    return iter(() if r else ((),))


def intersperse(e, iterable, n=1):
    """Intersperse filler element *e* among the items in *iterable*, leaving
    *n* items between each filler element.

        >>> list(intersperse('!', [1, 2, 3, 4, 5]))
        [1, '!', 2, '!', 3, '!', 4, '!', 5]

        >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
        [1, 2, None, 3, 4, None, 5]

    """
    if n == 0:
        raise ValueError('n must be > 0')
    elif n == 1:
        # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
        # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
        return islice(interleave(repeat(e), iterable), 1, None)
    else:
        # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
        # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
        # flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e]) chunks = chunked(iterable, n) return flatten(islice(interleave(filler, chunks), 1, None)) def unique_to_each(*iterables): """Return the elements from each of the input iterables that aren't in the other input iterables. For example, suppose you have a set of packages, each with a set of dependencies:: {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} If you remove one package, which dependencies can also be removed? If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) [['A'], ['C'], ['D']] If there are duplicates in one input iterable that aren't in the others they will be duplicated in the output. Input order is preserved:: >>> unique_to_each("mississippi", "missouri") [['p', 'p'], ['o', 'u', 'r']] It is assumed that the elements of each iterable are hashable. """ pool = [list(it) for it in iterables] counts = Counter(chain.from_iterable(map(set, pool))) uniques = {element for element in counts if counts[element] == 1} return [list(filter(uniques.__contains__, it)) for it in pool] def windowed(seq, n, fillvalue=None, step=1): """Return a sliding window of width *n* over the given iterable. >>> all_windows = windowed([1, 2, 3, 4, 5], 3) >>> list(all_windows) [(1, 2, 3), (2, 3, 4), (3, 4, 5)] When the window is larger than the iterable, *fillvalue* is used in place of missing values: >>> list(windowed([1, 2, 3], 4)) [(1, 2, 3, None)] Each window will advance in increments of *step*: >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) [(1, 2, 3), (3, 4, 5), (5, 6, '!')] To slide into the iterable's items, use :func:`chain` to add filler items to the left: >>> iterable = [1, 2, 3, 4] >>> n = 3 >>> padding = [None] * (n - 1) >>> list(windowed(chain(padding, iterable), 3)) [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] """ if n < 0: raise ValueError('n must be >= 0') if n == 0: yield tuple() return if step < 1: raise ValueError('step must be >= 1') window = deque(maxlen=n) i = n for _ in map(window.append, seq): i -= 1 if not i: i = step yield tuple(window) size = len(window) if size == 0: return elif size < n: yield tuple(chain(window, repeat(fillvalue, n - size))) elif 0 < i < min(step, n): window += (fillvalue,) * i yield tuple(window) def substrings(iterable): """Yield all of the substrings of *iterable*. >>> [''.join(s) for s in substrings('more')] ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] Note that non-string iterables can also be subdivided. >>> list(substrings([0, 1, 2])) [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] """ # The length-1 substrings seq = [] for item in iter(iterable): seq.append(item) yield (item,) seq = tuple(seq) item_count = len(seq) # And the rest for n in range(2, item_count + 1): for i in range(item_count - n + 1): yield seq[i : i + n] def substrings_indexes(seq, reverse=False): """Yield all substrings and their positions in *seq* The items yielded will be a tuple of the form ``(substr, i, j)``, where ``substr == seq[i:j]``. This function only works for iterables that support slicing, such as ``str`` objects. >>> for item in substrings_indexes('more'): ... print(item) ('m', 0, 1) ('o', 1, 2) ('r', 2, 3) ('e', 3, 4) ('mo', 0, 2) ('or', 1, 3) ('re', 2, 4) ('mor', 0, 3) ('ore', 1, 4) ('more', 0, 4) Set *reverse* to ``True`` to yield the same items in the opposite order. 
""" r = range(1, len(seq) + 1) if reverse: r = reversed(r) return ( (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) ) class bucket: """Wrap *iterable* and return an object that buckets it iterable into child iterables based on a *key* function. >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character >>> sorted(list(s)) # Get the keys ['a', 'b', 'c'] >>> a_iterable = s['a'] >>> next(a_iterable) 'a1' >>> next(a_iterable) 'a2' >>> list(s['b']) ['b1', 'b2', 'b3'] The original iterable will be advanced and its items will be cached until they are used by the child iterables. This may require significant storage. By default, attempting to select a bucket to which no items belong will exhaust the iterable and cache all values. If you specify a *validator* function, selected buckets will instead be checked against it. >>> from itertools import count >>> it = count(1, 2) # Infinite sequence of odd numbers >>> key = lambda x: x % 10 # Bucket by last digit >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only >>> s = bucket(it, key=key, validator=validator) >>> 2 in s False >>> list(s[2]) [] """ def __init__(self, iterable, key, validator=None): self._it = iter(iterable) self._key = key self._cache = defaultdict(deque) self._validator = validator or (lambda x: True) def __contains__(self, value): if not self._validator(value): return False try: item = next(self[value]) except StopIteration: return False else: self._cache[value].appendleft(item) return True def _get_values(self, value): """ Helper to yield items from the parent iterator that match *value*. Items that don't match are stored in the local cache as they are encountered. """ while True: # If we've cached some items that match the target value, emit # the first one and evict it from the cache. if self._cache[value]: yield self._cache[value].popleft() # Otherwise we need to advance the parent iterator to search for # a matching item, caching the rest. else: while True: try: item = next(self._it) except StopIteration: return item_value = self._key(item) if item_value == value: yield item break elif self._validator(item_value): self._cache[item_value].append(item) def __iter__(self): for item in self._it: item_value = self._key(item) if self._validator(item_value): self._cache[item_value].append(item) yield from self._cache.keys() def __getitem__(self, value): if not self._validator(value): return iter(()) return self._get_values(value) def spy(iterable, n=1): """Return a 2-tuple with a list containing the first *n* elements of *iterable*, and an iterator with the same items as *iterable*. This allows you to "look ahead" at the items in the iterable without advancing it. There is one item in the list by default: >>> iterable = 'abcdefg' >>> head, iterable = spy(iterable) >>> head ['a'] >>> list(iterable) ['a', 'b', 'c', 'd', 'e', 'f', 'g'] You may use unpacking to retrieve items instead of lists: >>> (head,), iterable = spy('abcdefg') >>> head 'a' >>> (first, second), iterable = spy('abcdefg', 2) >>> first 'a' >>> second 'b' The number of items requested can be larger than the number of items in the iterable: >>> iterable = [1, 2, 3, 4, 5] >>> head, iterable = spy(iterable, 10) >>> head [1, 2, 3, 4, 5] >>> list(iterable) [1, 2, 3, 4, 5] """ it = iter(iterable) head = take(n, it) return head.copy(), chain(head, it) def interleave(*iterables): """Return a new iterable yielding from each iterable in turn, until the shortest is exhausted. 
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7] For a version that doesn't terminate after the shortest iterable is exhausted, see :func:`interleave_longest`. """ return chain.from_iterable(zip(*iterables)) def interleave_longest(*iterables): """Return a new iterable yielding from each iterable in turn, skipping any that are exhausted. >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7, 3, 8] This function produces the same output as :func:`roundrobin`, but may perform better for some inputs (in particular when the number of iterables is large). """ i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) return (x for x in i if x is not _marker) def interleave_evenly(iterables, lengths=None): """ Interleave multiple iterables so that their elements are evenly distributed throughout the output sequence. >>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] >>> list(interleave_evenly(iterables)) [1, 2, 'a', 3, 4, 'b', 5] >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] >>> list(interleave_evenly(iterables)) [1, 6, 4, 2, 7, 3, 8, 5] This function requires iterables of known length. Iterables without ``__len__()`` can be used by manually specifying lengths with *lengths*: >>> from itertools import combinations, repeat >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] >>> lengths = [4 * (4 - 1) // 2, 3] >>> list(interleave_evenly(iterables, lengths=lengths)) [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] Based on Bresenham's algorithm. """ if lengths is None: try: lengths = [len(it) for it in iterables] except TypeError: raise ValueError( 'Iterable lengths could not be determined automatically. ' 'Specify them with the lengths keyword.' ) elif len(iterables) != len(lengths): raise ValueError('Mismatching number of iterables and lengths.') dims = len(lengths) # sort iterables by length, descending lengths_permute = sorted( range(dims), key=lambda i: lengths[i], reverse=True ) lengths_desc = [lengths[i] for i in lengths_permute] iters_desc = [iter(iterables[i]) for i in lengths_permute] # the longest iterable is the primary one (Bresenham: the longest # distance along an axis) delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] errors = [delta_primary // dims] * len(deltas_secondary) to_yield = sum(lengths) while to_yield: yield next(iter_primary) to_yield -= 1 # update errors for each secondary iterable errors = [e - delta for e, delta in zip(errors, deltas_secondary)] # those iterables for which the error is negative are yielded # ("diagonal step" in Bresenham) for i, e in enumerate(errors): if e < 0: yield next(iters_secondary[i]) to_yield -= 1 errors[i] += delta_primary def collapse(iterable, base_type=None, levels=None): """Flatten an iterable with multiple levels of nesting (e.g., a list of lists of tuples) into non-iterable types. >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] >>> list(collapse(iterable)) [1, 2, 3, 4, 5, 6] Binary and text strings are not considered iterable and will not be collapsed. 
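
    For example, the string below is emitted intact while the lists around it
    are flattened::

        >>> iterable = [[1, 2], 'ab', [[3]]]
        >>> list(collapse(iterable))
        [1, 2, 'ab', 3]
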
    To avoid collapsing other types, specify *base_type*:

        >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
        >>> list(collapse(iterable, base_type=tuple))
        ['ab', ('cd', 'ef'), 'gh', 'ij']

    Specify *levels* to stop flattening after a certain level:

        >>> iterable = [('a', ['b']), ('c', ['d'])]
        >>> list(collapse(iterable))  # Fully flattened
        ['a', 'b', 'c', 'd']
        >>> list(collapse(iterable, levels=1))  # Only one level flattened
        ['a', ['b'], 'c', ['d']]

    """

    def walk(node, level):
        if (
            ((levels is not None) and (level > levels))
            or isinstance(node, (str, bytes))
            or ((base_type is not None) and isinstance(node, base_type))
        ):
            yield node
            return

        try:
            tree = iter(node)
        except TypeError:
            yield node
            return
        else:
            for child in tree:
                yield from walk(child, level + 1)

    yield from walk(iterable, 0)


def side_effect(func, iterable, chunk_size=None, before=None, after=None):
    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
    of items) before yielding the item.

    `func` must be a function that takes a single argument. Its return value
    will be discarded.

    *before* and *after* are optional functions that take no arguments. They
    will be executed before iteration starts and after it ends, respectively.

    `side_effect` can be used for logging, updating progress bars, or anything
    that is not functionally "pure."

    Emitting a status message:

        >>> from more_itertools import consume
        >>> func = lambda item: print('Received {}'.format(item))
        >>> consume(side_effect(func, range(2)))
        Received 0
        Received 1

    Operating on chunks of items:

        >>> pair_sums = []
        >>> func = lambda chunk: pair_sums.append(sum(chunk))
        >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
        [0, 1, 2, 3, 4, 5]
        >>> list(pair_sums)
        [1, 5, 9]

    Writing to a file-like object:

        >>> from io import StringIO
        >>> from more_itertools import consume
        >>> f = StringIO()
        >>> func = lambda x: print(x, file=f)
        >>> before = lambda: print(u'HEADER', file=f)
        >>> after = f.close
        >>> it = [u'a', u'b', u'c']
        >>> consume(side_effect(func, it, before=before, after=after))
        >>> f.closed
        True

    """
    try:
        if before is not None:
            before()

        if chunk_size is None:
            for item in iterable:
                func(item)
                yield item
        else:
            for chunk in chunked(iterable, chunk_size):
                func(chunk)
                yield from chunk
    finally:
        if after is not None:
            after()


def sliced(seq, n, strict=False):
    """Yield slices of length *n* from the sequence *seq*.

        >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
        [(1, 2, 3), (4, 5, 6)]

    By default, the last yielded slice will have fewer than *n* elements
    if the length of *seq* is not divisible by *n*:

        >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
        [(1, 2, 3), (4, 5, 6), (7, 8)]

    If the length of *seq* is not divisible by *n* and *strict* is
    ``True``, then ``ValueError`` will be raised before the last
    slice is yielded.

    This function will only work for iterables that support slicing.
    For non-sliceable iterables, see :func:`chunked`.

    """
    iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
    if strict:

        def ret():
            for _slice in iterator:
                if len(_slice) != n:
                    raise ValueError("seq is not divisible by n.")
                yield _slice

        return iter(ret())
    else:
        return iterator


def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
    """Yield lists of items from *iterable*, where each list is delimited by
    an item where callable *pred* returns ``True``.

        >>> list(split_at('abcdcba', lambda x: x == 'b'))
        [['a'], ['c', 'd', 'c'], ['a']]

        >>> list(split_at(range(10), lambda n: n % 2 == 1))
        [[0], [2], [4], [6], [8], []]

    At most *maxsplit* splits are done.
If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) [[0], [2], [4, 5, 6, 7, 8, 9]] By default, the delimiting items are not included in the output. To include them, set *keep_separator* to ``True``. >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: if pred(item): yield buf if keep_separator: yield [item] if maxsplit == 1: yield list(it) return buf = [] maxsplit -= 1 else: buf.append(item) yield buf def split_before(iterable, pred, maxsplit=-1): """Yield lists of items from *iterable*, where each list ends just before an item for which callable *pred* returns ``True``: >>> list(split_before('OneTwo', lambda s: s.isupper())) [['O', 'n', 'e'], ['T', 'w', 'o']] >>> list(split_before(range(10), lambda n: n % 3 == 0)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: if pred(item) and buf: yield buf if maxsplit == 1: yield [item] + list(it) return buf = [] maxsplit -= 1 buf.append(item) if buf: yield buf def split_after(iterable, pred, maxsplit=-1): """Yield lists of items from *iterable*, where each list ends with an item where callable *pred* returns ``True``: >>> list(split_after('one1two2', lambda s: s.isdigit())) [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] >>> list(split_after(range(10), lambda n: n % 3 == 0)) [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: buf.append(item) if pred(item) and buf: yield buf if maxsplit == 1: buf = list(it) if buf: yield buf return buf = [] maxsplit -= 1 if buf: yield buf def split_when(iterable, pred, maxsplit=-1): """Split *iterable* into pieces based on the output of *pred*. *pred* should be a function that takes successive pairs of items and returns ``True`` if the iterable should be split in between them. For example, to find runs of increasing numbers, split the iterable when element ``i`` is larger than element ``i + 1``: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) [[1, 2, 3, 3], [2, 5], [2, 4], [2]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], ... lambda x, y: x > y, maxsplit=2)) [[1, 2, 3, 3], [2, 5], [2, 4, 2]] """ if maxsplit == 0: yield list(iterable) return it = iter(iterable) try: cur_item = next(it) except StopIteration: return buf = [cur_item] for next_item in it: if pred(cur_item, next_item): yield buf if maxsplit == 1: yield [next_item] + list(it) return buf = [] maxsplit -= 1 buf.append(next_item) cur_item = next_item yield buf def split_into(iterable, sizes): """Yield a list of sequential items from *iterable* of length 'n' for each integer 'n' in *sizes*. 
        >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
        [[1], [2, 3], [4, 5, 6]]

    If the sum of *sizes* is smaller than the length of *iterable*, then the
    remaining items of *iterable* will not be returned.

        >>> list(split_into([1,2,3,4,5,6], [2,3]))
        [[1, 2], [3, 4, 5]]

    If the sum of *sizes* is larger than the length of *iterable*, fewer items
    will be returned in the iteration that overruns *iterable* and further
    lists will be empty:

        >>> list(split_into([1,2,3,4], [1,2,3,4]))
        [[1], [2, 3], [4], []]

    When a ``None`` object is encountered in *sizes*, the returned list will
    contain items up to the end of *iterable* the same way that
    :func:`itertools.islice` does:

        >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
        [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]

    :func:`split_into` can be useful for grouping a series of items where the
    sizes of the groups are not uniform. An example would be where in a row
    from a table, multiple columns represent elements of the same feature
    (e.g. a point represented by x,y,z) but the format is not the same for
    all columns.
    """
    # convert the iterable argument into an iterator so its contents can
    # be consumed by islice in case it is a generator
    it = iter(iterable)

    for size in sizes:
        if size is None:
            yield list(it)
            return
        else:
            yield list(islice(it, size))


def padded(iterable, fillvalue=None, n=None, next_multiple=False):
    """Yield the elements from *iterable*, followed by *fillvalue*, such that
    at least *n* items are emitted.

        >>> list(padded([1, 2, 3], '?', 5))
        [1, 2, 3, '?', '?']

    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
    number of items emitted is a multiple of *n*::

        >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
        [1, 2, 3, 4, None, None]

    If *n* is ``None``, *fillvalue* will be emitted indefinitely.

    """
    it = iter(iterable)
    if n is None:
        yield from chain(it, repeat(fillvalue))
    elif n < 1:
        raise ValueError('n must be at least 1')
    else:
        item_count = 0
        for item in it:
            yield item
            item_count += 1

        remaining = (n - item_count) % n if next_multiple else n - item_count
        for _ in range(remaining):
            yield fillvalue


def repeat_each(iterable, n=2):
    """Repeat each element in *iterable* *n* times.

        >>> list(repeat_each('ABC', 3))
        ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
    """
    return chain.from_iterable(map(repeat, iterable, repeat(n)))


def repeat_last(iterable, default=None):
    """After the *iterable* is exhausted, keep yielding its last element.

        >>> list(islice(repeat_last(range(3)), 5))
        [0, 1, 2, 2, 2]

    If the iterable is empty, yield *default* forever::

        >>> list(islice(repeat_last(range(0), 42), 5))
        [42, 42, 42, 42, 42]

    """
    item = _marker
    for item in iterable:
        yield item
    final = default if item is _marker else item
    yield from repeat(final)


def distribute(n, iterable):
    """Distribute the items from *iterable* among *n* smaller iterables.

        >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
        >>> list(group_1)
        [1, 3, 5]
        >>> list(group_2)
        [2, 4, 6]

    If the length of *iterable* is not evenly divisible by *n*, then the
    length of the returned iterables will not be identical:

        >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
        >>> [list(c) for c in children]
        [[1, 4, 7], [2, 5], [3, 6]]

    If the length of *iterable* is smaller than *n*, then the last returned
    iterables will be empty:

        >>> children = distribute(5, [1, 2, 3])
        >>> [list(c) for c in children]
        [[1], [2], [3], [], []]

    This function uses :func:`itertools.tee` and may require significant
    storage. If you need the order of items in the smaller iterables to match
    the original iterable, see :func:`divide`.
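
    When the group lengths are equal, the original order can be recovered
    with :func:`interleave`::

        >>> children = distribute(2, [1, 2, 3, 4, 5, 6])
        >>> list(interleave(*children))
        [1, 2, 3, 4, 5, 6]
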
""" if n < 1: raise ValueError('n must be at least 1') children = tee(iterable, n) return [islice(it, index, None, n) for index, it in enumerate(children)] def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): """Yield tuples whose elements are offset from *iterable*. The amount by which the `i`-th item in each tuple is offset is given by the `i`-th item in *offsets*. >>> list(stagger([0, 1, 2, 3])) [(None, 0, 1), (0, 1, 2), (1, 2, 3)] >>> list(stagger(range(8), offsets=(0, 2, 4))) [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] By default, the sequence will end when the final element of a tuple is the last item in the iterable. To continue until the first element of a tuple is the last item in the iterable, set *longest* to ``True``:: >>> list(stagger([0, 1, 2, 3], longest=True)) [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] By default, ``None`` will be used to replace offsets beyond the end of the sequence. Specify *fillvalue* to use some other value. """ children = tee(iterable, len(offsets)) return zip_offset( *children, offsets=offsets, longest=longest, fillvalue=fillvalue ) def zip_equal(*iterables): """``zip`` the input *iterables* together, but raise ``UnequalIterablesError`` if they aren't all the same length. >>> it_1 = range(3) >>> it_2 = iter('abc') >>> list(zip_equal(it_1, it_2)) [(0, 'a'), (1, 'b'), (2, 'c')] >>> it_1 = range(3) >>> it_2 = iter('abcd') >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... more_itertools.more.UnequalIterablesError: Iterables have different lengths """ if hexversion >= 0x30A00A6: warnings.warn( ( 'zip_equal will be removed in a future version of ' 'more-itertools. Use the builtin zip function with ' 'strict=True instead.' ), DeprecationWarning, ) return _zip_equal(*iterables) def zip_offset(*iterables, offsets, longest=False, fillvalue=None): """``zip`` the input *iterables* together, but offset the `i`-th iterable by the `i`-th item in *offsets*. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] This can be used as a lightweight alternative to SciPy or pandas to analyze data sets in which some series have a lead or lag relationship. By default, the sequence will end when the shortest iterable is exhausted. To continue until the longest iterable is exhausted, set *longest* to ``True``. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] By default, ``None`` will be used to replace offsets beyond the end of the sequence. Specify *fillvalue* to use some other value. """ if len(iterables) != len(offsets): raise ValueError("Number of iterables and offsets didn't match") staggered = [] for it, n in zip(iterables, offsets): if n < 0: staggered.append(chain(repeat(fillvalue, -n), it)) elif n > 0: staggered.append(islice(it, n, None)) else: staggered.append(it) if longest: return zip_longest(*staggered, fillvalue=fillvalue) return zip(*staggered) def sort_together(iterables, key_list=(0,), key=None, reverse=False): """Return the input iterables sorted together, with *key_list* as the priority for sorting. All iterables are trimmed to the length of the shortest one. This can be used like the sorting function in a spreadsheet. If each iterable represents a column of data, the key list determines which columns are used for sorting. 
By default, all iterables are sorted using the ``0``-th iterable:: >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] >>> sort_together(iterables) [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] Set a different key list to sort according to another iterable. Specifying multiple keys dictates how ties are broken:: >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] >>> sort_together(iterables, key_list=(1, 2)) [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] To sort by a function of the elements of the iterable, pass a *key* function. Its arguments are the elements of the iterables corresponding to the key list:: >>> names = ('a', 'b', 'c') >>> lengths = (1, 2, 3) >>> widths = (5, 2, 1) >>> def area(length, width): ... return length * width >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] Set *reverse* to ``True`` to sort in descending order. >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) [(3, 2, 1), ('a', 'b', 'c')] """ if key is None: # if there is no key function, the key argument to sorted is an # itemgetter key_argument = itemgetter(*key_list) else: # if there is a key function, call it with the items at the offsets # specified by the key function as arguments key_list = list(key_list) if len(key_list) == 1: # if key_list contains a single item, pass the item at that offset # as the only argument to the key function key_offset = key_list[0] key_argument = lambda zipped_items: key(zipped_items[key_offset]) else: # if key_list contains multiple items, use itemgetter to return a # tuple of items, which we pass as *args to the key function get_key_items = itemgetter(*key_list) key_argument = lambda zipped_items: key( *get_key_items(zipped_items) ) return list( zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) ) def unzip(iterable): """The inverse of :func:`zip`, this function disaggregates the elements of the zipped *iterable*. The ``i``-th iterable contains the ``i``-th element from each element of the zipped iterable. The first element is used to determine the length of the remaining elements. >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] >>> letters, numbers = unzip(iterable) >>> list(letters) ['a', 'b', 'c', 'd'] >>> list(numbers) [1, 2, 3, 4] This is similar to using ``zip(*iterable)``, but it avoids reading *iterable* into memory. Note, however, that this function uses :func:`itertools.tee` and thus may require significant storage. """ head, iterable = spy(iter(iterable)) if not head: # empty iterable, e.g. zip([], [], []) return () # spy returns a one-length iterable as head head = head[0] iterables = tee(iterable, len(head)) def itemgetter(i): def getter(obj): try: return obj[i] except IndexError: # basically if we have an iterable like # iter([(1, 2, 3), (4, 5), (6,)]) # the second unzipped iterable would fail at the third tuple # since it would try to access tup[1] # same with the third unzipped iterable and the second tuple # to support these "improperly zipped" iterables, # we create a custom itemgetter # which just stops the unzipped iterables # at first length mismatch raise StopIteration return getter return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) def divide(n, iterable): """Divide the elements from *iterable* into *n* parts, maintaining order. 
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) >>> list(group_1) [1, 2, 3] >>> list(group_2) [4, 5, 6] If the length of *iterable* is not evenly divisible by *n*, then the length of the returned iterables will not be identical: >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) >>> [list(c) for c in children] [[1, 2, 3], [4, 5], [6, 7]] If the length of the iterable is smaller than n, then the last returned iterables will be empty: >>> children = divide(5, [1, 2, 3]) >>> [list(c) for c in children] [[1], [2], [3], [], []] This function will exhaust the iterable before returning and may require significant storage. If order is not important, see :func:`distribute`, which does not first pull the iterable into memory. """ if n < 1: raise ValueError('n must be at least 1') try: iterable[:0] except TypeError: seq = tuple(iterable) else: seq = iterable q, r = divmod(len(seq), n) ret = [] stop = 0 for i in range(1, n + 1): start = stop stop += q + 1 if i <= r else q ret.append(iter(seq[start:stop])) return ret def always_iterable(obj, base_type=(str, bytes)): """If *obj* is iterable, return an iterator over its items:: >>> obj = (1, 2, 3) >>> list(always_iterable(obj)) [1, 2, 3] If *obj* is not iterable, return a one-item iterable containing *obj*:: >>> obj = 1 >>> list(always_iterable(obj)) [1] If *obj* is ``None``, return an empty iterable: >>> obj = None >>> list(always_iterable(None)) [] By default, binary and text strings are not considered iterable:: >>> obj = 'foo' >>> list(always_iterable(obj)) ['foo'] If *base_type* is set, objects for which ``isinstance(obj, base_type)`` returns ``True`` won't be considered iterable. >>> obj = {'a': 1} >>> list(always_iterable(obj)) # Iterate over the dict's keys ['a'] >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit [{'a': 1}] Set *base_type* to ``None`` to avoid any special handling and treat objects Python considers iterable as iterable: >>> obj = 'foo' >>> list(always_iterable(obj, base_type=None)) ['f', 'o', 'o'] """ if obj is None: return iter(()) if (base_type is not None) and isinstance(obj, base_type): return iter((obj,)) try: return iter(obj) except TypeError: return iter((obj,)) def adjacent(predicate, iterable, distance=1): """Return an iterable over `(bool, item)` tuples where the `item` is drawn from *iterable* and the `bool` indicates whether that item satisfies the *predicate* or is adjacent to an item that does. For example, to find whether items are adjacent to a ``3``:: >>> list(adjacent(lambda x: x == 3, range(6))) [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] Set *distance* to change what counts as adjacent. For example, to find whether items are two places away from a ``3``: >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] This is useful for contextualizing the results of a search function. For example, a code comparison tool might want to identify lines that have changed, but also surrounding lines to give the viewer of the diff context. The predicate function will only be called once for each item in the iterable. See also :func:`groupby_transform`, which can be used with this function to group ranges of items with the same `bool` value. 
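
    For example, to group each run of items by its `bool` value::

        >>> from operator import itemgetter
        >>> keyed = adjacent(lambda x: x == 3, range(6))
        >>> grouped = groupby_transform(keyed, itemgetter(0), itemgetter(1))
        >>> [(k, list(g)) for k, g in grouped]
        [(False, [0, 1]), (True, [2, 3, 4]), (False, [5])]
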
""" # Allow distance=0 mainly for testing that it reproduces results with map() if distance < 0: raise ValueError('distance must be at least 0') i1, i2 = tee(iterable) padding = [False] * distance selected = chain(padding, map(predicate, i1), padding) adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) return zip(adjacent_to_selected, i2) def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): """An extension of :func:`itertools.groupby` that can apply transformations to the grouped data. * *keyfunc* is a function computing a key value for each item in *iterable* * *valuefunc* is a function that transforms the individual items from *iterable* after grouping * *reducefunc* is a function that transforms each group of items >>> iterable = 'aAAbBBcCC' >>> keyfunc = lambda k: k.upper() >>> valuefunc = lambda v: v.lower() >>> reducefunc = lambda g: ''.join(g) >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] Each optional argument defaults to an identity function if not specified. :func:`groupby_transform` is useful when grouping elements of an iterable using a separate iterable as the key. To do this, :func:`zip` the iterables and pass a *keyfunc* that extracts the first element and a *valuefunc* that extracts the second element:: >>> from operator import itemgetter >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] >>> values = 'abcdefghi' >>> iterable = zip(keys, values) >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) >>> [(k, ''.join(g)) for k, g in grouper] [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] Note that the order of items in the iterable is significant. Only adjacent items are grouped together, so if you don't want any duplicate groups, you should sort the iterable by the key function. """ ret = groupby(iterable, keyfunc) if valuefunc: ret = ((k, map(valuefunc, g)) for k, g in ret) if reducefunc: ret = ((k, reducefunc(g)) for k, g in ret) return ret class numeric_range(abc.Sequence, abc.Hashable): """An extension of the built-in ``range()`` function whose arguments can be any orderable numeric type. With only *stop* specified, *start* defaults to ``0`` and *step* defaults to ``1``. The output items will match the type of *stop*: >>> list(numeric_range(3.5)) [0.0, 1.0, 2.0, 3.0] With only *start* and *stop* specified, *step* defaults to ``1``. The output items will match the type of *start*: >>> from decimal import Decimal >>> start = Decimal('2.1') >>> stop = Decimal('5.1') >>> list(numeric_range(start, stop)) [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] With *start*, *stop*, and *step* specified the output items will match the type of ``start + step``: >>> from fractions import Fraction >>> start = Fraction(1, 2) # Start at 1/2 >>> stop = Fraction(5, 2) # End at 5/2 >>> step = Fraction(1, 2) # Count by 1/2 >>> list(numeric_range(start, stop, step)) [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] If *step* is zero, ``ValueError`` is raised. Negative steps are supported: >>> list(numeric_range(3, -1, -1.0)) [3.0, 2.0, 1.0, 0.0] Be aware of the limitations of floating point numbers; the representation of the yielded numbers may be surprising. 
``datetime.datetime`` objects can be used for *start* and *stop*, if *step* is a ``datetime.timedelta`` object: >>> import datetime >>> start = datetime.datetime(2019, 1, 1) >>> stop = datetime.datetime(2019, 1, 3) >>> step = datetime.timedelta(days=1) >>> items = iter(numeric_range(start, stop, step)) >>> next(items) datetime.datetime(2019, 1, 1, 0, 0) >>> next(items) datetime.datetime(2019, 1, 2, 0, 0) """ _EMPTY_HASH = hash(range(0, 0)) def __init__(self, *args): argc = len(args) if argc == 1: (self._stop,) = args self._start = type(self._stop)(0) self._step = type(self._stop - self._start)(1) elif argc == 2: self._start, self._stop = args self._step = type(self._stop - self._start)(1) elif argc == 3: self._start, self._stop, self._step = args elif argc == 0: raise TypeError( 'numeric_range expected at least ' '1 argument, got {}'.format(argc) ) else: raise TypeError( 'numeric_range expected at most ' '3 arguments, got {}'.format(argc) ) self._zero = type(self._step)(0) if self._step == self._zero: raise ValueError('numeric_range() arg 3 must not be zero') self._growing = self._step > self._zero self._init_len() def __bool__(self): if self._growing: return self._start < self._stop else: return self._start > self._stop def __contains__(self, elem): if self._growing: if self._start <= elem < self._stop: return (elem - self._start) % self._step == self._zero else: if self._start >= elem > self._stop: return (self._start - elem) % (-self._step) == self._zero return False def __eq__(self, other): if isinstance(other, numeric_range): empty_self = not bool(self) empty_other = not bool(other) if empty_self or empty_other: return empty_self and empty_other # True if both empty else: return ( self._start == other._start and self._step == other._step and self._get_by_index(-1) == other._get_by_index(-1) ) else: return False def __getitem__(self, key): if isinstance(key, int): return self._get_by_index(key) elif isinstance(key, slice): step = self._step if key.step is None else key.step * self._step if key.start is None or key.start <= -self._len: start = self._start elif key.start >= self._len: start = self._stop else: # -self._len < key.start < self._len start = self._get_by_index(key.start) if key.stop is None or key.stop >= self._len: stop = self._stop elif key.stop <= -self._len: stop = self._start else: # -self._len < key.stop < self._len stop = self._get_by_index(key.stop) return numeric_range(start, stop, step) else: raise TypeError( 'numeric range indices must be ' 'integers or slices, not {}'.format(type(key).__name__) ) def __hash__(self): if self: return hash((self._start, self._get_by_index(-1), self._step)) else: return self._EMPTY_HASH def __iter__(self): values = (self._start + (n * self._step) for n in count()) if self._growing: return takewhile(partial(gt, self._stop), values) else: return takewhile(partial(lt, self._stop), values) def __len__(self): return self._len def _init_len(self): if self._growing: start = self._start stop = self._stop step = self._step else: start = self._stop stop = self._start step = -self._step distance = stop - start if distance <= self._zero: self._len = 0 else: # distance > 0 and step > 0: regular euclidean division q, r = divmod(distance, step) self._len = int(q) + int(r != self._zero) def __reduce__(self): return numeric_range, (self._start, self._stop, self._step) def __repr__(self): if self._step == 1: return "numeric_range({}, {})".format( repr(self._start), repr(self._stop) ) else: return "numeric_range({}, {}, {})".format( repr(self._start), 
repr(self._stop), repr(self._step) ) def __reversed__(self): return iter( numeric_range( self._get_by_index(-1), self._start - self._step, -self._step ) ) def count(self, value): return int(value in self) def index(self, value): if self._growing: if self._start <= value < self._stop: q, r = divmod(value - self._start, self._step) if r == self._zero: return int(q) else: if self._start >= value > self._stop: q, r = divmod(self._start - value, -self._step) if r == self._zero: return int(q) raise ValueError("{} is not in numeric range".format(value)) def _get_by_index(self, i): if i < 0: i += self._len if i < 0 or i >= self._len: raise IndexError("numeric range object index out of range") return self._start + i * self._step def count_cycle(iterable, n=None): """Cycle through the items from *iterable* up to *n* times, yielding the number of completed cycles along with each item. If *n* is omitted the process repeats indefinitely. >>> list(count_cycle('AB', 3)) [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] """ iterable = tuple(iterable) if not iterable: return iter(()) counter = count() if n is None else range(n) return ((i, item) for i in counter for item in iterable) def mark_ends(iterable): """Yield 3-tuples of the form ``(is_first, is_last, item)``. >>> list(mark_ends('ABC')) [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] Use this when looping over an iterable to take special action on its first and/or last items: >>> iterable = ['Header', 100, 200, 'Footer'] >>> total = 0 >>> for is_first, is_last, item in mark_ends(iterable): ... if is_first: ... continue # Skip the header ... if is_last: ... continue # Skip the footer ... total += item >>> print(total) 300 """ it = iter(iterable) try: b = next(it) except StopIteration: return try: for i in count(): a = b b = next(it) yield i == 0, False, a except StopIteration: yield i == 0, True, a def locate(iterable, pred=bool, window_size=None): """Yield the index of each item in *iterable* for which *pred* returns ``True``. *pred* defaults to :func:`bool`, which will select truthy items: >>> list(locate([0, 1, 1, 0, 1, 0, 0])) [1, 2, 4] Set *pred* to a custom function to, e.g., find the indexes for a particular item. >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) [1, 3] If *window_size* is given, then the *pred* function will be called with that many items. This enables searching for sub-sequences: >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] >>> pred = lambda *args: args == (1, 2, 3) >>> list(locate(iterable, pred=pred, window_size=3)) [1, 5, 9] Use with :func:`seekable` to find indexes and then retrieve the associated items: >>> from itertools import count >>> from more_itertools import seekable >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) >>> it = seekable(source) >>> pred = lambda x: x > 100 >>> indexes = locate(it, pred=pred) >>> i = next(indexes) >>> it.seek(i) >>> next(it) 106 """ if window_size is None: return compress(count(), map(pred, iterable)) if window_size < 1: raise ValueError('window size must be at least 1') it = windowed(iterable, window_size, fillvalue=_marker) return compress(count(), starmap(pred, it)) def longest_common_prefix(iterables): """Yield elements of the longest common prefix amongst given *iterables*. 
        >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
        'ab'

    """
    return (c[0] for c in takewhile(all_equal, zip(*iterables)))


def lstrip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the beginning
    for which *pred* returns ``True``.

    For example, to remove a set of items from the start of an iterable:

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(lstrip(iterable, pred))
        [1, 2, None, 3, False, None]

    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.

    """
    return dropwhile(pred, iterable)


def rstrip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the end
    for which *pred* returns ``True``.

    For example, to remove a set of items from the end of an iterable:

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(rstrip(iterable, pred))
        [None, False, None, 1, 2, None, 3]

    This function is analogous to :func:`str.rstrip`.

    """
    cache = []
    cache_append = cache.append
    cache_clear = cache.clear
    for x in iterable:
        if pred(x):
            cache_append(x)
        else:
            yield from cache
            cache_clear()
            yield x


def strip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the
    beginning and end for which *pred* returns ``True``.

    For example, to remove a set of items from both ends of an iterable:

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(strip(iterable, pred))
        [1, 2, None, 3]

    This function is analogous to :func:`str.strip`.

    """
    return rstrip(lstrip(iterable, pred), pred)


class islice_extended:
    """An extension of :func:`itertools.islice` that supports negative values
    for *stop*, *start*, and *step*.

        >>> iterable = iter('abcdefgh')
        >>> list(islice_extended(iterable, -4, -1))
        ['e', 'f', 'g']

    Slices with negative values require some caching of *iterable*, but this
    function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator: >>> from itertools import count >>> list(islice_extended(count(), 110, 99, -2)) [110, 108, 106, 104, 102, 100] You can also use slice notation directly: >>> iterable = map(str, count()) >>> it = islice_extended(iterable)[10:20:2] >>> list(it) ['10', '12', '14', '16', '18'] """ def __init__(self, iterable, *args): it = iter(iterable) if args: self._iterable = _islice_helper(it, slice(*args)) else: self._iterable = it def __iter__(self): return self def __next__(self): return next(self._iterable) def __getitem__(self, key): if isinstance(key, slice): return islice_extended(_islice_helper(self._iterable, key)) raise TypeError('islice_extended.__getitem__ argument must be a slice') def _islice_helper(it, s): start = s.start stop = s.stop if s.step == 0: raise ValueError('step argument must be a non-zero integer or None.') step = s.step or 1 if step > 0: start = 0 if (start is None) else start if start < 0: # Consume all but the last -start items cache = deque(enumerate(it, 1), maxlen=-start) len_iter = cache[-1][0] if cache else 0 # Adjust start to be positive i = max(len_iter + start, 0) # Adjust stop to be positive if stop is None: j = len_iter elif stop >= 0: j = min(stop, len_iter) else: j = max(len_iter + stop, 0) # Slice the cache n = j - i if n <= 0: return for index, item in islice(cache, 0, n, step): yield item elif (stop is not None) and (stop < 0): # Advance to the start position next(islice(it, start, start), None) # When stop is negative, we have to carry -stop items while # iterating cache = deque(islice(it, -stop), maxlen=-stop) for index, item in enumerate(it): cached_item = cache.popleft() if index % step == 0: yield cached_item cache.append(item) else: # When both start and stop are positive we have the normal case yield from islice(it, start, stop, step) else: start = -1 if (start is None) else start if (stop is not None) and (stop < 0): # Consume all but the last items n = -stop - 1 cache = deque(enumerate(it, 1), maxlen=n) len_iter = cache[-1][0] if cache else 0 # If start and stop are both negative they are comparable and # we can just slice. Otherwise we can adjust start to be negative # and then slice. if start < 0: i, j = start, stop else: i, j = min(start - len_iter, -1), None for index, item in list(cache)[i:j:step]: yield item else: # Advance to the stop position if stop is not None: m = stop + 1 next(islice(it, m, m), None) # stop is positive, so if start is negative they are not comparable # and we need the rest of the items. if start < 0: i = start n = None # stop is None and start is positive, so we just need items up to # the start index. elif stop is None: i = None n = start + 1 # Both stop and start are positive, so they are comparable. else: i = None n = start - stop if n <= 0: return cache = list(islice(it, n)) yield from cache[i::step] def always_reversible(iterable): """An extension of :func:`reversed` that supports all iterables, not just those which implement the ``Reversible`` or ``Sequence`` protocols. >>> print(*always_reversible(x for x in range(3))) 2 1 0 If the iterable is already reversible, this function returns the result of :func:`reversed()`. If the iterable is not reversible, this function will cache the remaining items in the iterable and yield them in reverse order, which may require significant storage. 
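
    For example, a one-shot iterator is cached and then reversed::

        >>> it = iter([1, 2, 3])
        >>> list(always_reversible(it))
        [3, 2, 1]
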
""" try: return reversed(iterable) except TypeError: return reversed(list(iterable)) def consecutive_groups(iterable, ordering=lambda x: x): """Yield groups of consecutive items using :func:`itertools.groupby`. The *ordering* function determines whether two items are adjacent by returning their position. By default, the ordering function is the identity function. This is suitable for finding runs of numbers: >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] >>> for group in consecutive_groups(iterable): ... print(list(group)) [1] [10, 11, 12] [20] [30, 31, 32, 33] [40] For finding runs of adjacent letters, try using the :meth:`index` method of a string of letters: >>> from string import ascii_lowercase >>> iterable = 'abcdfgilmnop' >>> ordering = ascii_lowercase.index >>> for group in consecutive_groups(iterable, ordering): ... print(list(group)) ['a', 'b', 'c', 'd'] ['f', 'g'] ['i'] ['l', 'm', 'n', 'o', 'p'] Each group of consecutive items is an iterator that shares it source with *iterable*. When an an output group is advanced, the previous group is no longer available unless its elements are copied (e.g., into a ``list``). >>> iterable = [1, 2, 11, 12, 21, 22] >>> saved_groups = [] >>> for group in consecutive_groups(iterable): ... saved_groups.append(list(group)) # Copy group elements >>> saved_groups [[1, 2], [11, 12], [21, 22]] """ for k, g in groupby( enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) ): yield map(itemgetter(1), g) def difference(iterable, func=sub, *, initial=None): """This function is the inverse of :func:`itertools.accumulate`. By default it will compute the first difference of *iterable* using :func:`operator.sub`: >>> from itertools import accumulate >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10 >>> list(difference(iterable)) [0, 1, 2, 3, 4] *func* defaults to :func:`operator.sub`, but other functions can be specified. They will be applied as follows:: A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... For example, to do progressive division: >>> iterable = [1, 2, 6, 24, 120] >>> func = lambda x, y: x // y >>> list(difference(iterable, func)) [1, 2, 3, 4, 5] If the *initial* keyword is set, the first element will be skipped when computing successive differences. >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10) >>> list(difference(it, initial=10)) [1, 2, 3] """ a, b = tee(iterable) try: first = [next(b)] except StopIteration: return iter([]) if initial is not None: first = [] return chain(first, map(func, b, a)) class SequenceView(Sequence): """Return a read-only view of the sequence object *target*. :class:`SequenceView` objects are analogous to Python's built-in "dictionary view" types. They provide a dynamic view of a sequence's items, meaning that when the sequence updates, so does the view. >>> seq = ['0', '1', '2'] >>> view = SequenceView(seq) >>> view SequenceView(['0', '1', '2']) >>> seq.append('3') >>> view SequenceView(['0', '1', '2', '3']) Sequence views support indexing, slicing, and length queries. They act like the underlying sequence, except they don't allow assignment: >>> view[1] '1' >>> view[1:-1] ['1', '2'] >>> len(view) 4 Sequence views are useful as an alternative to copying, as they don't require (much) extra storage. 
""" def __init__(self, target): if not isinstance(target, Sequence): raise TypeError self._target = target def __getitem__(self, index): return self._target[index] def __len__(self): return len(self._target) def __repr__(self): return '{}({})'.format(self.__class__.__name__, repr(self._target)) class seekable: """Wrap an iterator to allow for seeking backward and forward. This progressively caches the items in the source iterable so they can be re-visited. Call :meth:`seek` with an index to seek to that position in the source iterable. To "reset" an iterator, seek to ``0``: >>> from itertools import count >>> it = seekable((str(n) for n in count())) >>> next(it), next(it), next(it) ('0', '1', '2') >>> it.seek(0) >>> next(it), next(it), next(it) ('0', '1', '2') >>> next(it) '3' You can also seek forward: >>> it = seekable((str(n) for n in range(20))) >>> it.seek(10) >>> next(it) '10' >>> it.seek(20) # Seeking past the end of the source isn't a problem >>> list(it) [] >>> it.seek(0) # Resetting works even after hitting the end >>> next(it), next(it), next(it) ('0', '1', '2') Call :meth:`peek` to look ahead one item without advancing the iterator: >>> it = seekable('1234') >>> it.peek() '1' >>> list(it) ['1', '2', '3', '4'] >>> it.peek(default='empty') 'empty' Before the iterator is at its end, calling :func:`bool` on it will return ``True``. After it will return ``False``: >>> it = seekable('5678') >>> bool(it) True >>> list(it) ['5', '6', '7', '8'] >>> bool(it) False You may view the contents of the cache with the :meth:`elements` method. That returns a :class:`SequenceView`, a view that updates automatically: >>> it = seekable((str(n) for n in range(10))) >>> next(it), next(it), next(it) ('0', '1', '2') >>> elements = it.elements() >>> elements SequenceView(['0', '1', '2']) >>> next(it) '3' >>> elements SequenceView(['0', '1', '2', '3']) By default, the cache grows as the source iterable progresses, so beware of wrapping very large or infinite iterables. Supply *maxlen* to limit the size of the cache (this of course limits how far back you can seek). >>> from itertools import count >>> it = seekable((str(n) for n in count()), maxlen=2) >>> next(it), next(it), next(it), next(it) ('0', '1', '2', '3') >>> list(it.elements()) ['2', '3'] >>> it.seek(0) >>> next(it), next(it), next(it), next(it) ('2', '3', '4', '5') >>> next(it) '6' """ def __init__(self, iterable, maxlen=None): self._source = iter(iterable) if maxlen is None: self._cache = [] else: self._cache = deque([], maxlen) self._index = None def __iter__(self): return self def __next__(self): if self._index is not None: try: item = self._cache[self._index] except IndexError: self._index = None else: self._index += 1 return item item = next(self._source) self._cache.append(item) return item def __bool__(self): try: self.peek() except StopIteration: return False return True def peek(self, default=_marker): try: peeked = next(self) except StopIteration: if default is _marker: raise return default if self._index is None: self._index = len(self._cache) self._index -= 1 return peeked def elements(self): return SequenceView(self._cache) def seek(self, index): self._index = index remainder = index - len(self._cache) if remainder > 0: consume(self, remainder) class run_length: """ :func:`run_length.encode` compresses an iterable with run-length encoding. 
It yields groups of repeated items with the count of how many times they were repeated: >>> uncompressed = 'abbcccdddd' >>> list(run_length.encode(uncompressed)) [('a', 1), ('b', 2), ('c', 3), ('d', 4)] :func:`run_length.decode` decompresses an iterable that was previously compressed with run-length encoding. It yields the items of the decompressed iterable: >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] >>> list(run_length.decode(compressed)) ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] """ @staticmethod def encode(iterable): return ((k, ilen(g)) for k, g in groupby(iterable)) @staticmethod def decode(iterable): return chain.from_iterable(repeat(k, n) for k, n in iterable) def exactly_n(iterable, n, predicate=bool): """Return ``True`` if exactly ``n`` items in the iterable are ``True`` according to the *predicate* function. >>> exactly_n([True, True, False], 2) True >>> exactly_n([True, True, False], 1) False >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) True The iterable will be advanced until ``n + 1`` truthy items are encountered, so avoid calling it on infinite iterables. """ return len(take(n + 1, filter(predicate, iterable))) == n def circular_shifts(iterable): """Return a list of circular shifts of *iterable*. >>> circular_shifts(range(4)) [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] """ lst = list(iterable) return take(len(lst), windowed(cycle(lst), len(lst))) def make_decorator(wrapping_func, result_index=0): """Return a decorator version of *wrapping_func*, which is a function that modifies an iterable. *result_index* is the position in that function's signature where the iterable goes. This lets you use itertools on the "production end," i.e. at function definition. This can augment what the function returns without changing the function's code. For example, to produce a decorator version of :func:`chunked`: >>> from more_itertools import chunked >>> chunker = make_decorator(chunked, result_index=0) >>> @chunker(3) ... def iter_range(n): ... return iter(range(n)) ... >>> list(iter_range(9)) [[0, 1, 2], [3, 4, 5], [6, 7, 8]] To only allow truthy items to be returned: >>> truth_serum = make_decorator(filter, result_index=1) >>> @truth_serum(bool) ... def boolean_test(): ... return [0, 1, '', ' ', False, True] ... >>> list(boolean_test()) [1, ' ', True] The :func:`peekable` and :func:`seekable` wrappers make for practical decorators: >>> from more_itertools import peekable >>> peekable_function = make_decorator(peekable) >>> @peekable_function() ... def str_range(*args): ... return (str(x) for x in range(*args)) ... >>> it = str_range(1, 20, 2) >>> next(it), next(it), next(it) ('1', '3', '5') >>> it.peek() '7' >>> next(it) '7' """ # See https://sites.google.com/site/bbayles/index/decorator_factory for # notes on how this works. def decorator(*wrapping_args, **wrapping_kwargs): def outer_wrapper(f): def inner_wrapper(*args, **kwargs): result = f(*args, **kwargs) wrapping_args_ = list(wrapping_args) wrapping_args_.insert(result_index, result) return wrapping_func(*wrapping_args_, **wrapping_kwargs) return inner_wrapper return outer_wrapper return decorator def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): """Return a dictionary that maps the items in *iterable* to categories defined by *keyfunc*, transforms them with *valuefunc*, and then summarizes them by category with *reducefunc*. *valuefunc* defaults to the identity function if it is unspecified. 
    If *reducefunc* is unspecified, no summarization takes place:

        >>> keyfunc = lambda x: x.upper()
        >>> result = map_reduce('abbccc', keyfunc)
        >>> sorted(result.items())
        [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]

    Specifying *valuefunc* transforms the categorized items:

        >>> keyfunc = lambda x: x.upper()
        >>> valuefunc = lambda x: 1
        >>> result = map_reduce('abbccc', keyfunc, valuefunc)
        >>> sorted(result.items())
        [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]

    Specifying *reducefunc* summarizes the categorized items:

        >>> keyfunc = lambda x: x.upper()
        >>> valuefunc = lambda x: 1
        >>> reducefunc = sum
        >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
        >>> sorted(result.items())
        [('A', 1), ('B', 2), ('C', 3)]

    You may want to filter the input iterable before applying the map/reduce
    procedure:

        >>> all_items = range(30)
        >>> items = [x for x in all_items if 10 <= x <= 20]  # Filter
        >>> keyfunc = lambda x: x % 2  # Evens map to 0; odds to 1
        >>> categories = map_reduce(items, keyfunc=keyfunc)
        >>> sorted(categories.items())
        [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
        >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
        >>> sorted(summaries.items())
        [(0, 90), (1, 75)]

    Note that all items in the iterable are gathered into a list before the
    summarization step, which may require significant storage.

    The returned object is a :obj:`collections.defaultdict` with the
    ``default_factory`` set to ``None``, such that it behaves like a normal
    dictionary.

    """
    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc

    ret = defaultdict(list)
    for item in iterable:
        key = keyfunc(item)
        value = valuefunc(item)
        ret[key].append(value)

    if reducefunc is not None:
        for key, value_list in ret.items():
            ret[key] = reducefunc(value_list)

    ret.default_factory = None

    return ret


def rlocate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred* returns
    ``True``, starting from the right and moving left.

    *pred* defaults to :func:`bool`, which will select truthy items:

        >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
        [4, 2, 1]

    Set *pred* to a custom function to, e.g., find the indexes for a
    particular item:

        >>> iterable = iter('abcb')
        >>> pred = lambda x: x == 'b'
        >>> list(rlocate(iterable, pred))
        [3, 1]

    If *window_size* is given, then the *pred* function will be called with
    that many items. This enables searching for sub-sequences:

        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
        >>> pred = lambda *args: args == (1, 2, 3)
        >>> list(rlocate(iterable, pred=pred, window_size=3))
        [9, 5, 1]

    Beware, this function won't return anything for infinite iterables.
    If *iterable* is reversible, ``rlocate`` will reverse it and search from
    the right. Otherwise, it will search from the left and return the results
    in reverse order.

    See :func:`locate` for other example applications.

    """
    if window_size is None:
        try:
            len_iter = len(iterable)
            return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
        except TypeError:
            pass

    return reversed(list(locate(iterable, pred, window_size)))


def replace(iterable, pred, substitutes, count=None, window_size=1):
    """Yield the items from *iterable*, replacing the items for which *pred*
    returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] >>> pred = lambda x: x == 0 >>> substitutes = (2, 3) >>> list(replace(iterable, pred, substitutes)) [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] If *count* is given, the number of replacements will be limited: >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] >>> pred = lambda x: x == 0 >>> substitutes = [None] >>> list(replace(iterable, pred, substitutes, count=2)) [1, 1, None, 1, 1, None, 1, 1, 0] Use *window_size* to control the number of items passed as arguments to *pred*. This allows for locating and replacing subsequences. >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] >>> window_size = 3 >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred >>> substitutes = [3, 4] # Splice in these items >>> list(replace(iterable, pred, substitutes, window_size=window_size)) [3, 4, 5, 3, 4, 5] """ if window_size < 1: raise ValueError('window_size must be at least 1') # Save the substitutes iterable, since it's used more than once substitutes = tuple(substitutes) # Add padding such that the number of windows matches the length of the # iterable it = chain(iterable, [_marker] * (window_size - 1)) windows = windowed(it, window_size) n = 0 for w in windows: # If the current window matches our predicate (and we haven't hit # our maximum number of replacements), splice in the substitutes # and then consume the following windows that overlap with this one. # For example, if the iterable is (0, 1, 2, 3, 4...) # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) if pred(*w): if (count is None) or (n < count): n += 1 yield from substitutes consume(windows, window_size - 1) continue # If there was no match (or we've reached the replacement limit), # yield the first item from the window. if w and (w[0] is not _marker): yield w[0] def partitions(iterable): """Yield all possible order-preserving partitions of *iterable*. >>> iterable = 'abc' >>> for part in partitions(iterable): ... print([''.join(p) for p in part]) ['abc'] ['a', 'bc'] ['ab', 'c'] ['a', 'b', 'c'] This is unrelated to :func:`partition`. """ sequence = list(iterable) n = len(sequence) for i in powerset(range(1, n)): yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] def set_partitions(iterable, k=None): """ Yield the set partitions of *iterable* into *k* parts. Set partitions are not order-preserving. >>> iterable = 'abc' >>> for part in set_partitions(iterable, 2): ... print([''.join(p) for p in part]) ['a', 'bc'] ['ab', 'c'] ['b', 'ac'] If *k* is not given, every set partition is generated. >>> iterable = 'abc' >>> for part in set_partitions(iterable): ... print([''.join(p) for p in part]) ['abc'] ['a', 'bc'] ['ab', 'c'] ['b', 'ac'] ['a', 'b', 'c'] """ L = list(iterable) n = len(L) if k is not None: if k < 1: raise ValueError( "Can't partition in a negative or zero number of groups" ) elif k > n: return def set_partitions_helper(L, k): n = len(L) if k == 1: yield [L] elif n == k: yield [[s] for s in L] else: e, *M = L for p in set_partitions_helper(M, k - 1): yield [[e], *p] for p in set_partitions_helper(M, k): for i in range(len(p)): yield p[:i] + [[e] + p[i]] + p[i + 1 :] if k is None: for k in range(1, n + 1): yield from set_partitions_helper(L, k) else: yield from set_partitions_helper(L, k) class time_limited: """ Yield items from *iterable* until *limit_seconds* have passed. If the time limit expires before all items have been yielded, the ``timed_out`` parameter will be set to ``True``. 
    >>> from time import sleep
    >>> def generator():
    ...     yield 1
    ...     yield 2
    ...     sleep(0.2)
    ...     yield 3
    >>> iterable = time_limited(0.1, generator())
    >>> list(iterable)
    [1, 2]
    >>> iterable.timed_out
    True

    Note that the time is checked before each item is yielded, and iteration
    stops if the time elapsed is greater than *limit_seconds*. If your time
    limit is 1 second, but it takes 2 seconds to generate the first item from
    the iterable, the function will run for 2 seconds and not yield anything.

    """

    def __init__(self, limit_seconds, iterable):
        if limit_seconds < 0:
            raise ValueError('limit_seconds must be non-negative')
        self.limit_seconds = limit_seconds
        self._iterable = iter(iterable)
        self._start_time = monotonic()
        self.timed_out = False

    def __iter__(self):
        return self

    def __next__(self):
        item = next(self._iterable)
        if monotonic() - self._start_time > self.limit_seconds:
            self.timed_out = True
            raise StopIteration

        return item


def only(iterable, default=None, too_long=None):
    """If *iterable* has only one item, return it.
    If it has zero items, return *default*.
    If it has more than one item, raise the exception given by *too_long*,
    which is ``ValueError`` by default.

    >>> only([], default='missing')
    'missing'
    >>> only([1])
    1
    >>> only([1, 2])  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: Expected exactly one item in iterable, but got 1, 2, and
    perhaps more.
    >>> only([1, 2], too_long=TypeError)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError

    Note that :func:`only` attempts to advance *iterable* twice to ensure
    there is only one item. See :func:`spy` or :func:`peekable` to check
    iterable contents less destructively.

    """
    it = iter(iterable)
    first_value = next(it, default)

    try:
        second_value = next(it)
    except StopIteration:
        pass
    else:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)

    return first_value


class _IChunk:
    def __init__(self, iterable, n):
        self._it = islice(iterable, n)
        self._cache = deque()

    def fill_cache(self):
        self._cache.extend(self._it)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._it)
        except StopIteration:
            if self._cache:
                return self._cache.popleft()
            else:
                raise


def ichunked(iterable, n):
    """Break *iterable* into sub-iterables with *n* elements each.
    :func:`ichunked` is like :func:`chunked`, but it yields iterables
    instead of lists.

    If the sub-iterables are read in order, the elements of *iterable*
    won't be stored in memory.
    If they are read out of order, :func:`itertools.tee` is used to cache
    elements as necessary.

    >>> from itertools import count
    >>> all_chunks = ichunked(count(), 4)
    >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
    >>> list(c_2)  # c_1's elements have been cached; c_3's haven't been
    [4, 5, 6, 7]
    >>> list(c_1)
    [0, 1, 2, 3]
    >>> list(c_3)
    [8, 9, 10, 11]

    """
    source = peekable(iter(iterable))
    ichunk_marker = object()
    while True:
        # Check to see whether we're at the end of the source iterable
        item = source.peek(ichunk_marker)
        if item is ichunk_marker:
            return

        chunk = _IChunk(source, n)
        yield chunk

        # Advance the source iterable and fill previous chunk's cache
        chunk.fill_cache()


def iequals(*iterables):
    """Return ``True`` if all given *iterables* are equal to each other,
    which means that they contain the same elements in the same order.
    The function is useful for comparing iterables of different data types
    or iterables that do not support equality checks.

    >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
    True

    >>> iequals("abc", "acb")
    False

    Not to be confused with :func:`all_equal`, which checks whether all
    elements of an iterable are equal to each other.

    """
    return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))


def distinct_combinations(iterable, r):
    """Yield the distinct combinations of *r* items taken from *iterable*.

        >>> list(distinct_combinations([0, 0, 1], 2))
        [(0, 0), (0, 1)]

    Equivalent to ``set(combinations(iterable, r))``, except duplicates are
    not generated and thrown away. For larger input sequences this is much
    more efficient.

    """
    if r < 0:
        raise ValueError('r must be non-negative')
    elif r == 0:
        yield ()
        return
    pool = tuple(iterable)
    generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
    current_combo = [None] * r
    level = 0
    while generators:
        try:
            cur_idx, p = next(generators[-1])
        except StopIteration:
            generators.pop()
            level -= 1
            continue
        current_combo[level] = p
        if level + 1 == r:
            yield tuple(current_combo)
        else:
            generators.append(
                unique_everseen(
                    enumerate(pool[cur_idx + 1 :], cur_idx + 1),
                    key=itemgetter(1),
                )
            )
            level += 1


def filter_except(validator, iterable, *exceptions):
    """Yield the items from *iterable* for which the *validator* function does
    not raise one of the specified *exceptions*.

    *validator* is called for each item in *iterable*.
    It should be a function that accepts one argument and raises an exception
    if that item is not valid.

    >>> iterable = ['1', '2', 'three', '4', None]
    >>> list(filter_except(int, iterable, ValueError, TypeError))
    ['1', '2', '4']

    If an exception other than one given by *exceptions* is raised by
    *validator*, it is raised like normal.

    """
    for item in iterable:
        try:
            validator(item)
        except exceptions:
            pass
        else:
            yield item


def map_except(function, iterable, *exceptions):
    """Transform each item from *iterable* with *function* and yield the
    result, unless *function* raises one of the specified *exceptions*.

    *function* is called to transform each item in *iterable*.
    It should accept one argument.

    >>> iterable = ['1', '2', 'three', '4', None]
    >>> list(map_except(int, iterable, ValueError, TypeError))
    [1, 2, 4]

    If an exception other than one given by *exceptions* is raised by
    *function*, it is raised like normal.

    """
    for item in iterable:
        try:
            yield function(item)
        except exceptions:
            pass


def map_if(iterable, pred, func, func_else=lambda x: x):
    """Evaluate each item from *iterable* using *pred*. If the result is
    equivalent to ``True``, transform the item with *func* and yield it.
    Otherwise, transform the item with *func_else* and yield it.

    *pred*, *func*, and *func_else* should each be functions that accept
    one argument. By default, *func_else* is the identity function.

    >>> from math import sqrt
    >>> iterable = list(range(-5, 5))
    >>> iterable
    [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
    >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
    [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
    >>> list(map_if(iterable, lambda x: x >= 0,
    ... lambda x: f'{sqrt(x):.2f}', lambda x: None))
    [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']

    """
    for item in iterable:
        yield func(item) if pred(item) else func_else(item)


def _sample_unweighted(iterable, k):
    # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
    # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
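    # The strategy: maintain a k-item reservoir and track W, the largest of
    # k uniform(0, 1) draws. Geometrically distributed skips then jump
    # straight to the next index that could displace a reservoir entry, so
    # most elements are never individually inspected.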
# Fill up the reservoir (collection of samples) with the first `k` samples reservoir = take(k, iterable) # Generate random number that's the largest in a sample of k U(0,1) numbers # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic W = exp(log(random()) / k) # The number of elements to skip before changing the reservoir is a random # number with a geometric distribution. Sample it using random() and logs. next_index = k + floor(log(random()) / log(1 - W)) for index, element in enumerate(iterable, k): if index == next_index: reservoir[randrange(k)] = element # The new W is the largest in a sample of k U(0, `old_W`) numbers W *= exp(log(random()) / k) next_index += floor(log(random()) / log(1 - W)) + 1 return reservoir def _sample_weighted(iterable, k, weights): # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : # "Weighted random sampling with a reservoir". # Log-transform for numerical stability for weights that are small/large weight_keys = (log(random()) / weight for weight in weights) # Fill up the reservoir (collection of samples) with the first `k` # weight-keys and elements, then heapify the list. reservoir = take(k, zip(weight_keys, iterable)) heapify(reservoir) # The number of jumps before changing the reservoir is a random variable # with an exponential distribution. Sample it using random() and logs. smallest_weight_key, _ = reservoir[0] weights_to_skip = log(random()) / smallest_weight_key for weight, element in zip(weights, iterable): if weight >= weights_to_skip: # The notation here is consistent with the paper, but we store # the weight-keys in log-space for better numerical stability. smallest_weight_key, _ = reservoir[0] t_w = exp(weight * smallest_weight_key) r_2 = uniform(t_w, 1) # generate U(t_w, 1) weight_key = log(r_2) / weight heapreplace(reservoir, (weight_key, element)) smallest_weight_key, _ = reservoir[0] weights_to_skip = log(random()) / smallest_weight_key else: weights_to_skip -= weight # Equivalent to [element for weight_key, element in sorted(reservoir)] return [heappop(reservoir)[1] for _ in range(k)] def sample(iterable, k, weights=None): """Return a *k*-length list of elements chosen (without replacement) from the *iterable*. Like :func:`random.sample`, but works on iterables of unknown length. >>> iterable = range(100) >>> sample(iterable, 5) # doctest: +SKIP [81, 60, 96, 16, 4] An iterable with *weights* may also be given: >>> iterable = range(100) >>> weights = (i * i + 1 for i in range(100)) >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP [79, 67, 74, 66, 78] The algorithm can also be used to generate weighted random permutations. The relative weight of each item determines the probability that it appears late in the permutation. >>> data = "abcdefgh" >>> weights = range(1, len(data) + 1) >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] """ if k == 0: return [] iterable = iter(iterable) if weights is None: return _sample_unweighted(iterable, k) else: weights = iter(weights) return _sample_weighted(iterable, k, weights) def is_sorted(iterable, key=None, reverse=False, strict=False): """Returns ``True`` if the items of iterable are in sorted order, and ``False`` otherwise. *key* and *reverse* have the same meaning that they do in the built-in :func:`sorted` function. 
    >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
    True
    >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
    False

    If *strict*, tests for strict sorting, that is, returns ``False`` if equal
    elements are found:

    >>> is_sorted([1, 2, 2])
    True
    >>> is_sorted([1, 2, 2], strict=True)
    False

    The function returns ``False`` after encountering the first out-of-order
    item. If there are no out-of-order items, the iterable is exhausted.

    """
    compare = (le if reverse else ge) if strict else (lt if reverse else gt)
    it = iterable if key is None else map(key, iterable)
    return not any(starmap(compare, pairwise(it)))


class AbortThread(BaseException):
    pass


class callback_iter:
    """Convert a function that uses callbacks to an iterator.

    Let *func* be a function that takes a `callback` keyword argument.
    For example:

    >>> def func(callback=None):
    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
    ...         if callback:
    ...             callback(i, c)
    ...     return 4


    Use ``with callback_iter(func)`` to get an iterator over the
    parameters that are delivered to the callback.

    >>> with callback_iter(func) as it:
    ...     for args, kwargs in it:
    ...         print(args)
    (1, 'a')
    (2, 'b')
    (3, 'c')

    The function will be called in a background thread. The ``done`` property
    indicates whether it has completed execution.

    >>> it.done
    True

    If it completes successfully, its return value will be available
    in the ``result`` property.

    >>> it.result
    4

    Notes:

    * If the function uses some keyword argument besides ``callback``, supply
      *callback_kwd*.
    * If it finished executing, but raised an exception, accessing the
      ``result`` property will raise the same exception.
    * If it hasn't finished executing, accessing the ``result``
      property from within the ``with`` block will raise ``RuntimeError``.
    * If it hasn't finished executing, accessing the ``result`` property from
      outside the ``with`` block will raise a
      ``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently the function is polled
      for output.
""" def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): self._func = func self._callback_kwd = callback_kwd self._aborted = False self._future = None self._wait_seconds = wait_seconds # Lazily import concurrent.future self._executor = __import__( ).futures.__import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1) self._iterator = self._reader() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self._aborted = True self._executor.shutdown() def __iter__(self): return self def __next__(self): return next(self._iterator) @property def done(self): if self._future is None: return False return self._future.done() @property def result(self): if not self.done: raise RuntimeError('Function has not yet completed') return self._future.result() def _reader(self): q = Queue() def callback(*args, **kwargs): if self._aborted: raise AbortThread('canceled by user') q.put((args, kwargs)) self._future = self._executor.submit( self._func, **{self._callback_kwd: callback} ) while True: try: item = q.get(timeout=self._wait_seconds) except Empty: pass else: q.task_done() yield item if self._future.done(): break remaining = [] while True: try: item = q.get_nowait() except Empty: break else: q.task_done() remaining.append(item) q.join() yield from remaining def windowed_complete(iterable, n): """ Yield ``(beginning, middle, end)`` tuples, where: * Each ``middle`` has *n* items from *iterable* * Each ``beginning`` has the items before the ones in ``middle`` * Each ``end`` has the items after the ones in ``middle`` >>> iterable = range(7) >>> n = 3 >>> for beginning, middle, end in windowed_complete(iterable, n): ... print(beginning, middle, end) () (0, 1, 2) (3, 4, 5, 6) (0,) (1, 2, 3) (4, 5, 6) (0, 1) (2, 3, 4) (5, 6) (0, 1, 2) (3, 4, 5) (6,) (0, 1, 2, 3) (4, 5, 6) () Note that *n* must be at least 0 and most equal to the length of *iterable*. This function will exhaust the iterable and may require significant storage. """ if n < 0: raise ValueError('n must be >= 0') seq = tuple(iterable) size = len(seq) if n > size: raise ValueError('n must be <= len(seq)') for i in range(size - n + 1): beginning = seq[:i] middle = seq[i : i + n] end = seq[i + n :] yield beginning, middle, end def all_unique(iterable, key=None): """ Returns ``True`` if all the elements of *iterable* are unique (no two elements are equal). >>> all_unique('ABCB') False If a *key* function is specified, it will be used to make comparisons. >>> all_unique('ABCb') True >>> all_unique('ABCb', str.lower) False The function returns as soon as the first non-unique element is encountered. Iterables with a mix of hashable and unhashable items can be used, but the function will be slower for unhashable items. """ seenset = set() seenset_add = seenset.add seenlist = [] seenlist_add = seenlist.append for element in map(key, iterable) if key else iterable: try: if element in seenset: return False seenset_add(element) except TypeError: if element in seenlist: return False seenlist_add(element) return True def nth_product(index, *args): """Equivalent to ``list(product(*args))[index]``. The products of *args* can be ordered lexicographically. :func:`nth_product` computes the product at sort position *index* without computing the previous products. >>> nth_product(8, range(2), range(2), range(2), range(2)) (1, 0, 0, 0) ``IndexError`` will be raised if the given *index* is invalid. 
""" pools = list(map(tuple, reversed(args))) ns = list(map(len, pools)) c = reduce(mul, ns) if index < 0: index += c if not 0 <= index < c: raise IndexError result = [] for pool, n in zip(pools, ns): result.append(pool[index % n]) index //= n return tuple(reversed(result)) def nth_permutation(iterable, r, index): """Equivalent to ``list(permutations(iterable, r))[index]``` The subsequences of *iterable* that are of length *r* where order is important can be ordered lexicographically. :func:`nth_permutation` computes the subsequence at sort position *index* directly, without computing the previous subsequences. >>> nth_permutation('ghijk', 2, 5) ('h', 'i') ``ValueError`` will be raised If *r* is negative or greater than the length of *iterable*. ``IndexError`` will be raised if the given *index* is invalid. """ pool = list(iterable) n = len(pool) if r is None or r == n: r, c = n, factorial(n) elif not 0 <= r < n: raise ValueError else: c = factorial(n) // factorial(n - r) if index < 0: index += c if not 0 <= index < c: raise IndexError if c == 0: return tuple() result = [0] * r q = index * factorial(n) // c if r < n else index for d in range(1, n + 1): q, i = divmod(q, d) if 0 <= n - d < r: result[n - d] = i if q == 0: break return tuple(map(pool.pop, result)) def value_chain(*args): """Yield all arguments passed to the function in the same order in which they were passed. If an argument itself is iterable then iterate over its values. >>> list(value_chain(1, 2, 3, [4, 5, 6])) [1, 2, 3, 4, 5, 6] Binary and text strings are not considered iterable and are emitted as-is: >>> list(value_chain('12', '34', ['56', '78'])) ['12', '34', '56', '78'] Multiple levels of nesting are not flattened. """ for value in args: if isinstance(value, (str, bytes)): yield value continue try: yield from value except TypeError: yield value def product_index(element, *args): """Equivalent to ``list(product(*args)).index(element)`` The products of *args* can be ordered lexicographically. :func:`product_index` computes the first index of *element* without computing the previous products. >>> product_index([8, 2], range(10), range(5)) 42 ``ValueError`` will be raised if the given *element* isn't in the product of *args*. """ index = 0 for x, pool in zip_longest(element, args, fillvalue=_marker): if x is _marker or pool is _marker: raise ValueError('element is not a product of args') pool = tuple(pool) index = index * len(pool) + pool.index(x) return index def combination_index(element, iterable): """Equivalent to ``list(combinations(iterable, r)).index(element)`` The subsequences of *iterable* that are of length *r* can be ordered lexicographically. :func:`combination_index` computes the index of the first *element*, without computing the previous combinations. >>> combination_index('adf', 'abcdefg') 10 ``ValueError`` will be raised if the given *element* isn't one of the combinations of *iterable*. 
""" element = enumerate(element) k, y = next(element, (None, None)) if k is None: return 0 indexes = [] pool = enumerate(iterable) for n, x in pool: if x == y: indexes.append(n) tmp, y = next(element, (None, None)) if tmp is None: break else: k = tmp else: raise ValueError('element is not a combination of iterable') n, _ = last(pool, default=(n, None)) # Python versions below 3.8 don't have math.comb index = 1 for i, j in enumerate(reversed(indexes), start=1): j = n - j if i <= j: index += factorial(j) // (factorial(i) * factorial(j - i)) return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index def permutation_index(element, iterable): """Equivalent to ``list(permutations(iterable, r)).index(element)``` The subsequences of *iterable* that are of length *r* where order is important can be ordered lexicographically. :func:`permutation_index` computes the index of the first *element* directly, without computing the previous permutations. >>> permutation_index([1, 3, 2], range(5)) 19 ``ValueError`` will be raised if the given *element* isn't one of the permutations of *iterable*. """ index = 0 pool = list(iterable) for i, x in zip(range(len(pool), -1, -1), element): r = pool.index(x) index = index * i + r del pool[r] return index class countable: """Wrap *iterable* and keep a count of how many items have been consumed. The ``items_seen`` attribute starts at ``0`` and increments as the iterable is consumed: >>> iterable = map(str, range(10)) >>> it = countable(iterable) >>> it.items_seen 0 >>> next(it), next(it) ('0', '1') >>> list(it) ['2', '3', '4', '5', '6', '7', '8', '9'] >>> it.items_seen 10 """ def __init__(self, iterable): self._it = iter(iterable) self.items_seen = 0 def __iter__(self): return self def __next__(self): item = next(self._it) self.items_seen += 1 return item def chunked_even(iterable, n): """Break *iterable* into lists of approximately length *n*. Items are distributed such the lengths of the lists differ by at most 1 item. >>> iterable = [1, 2, 3, 4, 5, 6, 7] >>> n = 3 >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2 [[1, 2, 3], [4, 5], [6, 7]] >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1 [[1, 2, 3], [4, 5, 6], [7]] """ len_method = getattr(iterable, '__len__', None) if len_method is None: return _chunked_even_online(iterable, n) else: return _chunked_even_finite(iterable, len_method(), n) def _chunked_even_online(iterable, n): buffer = [] maxbuf = n + (n - 2) * (n - 1) for x in iterable: buffer.append(x) if len(buffer) == maxbuf: yield buffer[:n] buffer = buffer[n:] yield from _chunked_even_finite(buffer, len(buffer), n) def _chunked_even_finite(iterable, N, n): if N < 1: return # Lists are either size `full_size <= n` or `partial_size = full_size - 1` q, r = divmod(N, n) num_lists = q + (1 if r > 0 else 0) q, r = divmod(N, num_lists) full_size = q + (1 if r > 0 else 0) partial_size = full_size - 1 num_full = N - partial_size * num_lists num_partial = num_lists - num_full buffer = [] iterator = iter(iterable) # Yield num_full lists of full_size for x in iterator: buffer.append(x) if len(buffer) == full_size: yield buffer buffer = [] num_full -= 1 if num_full <= 0: break # Yield num_partial lists of partial_size for x in iterator: buffer.append(x) if len(buffer) == partial_size: yield buffer buffer = [] num_partial -= 1 def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): """A version of :func:`zip` that "broadcasts" any scalar (i.e., non-iterable) items into output tuples. 
    >>> iterable_1 = [1, 2, 3]
    >>> iterable_2 = ['a', 'b', 'c']
    >>> scalar = '_'
    >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
    [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]

    The *scalar_types* keyword argument determines what types are considered
    scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
    treat strings and byte strings as iterable:

    >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
    [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]

    If the *strict* keyword argument is ``True``, then
    ``UnequalIterablesError`` will be raised if any of the iterables have
    different lengths.
    """

    def is_scalar(obj):
        if scalar_types and isinstance(obj, scalar_types):
            return True
        try:
            iter(obj)
        except TypeError:
            return True
        else:
            return False

    size = len(objects)
    if not size:
        return

    iterables, iterable_positions = [], []
    scalars, scalar_positions = [], []
    for i, obj in enumerate(objects):
        if is_scalar(obj):
            scalars.append(obj)
            scalar_positions.append(i)
        else:
            iterables.append(iter(obj))
            iterable_positions.append(i)

    if len(scalars) == size:
        yield tuple(objects)
        return

    zipper = _zip_equal if strict else zip
    for item in zipper(*iterables):
        new_item = [None] * size

        for i, elem in zip(iterable_positions, item):
            new_item[i] = elem

        for i, elem in zip(scalar_positions, scalars):
            new_item[i] = elem

        yield tuple(new_item)


def unique_in_window(iterable, n, key=None):
    """Yield the items from *iterable* that haven't been seen recently.
    *n* is the size of the lookback window.

        >>> iterable = [0, 1, 0, 2, 3, 0]
        >>> n = 3
        >>> list(unique_in_window(iterable, n))
        [0, 1, 2, 3, 0]

    The *key* function, if provided, will be used to determine uniqueness:

        >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
        ['a', 'b', 'c', 'd', 'a']

    The items in *iterable* must be hashable.

    """
    if n <= 0:
        raise ValueError('n must be greater than 0')

    window = deque(maxlen=n)
    uniques = set()
    use_key = key is not None

    for item in iterable:
        k = key(item) if use_key else item
        if k in uniques:
            continue

        if len(uniques) == n:
            uniques.discard(window[0])

        uniques.add(k)
        window.append(k)

        yield item


def duplicates_everseen(iterable, key=None):
    """Yield duplicate elements after their first appearance.

    >>> list(duplicates_everseen('mississippi'))
    ['s', 'i', 's', 's', 'i', 'p', 'i']
    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']

    This function is analogous to :func:`unique_everseen` and is subject to
    the same performance considerations.

    """
    seen_set = set()
    seen_list = []
    use_key = key is not None

    for element in iterable:
        k = key(element) if use_key else element
        try:
            if k not in seen_set:
                seen_set.add(k)
            else:
                yield element
        except TypeError:
            if k not in seen_list:
                seen_list.append(k)
            else:
                yield element


def duplicates_justseen(iterable, key=None):
    """Yields serially-duplicate elements after their first appearance.

    >>> list(duplicates_justseen('mississippi'))
    ['s', 's', 'p']
    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']

    This function is analogous to :func:`unique_justseen`.

    """
    return flatten(
        map(
            lambda group_tuple: islice_extended(group_tuple[1])[1:],
            groupby(iterable, key),
        )
    )


def minmax(iterable_or_value, *others, key=None, default=_marker):
    """Returns both the smallest and largest items in an iterable
    or the largest of two or more arguments.

        >>> minmax([3, 1, 5])
        (1, 5)

        >>> minmax(4, 2, 6)
        (2, 6)

    If a *key* function is provided, it will be used to transform the input
    items for comparison.
        >>> minmax([5, 30], key=str)  # '30' sorts before '5'
        (30, 5)

    If a *default* value is provided, it will be returned if there are no
    input items.

        >>> minmax([], default=(0, 0))
        (0, 0)

    Otherwise ``ValueError`` is raised.

    This function is based on the
    `recipe <http://code.activestate.com/recipes/577916/>`__ by
    Raymond Hettinger and takes care to minimize the number of comparisons
    performed.
    """
    iterable = (iterable_or_value, *others) if others else iterable_or_value

    it = iter(iterable)

    try:
        lo = hi = next(it)
    except StopIteration as e:
        if default is _marker:
            raise ValueError(
                '`minmax()` argument is an empty iterable. '
                'Provide a `default` value to suppress this error.'
            ) from e
        return default

    # The loop below takes different branches depending on whether *key* was
    # given; this avoids needless copies that would otherwise significantly
    # slow down the ``key=None`` branch.
    if key is None:
        for x, y in zip_longest(it, it, fillvalue=lo):
            if y < x:
                x, y = y, x
            if x < lo:
                lo = x
            if hi < y:
                hi = y

    else:
        lo_key = hi_key = key(lo)

        for x, y in zip_longest(it, it, fillvalue=lo):
            x_key, y_key = key(x), key(y)

            if y_key < x_key:
                x, y, x_key, y_key = y, x, y_key, x_key
            if x_key < lo_key:
                lo, lo_key = x, x_key
            if hi_key < y_key:
                hi, hi_key = y, y_key

    return lo, hi


def constrained_batches(
    iterable, max_size, max_count=None, get_len=len, strict=True
):
    """Yield batches of items from *iterable* with a combined size limited by
    *max_size*.

    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10))
    [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]

    If a *max_count* is supplied, the number of items per batch is also
    limited:

    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
    >>> list(constrained_batches(iterable, 10, max_count = 2))
    [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]

    If a *get_len* function is supplied, use that instead of :func:`len` to
    determine item size.

    If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
    than *max_size*. Otherwise, allow single items to exceed *max_size*.
    """
    if max_size <= 0:
        raise ValueError('maximum size must be greater than zero')

    batch = []
    batch_size = 0
    batch_count = 0
    for item in iterable:
        item_len = get_len(item)
        if strict and item_len > max_size:
            raise ValueError('item size exceeds maximum size')

        reached_count = batch_count == max_count
        reached_size = item_len + batch_size > max_size
        if batch_count and (reached_size or reached_count):
            yield tuple(batch)
            batch.clear()
            batch_size = 0
            batch_count = 0

        batch.append(item)
        batch_size += item_len
        batch_count += 1

    if batch:
        yield tuple(batch)


def gray_product(*iterables):
    """Like :func:`itertools.product`, but return tuples in an order such
    that only one element in the generated tuple changes from one iteration
    to the next.

        >>> list(gray_product('AB','CD'))
        [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]

    This function consumes all of the input iterables before producing output.
    If any of the input iterables have fewer than two items, ``ValueError``
    is raised.

    For information on the algorithm, see
    `this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
    of Donald Knuth's *The Art of Computer Programming*.
    """
    all_iterables = tuple(tuple(x) for x in iterables)
    iterable_count = len(all_iterables)
    for iterable in all_iterables:
        if len(iterable) < 2:
            raise ValueError("each iterable must have two or more items")

    # This is based on "Algorithm H" from section 7.2.1.1, page 20.
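    # Loopless Gray-code traversal: each iteration changes exactly one
    # coordinate a[j] by +/-1 (its direction is o[j]), and the focus
    # pointers in f select which coordinate moves next in O(1) per tuple.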
# a holds the indexes of the source iterables for the n-tuple to be yielded # f is the array of "focus pointers" # o is the array of "directions" a = [0] * iterable_count f = list(range(iterable_count + 1)) o = [1] * iterable_count while True: yield tuple(all_iterables[i][a[i]] for i in range(iterable_count)) j = f[0] f[0] = 0 if j == iterable_count: break a[j] = a[j] + o[j] if a[j] == 0 or a[j] == len(all_iterables[j]) - 1: o[j] = -o[j] f[j] = f[j + 1] f[j + 1] = j + 1
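
# A minimal, self-contained sketch exercising two of the utilities defined
# above; the seed value and sample size are illustrative only.
if __name__ == "__main__":
    from random import seed

    seed(42)  # make the reservoir sampling deterministic for this sketch
    picked = sample(range(1_000), 5)  # single-pass reservoir sampling
    print(picked, minmax(picked))  # the 5 choices plus their (lo, hi) pair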
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/android.py
from __future__ import annotations

import os
import re
import sys
from functools import lru_cache
from typing import cast

from .api import PlatformDirsABC


class Android(PlatformDirsABC):
    """
    Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>` and
    `version <platformdirs.api.PlatformDirsABC.version>`.
    """

    @property
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
        return self._append_app_name_and_version(cast(str, _android_folder()), "files")

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_config_dir(self) -> str:
        """
        :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
        """
        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, same as `user_config_dir`"""
        return self.user_config_dir

    @property
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """
        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
        """
        path = self.user_cache_dir
        if self.opinion:
            path = os.path.join(path, "log")
        return path

    @property
    def user_documents_dir(self) -> str:
        """
        :return: documents directory tied to the user, e.g. ``/storage/emulated/0/Documents``
        """
        return _android_documents_folder()

    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
        """
        path = self.user_cache_dir
        if self.opinion:
            path = os.path.join(path, "tmp")
        return path


@lru_cache(maxsize=1)
def _android_folder() -> str | None:
    """:return: base folder for the Android OS or None if it cannot be found"""
    try:
        # First try to get the path to the android app via pyjnius
        from jnius import autoclass

        Context = autoclass("android.content.Context")  # noqa: N806
        result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
    except Exception:
        # if that fails, search sys.path for an Android-looking data path
        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
        for path in sys.path:
            if pattern.match(path):
                result = path.split("/files")[0]
                break
        else:
            result = None
    return result


@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
    """:return: documents folder for the Android OS"""
    # Get directories with pyjnius
    try:
        from jnius import autoclass

        Context = autoclass("android.content.Context")  # noqa: N806
        Environment = autoclass("android.os.Environment")  # noqa: N806
        documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
    except Exception:
        documents_dir = "/storage/emulated/0/Documents"

    return documents_dir


__all__ = [
    "Android",
]
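
# A minimal sketch of exercising the class above; "MyApp" is a placeholder
# name, and the path is only meaningful when running under Android itself.
if __name__ == "__main__":
    if _android_folder() is not None:  # no-op off-device
        dirs = Android(appname="MyApp", version="1.0")
        print(dirs.user_cache_dir)  # e.g. /data/user/0/<pkg>/cache/MyApp/1.0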
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/__init__.py
""" Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and usage. """ from __future__ import annotations import os import sys from pathlib import Path if sys.version_info >= (3, 8): # pragma: no cover (py38+) from typing import Literal else: # pragma: no cover (py38+) from ..typing_extensions import Literal from .api import PlatformDirsABC from .version import __version__ from .version import __version_tuple__ as __version_info__ def _set_platform_dir_class() -> type[PlatformDirsABC]: if sys.platform == "win32": from .windows import Windows as Result elif sys.platform == "darwin": from .macos import MacOS as Result else: from .unix import Unix as Result if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system": if os.getenv("SHELL") or os.getenv("PREFIX"): return Result from .android import _android_folder if _android_folder() is not None: from .android import Android return Android # return to avoid redefinition of result return Result PlatformDirs = _set_platform_dir_class() #: Currently active platform AppDirs = PlatformDirs #: Backwards compatibility with appdirs def user_data_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. :returns: data directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir def site_data_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, multipath: bool = False, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param multipath: See `roaming <platformdirs.api.PlatformDirsABC.multipath>`. :returns: data directory shared by users """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir def user_config_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. :returns: config directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir def site_config_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, multipath: bool = False, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param multipath: See `roaming <platformdirs.api.PlatformDirsABC.multipath>`. 
:returns: config directory shared by the users """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir def user_cache_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`. :returns: cache directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir def user_state_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. :returns: state directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir def user_log_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`. :returns: log directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir def user_documents_dir() -> str: """ :returns: documents directory tied to the user """ return PlatformDirs().user_documents_dir def user_runtime_dir( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> str: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`. :returns: runtime directory tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir def user_data_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. 
:returns: data path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path def site_data_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, multipath: bool = False, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`. :returns: data path shared by users """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path def user_config_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. :returns: config path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path def site_config_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, multipath: bool = False, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param multipath: See `roaming <platformdirs.api.PlatformDirsABC.multipath>`. :returns: config path shared by the users """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path def user_cache_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`. :returns: cache path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path def user_state_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.version>`. :returns: state path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path def user_log_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. 
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`. :returns: log path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path def user_documents_path() -> Path: """ :returns: documents path tied to the user """ return PlatformDirs().user_documents_path def user_runtime_path( appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, opinion: bool = True, ) -> Path: """ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`. :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`. :param version: See `version <platformdirs.api.PlatformDirsABC.version>`. :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`. :returns: runtime path tied to the user """ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path __all__ = [ "__version__", "__version_info__", "PlatformDirs", "AppDirs", "PlatformDirsABC", "user_data_dir", "user_config_dir", "user_cache_dir", "user_state_dir", "user_log_dir", "user_documents_dir", "user_runtime_dir", "site_data_dir", "site_config_dir", "user_data_path", "user_config_path", "user_cache_path", "user_state_path", "user_log_path", "user_documents_path", "user_runtime_path", "site_data_path", "site_config_path", ]
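
# A short sketch of the module-level helpers above; "MyApp"/"MyCompany" are
# placeholder names, and the printed locations depend on the host platform.
if __name__ == "__main__":
    print(user_data_dir("MyApp", "MyCompany", version="1.0"))
    print(user_cache_path("MyApp", "MyCompany"))  # pathlib.Path variant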
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/version.py
# file generated by setuptools_scm # don't change, don't track in version control __version__ = version = '2.6.2' __version_tuple__ = version_tuple = (2, 6, 2)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/unix.py
from __future__ import annotations

import os
import sys
from configparser import ConfigParser
from pathlib import Path

from .api import PlatformDirsABC

if sys.platform.startswith("linux"):  # pragma: no branch # no op check, only to please the type checker
    from os import getuid
else:

    def getuid() -> int:
        raise RuntimeError("should only be used on Linux")


class Unix(PlatformDirsABC):
    """
    On Unix/Linux, we follow the
    `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec
    allows overriding directories with environment variables. The examples shown are the default values, alongside
    the name of the environment variable that overrides them. Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>`,
    `version <platformdirs.api.PlatformDirsABC.version>`,
    `multipath <platformdirs.api.PlatformDirsABC.multipath>`,
    `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    """

    @property
    def user_data_dir(self) -> str:
        """
        :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
         ``$XDG_DATA_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_DATA_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.local/share")
        return self._append_app_name_and_version(path)

    @property
    def site_data_dir(self) -> str:
        """
        :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
         enabled and ``XDG_DATA_DIRS`` is set and a multi path the response is also a multi path separated by the OS
         path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
        """
        # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
        path = os.environ.get("XDG_DATA_DIRS", "")
        if not path.strip():
            path = f"/usr/local/share{os.pathsep}/usr/share"
        return self._with_multi_path(path)

    def _with_multi_path(self, path: str) -> str:
        path_list = path.split(os.pathsep)
        if not self.multipath:
            path_list = path_list[0:1]
        path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
        return os.pathsep.join(path_list)

    @property
    def user_config_dir(self) -> str:
        """
        :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
         ``$XDG_CONFIG_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CONFIG_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.config")
        return self._append_app_name_and_version(path)

    @property
    def site_config_dir(self) -> str:
        """
        :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
         enabled and ``XDG_CONFIG_DIRS`` is set and a multi path the response is also a multi path separated by the
         OS path separator), e.g. ``/etc/xdg/$appname/$version``
        """
        # XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
        path = os.environ.get("XDG_CONFIG_DIRS", "")
        if not path.strip():
            path = "/etc/xdg"
        return self._with_multi_path(path)

    @property
    def user_cache_dir(self) -> str:
        """
        :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
         ``$XDG_CACHE_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CACHE_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.cache")
        return self._append_app_name_and_version(path)

    @property
    def user_state_dir(self) -> str:
        """
        :return: state directory tied to the user, e.g.
``~/.local/state/$appname/$version`` or ``$XDG_STATE_HOME/$appname/$version`` """ path = os.environ.get("XDG_STATE_HOME", "") if not path.strip(): path = os.path.expanduser("~/.local/state") return self._append_app_name_and_version(path) @property def user_log_dir(self) -> str: """ :return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it """ path = self.user_state_dir if self.opinion: path = os.path.join(path, "log") return path @property def user_documents_dir(self) -> str: """ :return: documents directory tied to the user, e.g. ``~/Documents`` """ documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR") if documents_dir is None: documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() if not documents_dir: documents_dir = os.path.expanduser("~/Documents") return documents_dir @property def user_runtime_dir(self) -> str: """ :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or ``$XDG_RUNTIME_DIR/$appname/$version`` """ path = os.environ.get("XDG_RUNTIME_DIR", "") if not path.strip(): path = f"/run/user/{getuid()}" return self._append_app_name_and_version(path) @property def site_data_path(self) -> Path: """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_data_dir) @property def site_config_path(self) -> Path: """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_config_dir) def _first_item_as_path_if_multipath(self, directory: str) -> Path: if self.multipath: # If multipath is True, the first path is returned. directory = directory.split(os.pathsep)[0] return Path(directory) def _get_user_dirs_folder(key: str) -> str | None: """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/""" user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs") if os.path.exists(user_dirs_config_path): parser = ConfigParser() with open(user_dirs_config_path) as stream: # Add fake section header, so ConfigParser doesn't complain parser.read_string(f"[top]\n{stream.read()}") if key not in parser["top"]: return None path = parser["top"][key].strip('"') # Handle relative home paths path = path.replace("$HOME", os.path.expanduser("~")) return path return None __all__ = [ "Unix", ]
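
# A minimal sketch of the XDG override behavior implemented above; the
# environment value and app name are illustrative only.
if __name__ == "__main__":
    os.environ["XDG_DATA_HOME"] = "/tmp/xdg-data"
    print(Unix(appname="MyApp").user_data_dir)  # /tmp/xdg-data/MyApp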
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/windows.py
from __future__ import annotations import ctypes import os import sys from functools import lru_cache from typing import Callable from .api import PlatformDirsABC class Windows(PlatformDirsABC): """`MSDN on where to store app data files <http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_. Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`, `version <platformdirs.api.PlatformDirsABC.version>`, `roaming <platformdirs.api.PlatformDirsABC.roaming>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`.""" @property def user_data_dir(self) -> str: """ :return: data directory tied to the user, e.g. ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) """ const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" path = os.path.normpath(get_win_folder(const)) return self._append_parts(path) def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: params = [] if self.appname: if self.appauthor is not False: author = self.appauthor or self.appname params.append(author) params.append(self.appname) if opinion_value is not None and self.opinion: params.append(opinion_value) if self.version: params.append(self.version) return os.path.join(path, *params) @property def site_data_dir(self) -> str: """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``""" path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) return self._append_parts(path) @property def user_config_dir(self) -> str: """:return: config directory tied to the user, same as `user_data_dir`""" return self.user_data_dir @property def site_config_dir(self) -> str: """:return: config directory shared by the users, same as `site_data_dir`""" return self.site_data_dir @property def user_cache_dir(self) -> str: """ :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` """ path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) return self._append_parts(path, opinion_value="Cache") @property def user_state_dir(self) -> str: """:return: state directory tied to the user, same as `user_data_dir`""" return self.user_data_dir @property def user_log_dir(self) -> str: """ :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it """ path = self.user_data_dir if self.opinion: path = os.path.join(path, "Logs") return path @property def user_documents_dir(self) -> str: """ :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` """ return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) @property def user_runtime_dir(self) -> str: """ :return: runtime directory tied to the user, e.g. 
``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` """ path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) return self._append_parts(path) def get_win_folder_from_env_vars(csidl_name: str) -> str: """Get folder from environment variables.""" if csidl_name == "CSIDL_PERSONAL": # does not have an environment name return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") env_var_name = { "CSIDL_APPDATA": "APPDATA", "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", }.get(csidl_name) if env_var_name is None: raise ValueError(f"Unknown CSIDL name: {csidl_name}") result = os.environ.get(env_var_name) if result is None: raise ValueError(f"Unset environment variable: {env_var_name}") return result def get_win_folder_from_registry(csidl_name: str) -> str: """Get folder from the registry. This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. """ shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", "CSIDL_PERSONAL": "Personal", }.get(csidl_name) if shell_folder_name is None: raise ValueError(f"Unknown CSIDL name: {csidl_name}") if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows raise NotImplementedError import winreg key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") directory, _ = winreg.QueryValueEx(key, shell_folder_name) return str(directory) def get_win_folder_via_ctypes(csidl_name: str) -> str: """Get folder with ctypes.""" csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, "CSIDL_PERSONAL": 5, }.get(csidl_name) if csidl_const is None: raise ValueError(f"Unknown CSIDL name: {csidl_name}") buf = ctypes.create_unicode_buffer(1024) windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if it has highbit chars. if any(ord(c) > 255 for c in buf): buf2 = ctypes.create_unicode_buffer(1024) if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def _pick_get_win_folder() -> Callable[[str], str]: if hasattr(ctypes, "windll"): return get_win_folder_via_ctypes try: import winreg # noqa: F401 except ImportError: return get_win_folder_from_env_vars else: return get_win_folder_from_registry get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) __all__ = [ "Windows", ]
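
# A small sketch of the folder resolution above; it only runs on Windows,
# and "MyApp"/"MyCompany" are placeholder names.
if __name__ == "__main__":
    if sys.platform == "win32":
        dirs = Windows(appname="MyApp", appauthor="MyCompany", roaming=True)
        print(dirs.user_data_dir)  # %APPDATA%\MyCompany\MyApp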
6,596
Python
34.659459
119
0.631747
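A minimal usage sketch of the ``Windows`` backend above. Hedged: it assumes a Windows host (so the CSIDL lookups resolve) and the standalone ``platformdirs`` import path rather than this vendored copy; ``MyApp``/``MyCompany`` and the commented paths are illustrative.

from platformdirs.windows import Windows

dirs = Windows(appname="MyApp", appauthor="MyCompany", version="1.0")
print(dirs.user_data_dir)   # %USERPROFILE%\AppData\Local\MyCompany\MyApp\1.0 (not roaming)
print(dirs.user_cache_dir)  # ...\MyCompany\MyApp\Cache\1.0 (the opinionated extra folder)
print(dirs.user_log_dir)    # user_data_dir plus a trailing "Logs" component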
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/api.py
from __future__ import annotations import os import sys from abc import ABC, abstractmethod from pathlib import Path if sys.version_info >= (3, 8): # pragma: no branch from typing import Literal # pragma: no cover class PlatformDirsABC(ABC): """ Abstract base class for platform directories. """ def __init__( self, appname: str | None = None, appauthor: str | None | Literal[False] = None, version: str | None = None, roaming: bool = False, multipath: bool = False, opinion: bool = True, ): """ Create a new platform directory. :param appname: See `appname`. :param appauthor: See `appauthor`. :param version: See `version`. :param roaming: See `roaming`. :param multipath: See `multipath`. :param opinion: See `opinion`. """ self.appname = appname #: The name of the application. self.appauthor = appauthor """ The name of the app author or distributing body for this application. Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it. """ self.version = version """ An optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``. """ self.roaming = roaming """ Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup for roaming profiles, this user data will be synced on login (see `here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_). """ self.multipath = multipath """ An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be returned. By default, only the first item is returned. """ self.opinion = opinion #: A flag indicating whether to use opinionated values. 
def _append_app_name_and_version(self, *base: str) -> str: params = list(base[1:]) if self.appname: params.append(self.appname) if self.version: params.append(self.version) return os.path.join(base[0], *params) @property @abstractmethod def user_data_dir(self) -> str: """:return: data directory tied to the user""" @property @abstractmethod def site_data_dir(self) -> str: """:return: data directory shared by users""" @property @abstractmethod def user_config_dir(self) -> str: """:return: config directory tied to the user""" @property @abstractmethod def site_config_dir(self) -> str: """:return: config directory shared by the users""" @property @abstractmethod def user_cache_dir(self) -> str: """:return: cache directory tied to the user""" @property @abstractmethod def user_state_dir(self) -> str: """:return: state directory tied to the user""" @property @abstractmethod def user_log_dir(self) -> str: """:return: log directory tied to the user""" @property @abstractmethod def user_documents_dir(self) -> str: """:return: documents directory tied to the user""" @property @abstractmethod def user_runtime_dir(self) -> str: """:return: runtime directory tied to the user""" @property def user_data_path(self) -> Path: """:return: data path tied to the user""" return Path(self.user_data_dir) @property def site_data_path(self) -> Path: """:return: data path shared by users""" return Path(self.site_data_dir) @property def user_config_path(self) -> Path: """:return: config path tied to the user""" return Path(self.user_config_dir) @property def site_config_path(self) -> Path: """:return: config path shared by the users""" return Path(self.site_config_dir) @property def user_cache_path(self) -> Path: """:return: cache path tied to the user""" return Path(self.user_cache_dir) @property def user_state_path(self) -> Path: """:return: state path tied to the user""" return Path(self.user_state_dir) @property def user_log_path(self) -> Path: """:return: log path tied to the user""" return Path(self.user_log_dir) @property def user_documents_path(self) -> Path: """:return: documents path tied to the user""" return Path(self.user_documents_dir) @property def user_runtime_path(self) -> Path: """:return: runtime path tied to the user""" return Path(self.user_runtime_dir)
4,910
Python
30.280255
120
0.605703
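To make the contract above concrete, here is a toy backend satisfying every abstract property. ``FlatDirs`` is hypothetical and exists purely for illustration (real backends such as Windows and MacOS differ per platform); the import path assumes the standalone ``platformdirs`` distribution.

import os
from platformdirs.api import PlatformDirsABC

class FlatDirs(PlatformDirsABC):
    """Hypothetical backend rooting everything under ~/.flat."""

    @property
    def user_data_dir(self) -> str:
        return self._append_app_name_and_version(os.path.expanduser("~/.flat"))

    @property
    def site_data_dir(self) -> str:
        return self._append_app_name_and_version("/srv/flat")

    # The remaining abstract properties are satisfied by reusing the two above
    # or by simple joins, mirroring how the real backends alias directories.
    @property
    def user_config_dir(self) -> str: return self.user_data_dir
    @property
    def site_config_dir(self) -> str: return self.site_data_dir
    @property
    def user_state_dir(self) -> str: return self.user_data_dir
    @property
    def user_cache_dir(self) -> str: return os.path.join(self.user_data_dir, "cache")
    @property
    def user_log_dir(self) -> str: return os.path.join(self.user_data_dir, "log")
    @property
    def user_runtime_dir(self) -> str: return os.path.join(self.user_data_dir, "run")
    @property
    def user_documents_dir(self) -> str: return os.path.expanduser("~/Documents")

dirs = FlatDirs(appname="MyApp", version="1.0")
print(dirs.user_data_dir)   # ~/.flat/MyApp/1.0 (expanded)
print(dirs.user_data_path)  # the same location, wrapped as a pathlib.Path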
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/__main__.py
from __future__ import annotations from platformdirs import PlatformDirs, __version__ PROPS = ( "user_data_dir", "user_config_dir", "user_cache_dir", "user_state_dir", "user_log_dir", "user_documents_dir", "user_runtime_dir", "site_data_dir", "site_config_dir", ) def main() -> None: app_name = "MyApp" app_author = "MyCompany" print(f"-- platformdirs {__version__} --") print("-- app dirs (with optional 'version')") dirs = PlatformDirs(app_name, app_author, version="1.0") for prop in PROPS: print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (without optional 'version')") dirs = PlatformDirs(app_name, app_author) for prop in PROPS: print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (without optional 'appauthor')") dirs = PlatformDirs(app_name) for prop in PROPS: print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (with disabled 'appauthor')") dirs = PlatformDirs(app_name, appauthor=False) for prop in PROPS: print(f"{prop}: {getattr(dirs, prop)}") if __name__ == "__main__": main()
1,164
Python
23.787234
60
0.593643
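The demo above also serves as the package's command-line entry point. With the standalone ``platformdirs`` distribution (not this vendored copy) the equivalent invocation is ``python -m platformdirs``; programmatically it is simply:

from platformdirs.__main__ import main

main()  # prints every property in PROPS for MyApp/MyCompany, in the four configurations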
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/macos.py
from __future__ import annotations import os from .api import PlatformDirsABC class MacOS(PlatformDirsABC): """ Platform directories for the macOS operating system. Follows the guidance from `Apple documentation <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_. Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and `version <platformdirs.api.PlatformDirsABC.version>`. """ @property def user_data_dir(self) -> str: """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``""" return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/")) @property def site_data_dir(self) -> str: """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``""" return self._append_app_name_and_version("/Library/Application Support") @property def user_config_dir(self) -> str: """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``""" return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/")) @property def site_config_dir(self) -> str: """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``""" return self._append_app_name_and_version("/Library/Preferences") @property def user_cache_dir(self) -> str: """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``""" return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) @property def user_state_dir(self) -> str: """:return: state directory tied to the user, same as `user_data_dir`""" return self.user_data_dir @property def user_log_dir(self) -> str: """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``""" return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) @property def user_documents_dir(self) -> str: """:return: documents directory tied to the user, e.g. ``~/Documents``""" return os.path.expanduser("~/Documents") @property def user_runtime_dir(self) -> str: """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``""" return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) __all__ = [ "MacOS", ]
2,655
Python
39.861538
160
0.666667
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_structures.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. class InfinityType: def __repr__(self) -> str: return "Infinity" def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return False def __le__(self, other: object) -> bool: return False def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) def __gt__(self, other: object) -> bool: return True def __ge__(self, other: object) -> bool: return True def __neg__(self: object) -> "NegativeInfinityType": return NegativeInfinity Infinity = InfinityType() class NegativeInfinityType: def __repr__(self) -> str: return "-Infinity" def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return True def __le__(self, other: object) -> bool: return True def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) def __gt__(self, other: object) -> bool: return False def __ge__(self, other: object) -> bool: return False def __neg__(self: object) -> InfinityType: return Infinity NegativeInfinity = NegativeInfinityType()
1,431
Python
22.096774
79
0.587002
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/requirements.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. import urllib.parse from typing import Any, List, Optional, Set from ._parser import parse_requirement as _parse_requirement from ._tokenizer import ParserSyntaxError from .markers import Marker, _normalize_extra_values from .specifiers import SpecifierSet class InvalidRequirement(ValueError): """ An invalid requirement was found, users should refer to PEP 508. """ class Requirement: """Parse a requirement. Parse a given requirement string into its parts, such as name, specifier, URL, and extras. Raises InvalidRequirement on a badly-formed requirement string. """ # TODO: Can we test whether something is contained within a requirement? # If so how do we do that? Do we need to test against the _name_ of # the thing as well as the version? What about the markers? # TODO: Can we normalize the name and extra name? def __init__(self, requirement_string: str) -> None: try: parsed = _parse_requirement(requirement_string) except ParserSyntaxError as e: raise InvalidRequirement(str(e)) from e self.name: str = parsed.name if parsed.url: parsed_url = urllib.parse.urlparse(parsed.url) if parsed_url.scheme == "file": if urllib.parse.urlunparse(parsed_url) != parsed.url: raise InvalidRequirement("Invalid URL given") elif not (parsed_url.scheme and parsed_url.netloc) or ( not parsed_url.scheme and not parsed_url.netloc ): raise InvalidRequirement(f"Invalid URL: {parsed.url}") self.url: Optional[str] = parsed.url else: self.url = None self.extras: Set[str] = set(parsed.extras if parsed.extras else []) self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) self.marker: Optional[Marker] = None if parsed.marker is not None: self.marker = Marker.__new__(Marker) self.marker._markers = _normalize_extra_values(parsed.marker) def __str__(self) -> str: parts: List[str] = [self.name] if self.extras: formatted_extras = ",".join(sorted(self.extras)) parts.append(f"[{formatted_extras}]") if self.specifier: parts.append(str(self.specifier)) if self.url: parts.append(f"@ {self.url}") if self.marker: parts.append(" ") if self.marker: parts.append(f"; {self.marker}") return "".join(parts) def __repr__(self) -> str: return f"<Requirement('{self}')>" def __hash__(self) -> int: return hash((self.__class__.__name__, str(self))) def __eq__(self, other: Any) -> bool: if not isinstance(other, Requirement): return NotImplemented return ( self.name == other.name and self.extras == other.extras and self.specifier == other.specifier and self.url == other.url and self.marker == other.marker )
3,287
Python
33.25
79
0.607545
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_tokenizer.py
import contextlib import re from dataclasses import dataclass from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union from .specifiers import Specifier @dataclass class Token: name: str text: str position: int class ParserSyntaxError(Exception): """The provided source text could not be parsed correctly.""" def __init__( self, message: str, *, source: str, span: Tuple[int, int], ) -> None: self.span = span self.message = message self.source = source super().__init__() def __str__(self) -> str: marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" return "\n ".join([self.message, self.source, marker]) DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { "LEFT_PARENTHESIS": r"\(", "RIGHT_PARENTHESIS": r"\)", "LEFT_BRACKET": r"\[", "RIGHT_BRACKET": r"\]", "SEMICOLON": r";", "COMMA": r",", "QUOTED_STRING": re.compile( r""" ( ('[^']*') | ("[^"]*") ) """, re.VERBOSE, ), "OP": r"(===|==|~=|!=|<=|>=|<|>)", "BOOLOP": r"\b(or|and)\b", "IN": r"\bin\b", "NOT": r"\bnot\b", "VARIABLE": re.compile( r""" \b( python_version |python_full_version |os[._]name |sys[._]platform |platform_(release|system) |platform[._](version|machine|python_implementation) |python_implementation |implementation_(name|version) |extra )\b """, re.VERBOSE, ), "SPECIFIER": re.compile( Specifier._operator_regex_str + Specifier._version_regex_str, re.VERBOSE | re.IGNORECASE, ), "AT": r"\@", "URL": r"[^ \t]+", "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", "VERSION_PREFIX_TRAIL": r"\.\*", "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", "WS": r"[ \t]+", "END": r"$", } class Tokenizer: """Context-sensitive token parsing. Provides methods to examine the input stream to check whether the next token matches. """ def __init__( self, source: str, *, rules: "Dict[str, Union[str, re.Pattern[str]]]", ) -> None: self.source = source self.rules: Dict[str, re.Pattern[str]] = { name: re.compile(pattern) for name, pattern in rules.items() } self.next_token: Optional[Token] = None self.position = 0 def consume(self, name: str) -> None: """Move beyond provided token name, if at current position.""" if self.check(name): self.read() def check(self, name: str, *, peek: bool = False) -> bool: """Check whether the next token has the provided name. By default, if the check succeeds, the token *must* be read before another check. If `peek` is set to `True`, the token is not loaded and would need to be checked again. """ assert ( self.next_token is None ), f"Cannot check for {name!r}, already have {self.next_token!r}" assert name in self.rules, f"Unknown token name: {name!r}" expression = self.rules[name] match = expression.match(self.source, self.position) if match is None: return False if not peek: self.next_token = Token(name, match[0], self.position) return True def expect(self, name: str, *, expected: str) -> Token: """Expect a certain token name next, failing with a syntax error otherwise. The token is *not* read. 
""" if not self.check(name): raise self.raise_syntax_error(f"Expected {expected}") return self.read() def read(self) -> Token: """Consume the next token and return it.""" token = self.next_token assert token is not None self.position += len(token.text) self.next_token = None return token def raise_syntax_error( self, message: str, *, span_start: Optional[int] = None, span_end: Optional[int] = None, ) -> NoReturn: """Raise ParserSyntaxError at the given position.""" span = ( self.position if span_start is None else span_start, self.position if span_end is None else span_end, ) raise ParserSyntaxError( message, source=self.source, span=span, ) @contextlib.contextmanager def enclosing_tokens( self, open_token: str, close_token: str, *, around: str ) -> Iterator[None]: if self.check(open_token): open_position = self.position self.read() else: open_position = None yield if open_position is None: return if not self.check(close_token): self.raise_syntax_error( f"Expected matching {close_token} for {open_token}, after {around}", span_start=open_position, ) self.read()
5,292
Python
26.42487
84
0.520975
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/specifiers.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. """ .. testsetup:: from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier from packaging.version import Version """ import abc import itertools import re from typing import ( Callable, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, ) from .utils import canonicalize_version from .version import Version UnparsedVersion = Union[Version, str] UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) CallableOperator = Callable[[Version, str], bool] def _coerce_version(version: UnparsedVersion) -> Version: if not isinstance(version, Version): version = Version(version) return version class InvalidSpecifier(ValueError): """ Raised when attempting to create a :class:`Specifier` with a specifier string that is invalid. >>> Specifier("lolwat") Traceback (most recent call last): ... packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' """ class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod def __str__(self) -> str: """ Returns the str representation of this Specifier-like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self) -> int: """ Returns a hash value for this Specifier-like object. """ @abc.abstractmethod def __eq__(self, other: object) -> bool: """ Returns a boolean representing whether or not the two Specifier-like objects are equal. :param other: The other object to check against. """ @property @abc.abstractmethod def prereleases(self) -> Optional[bool]: """Whether or not pre-releases as a whole are allowed. This can be set to either ``True`` or ``False`` to explicitly enable or disable prereleases or it can be set to ``None`` (the default) to use default semantics. """ @prereleases.setter def prereleases(self, value: bool) -> None: """Setter for :attr:`prereleases`. :param value: The value to set. """ @abc.abstractmethod def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None ) -> Iterator[UnparsedVersionVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ class Specifier(BaseSpecifier): """This class abstracts handling of version specifiers. .. tip:: It is generally not required to instantiate this manually. You should instead prefer to work with :class:`SpecifierSet` instead, which can parse comma-separated version specifiers (which is what package metadata contains). """ _operator_regex_str = r""" (?P<operator>(~=|==|!=|<=|>=|<|>|===)) """ _version_regex_str = r""" (?P<version> (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s;)]* # The arbitrary version can be just about anything, # we match everything except for whitespace, a # semi-colon for marker support, and a closing paren # since versions can be enclosed in them. 
) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release # You cannot use a wild card and a pre-release, post-release, a dev or # local version together so group them with a | and make them optional. (?: \.\* # Wild card syntax of .* | (?: # pre release [-_\.]? (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?<!==|!=|~=) # We have special cases for these # operators so we want to make sure they # don't match here. \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) ) """ _regex = re.compile( r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE, ) _operators = { "~=": "compatible", "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", "===": "arbitrary", } def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: """Initialize a Specifier instance. :param spec: The string representation of a specifier which will be parsed and normalized before use. :param prereleases: This tells the specifier if it should accept prerelease versions if applicable or not. The default of ``None`` will autodetect it from the given specifiers. :raises InvalidSpecifier: If the given specifier is invalid (i.e. bad syntax). """ match = self._regex.search(spec) if not match: raise InvalidSpecifier(f"Invalid specifier: '{spec}'") self._spec: Tuple[str, str] = ( match.group("operator").strip(), match.group("version").strip(), ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 @property # type: ignore[override] def prereleases(self) -> bool: # If there is an explicit prereleases set for this, then we'll just # blindly use that. if self._prereleases is not None: return self._prereleases # Look at all of our specifiers and determine if they are inclusive # operators, and if they are if they are including an explicit # prerelease. operator, version = self._spec if operator in ["==", ">=", "<=", "~=", "==="]: # The == specifier can include a trailing .*, if it does we # want to remove before parsing. if operator == "==" and version.endswith(".*"): version = version[:-2] # Parse the version, and if it is a pre-release than this # specifier allows pre-releases. 
if Version(version).is_prerelease: return True return False @prereleases.setter def prereleases(self, value: bool) -> None: self._prereleases = value @property def operator(self) -> str: """The operator of this specifier. >>> Specifier("==1.2.3").operator '==' """ return self._spec[0] @property def version(self) -> str: """The version of this specifier. >>> Specifier("==1.2.3").version '1.2.3' """ return self._spec[1] def __repr__(self) -> str: """A representation of the Specifier that shows all internal state. >>> Specifier('>=1.0.0') <Specifier('>=1.0.0')> >>> Specifier('>=1.0.0', prereleases=False) <Specifier('>=1.0.0', prereleases=False)> >>> Specifier('>=1.0.0', prereleases=True) <Specifier('>=1.0.0', prereleases=True)> """ pre = ( f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) return f"<{self.__class__.__name__}({str(self)!r}{pre})>" def __str__(self) -> str: """A string representation of the Specifier that can be round-tripped. >>> str(Specifier('>=1.0.0')) '>=1.0.0' >>> str(Specifier('>=1.0.0', prereleases=False)) '>=1.0.0' """ return "{}{}".format(*self._spec) @property def _canonical_spec(self) -> Tuple[str, str]: canonical_version = canonicalize_version( self._spec[1], strip_trailing_zero=(self._spec[0] != "~="), ) return self._spec[0], canonical_version def __hash__(self) -> int: return hash(self._canonical_spec) def __eq__(self, other: object) -> bool: """Whether or not the two Specifier-like objects are equal. :param other: The other object to check against. The value of :attr:`prereleases` is ignored. >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") True >>> (Specifier("==1.2.3", prereleases=False) == ... Specifier("==1.2.3", prereleases=True)) True >>> Specifier("==1.2.3") == "==1.2.3" True >>> Specifier("==1.2.3") == Specifier("==1.2.4") False >>> Specifier("==1.2.3") == Specifier("~=1.2.3") False """ if isinstance(other, str): try: other = self.__class__(str(other)) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._canonical_spec == other._canonical_spec def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" ) return operator_callable def _compare_compatible(self, prospective: Version, spec: str) -> bool: # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to # implement this in terms of the other specifiers instead of # implementing it ourselves. The only thing we need to do is construct # the other specifiers. # We want everything but the last item in the version, but we want to # ignore suffix segments. prefix = ".".join( list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) # Add the prefix notation to the end of our string prefix += ".*" return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( prospective, prefix ) def _compare_equal(self, prospective: Version, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. 
normalized_prospective = canonicalize_version( prospective.public, strip_trailing_zero=False ) # Get the normalized version string ignoring the trailing .* normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. split_spec = _version_split(normalized_spec) # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. split_prospective = _version_split(normalized_prospective) # 0-pad the prospective version before shortening it to get the correct # shortened version. padded_prospective, _ = _pad_version(split_prospective, split_spec) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. shortened_prospective = padded_prospective[: len(split_spec)] return shortened_prospective == split_spec else: # Convert our spec string into a Version spec_version = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. if not spec_version.local: prospective = Version(prospective.public) return prospective == spec_version def _compare_not_equal(self, prospective: Version, spec: str) -> bool: return not self._compare_equal(prospective, spec) def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) <= Version(spec) def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) >= Version(spec) def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec_str) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective < spec: return False # This special case is here so that, unless the specifier itself # includes is a pre-release version, that we do not accept pre-release # versions for the version mentioned in the specifier (e.g. <3.1 should # not match 3.1.dev0, but should match 3.0.dev0). if not spec.is_prerelease and prospective.is_prerelease: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # less than the spec version *and* it's not a pre-release of the same # version in the spec. return True def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec_str) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. 
if not prospective > spec: return False # This special case is here so that, unless the specifier itself # includes is a post-release version, that we do not accept # post-release versions for the version mentioned in the specifier # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). if not spec.is_postrelease and prospective.is_postrelease: if Version(prospective.base_version) == Version(spec.base_version): return False # Ensure that we do not allow a local version of the version mentioned # in the specifier, which is technically greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # greater than the spec version *and* it's not a pre-release of the # same version in the spec. return True def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: return str(prospective).lower() == str(spec).lower() def __contains__(self, item: Union[str, Version]) -> bool: """Return whether or not the item is contained in this specifier. :param item: The item to check for. This is used for the ``in`` operator and behaves the same as :meth:`contains` with no ``prereleases`` argument passed. >>> "1.2.3" in Specifier(">=1.2.3") True >>> Version("1.2.3") in Specifier(">=1.2.3") True >>> "1.0.0" in Specifier(">=1.2.3") False >>> "1.3.0a1" in Specifier(">=1.2.3") False >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) True """ return self.contains(item) def contains( self, item: UnparsedVersion, prereleases: Optional[bool] = None ) -> bool: """Return whether or not the item is contained in this specifier. :param item: The item to check for, which can be a version string or a :class:`Version` instance. :param prereleases: Whether or not to match prereleases with this Specifier. If set to ``None`` (the default), it uses :attr:`prereleases` to determine whether or not prereleases are allowed. >>> Specifier(">=1.2.3").contains("1.2.3") True >>> Specifier(">=1.2.3").contains(Version("1.2.3")) True >>> Specifier(">=1.2.3").contains("1.0.0") False >>> Specifier(">=1.2.3").contains("1.3.0a1") False >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") True >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) True """ # Determine if prereleases are to be allowed or not. if prereleases is None: prereleases = self.prereleases # Normalize item to a Version, this allows us to have a shortcut for # "2.0" in Specifier(">=2") normalized_item = _coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. if normalized_item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. operator_callable: CallableOperator = self._get_operator(self.operator) return operator_callable(normalized_item, self.version) def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None ) -> Iterator[UnparsedVersionVar]: """Filter items in the given iterable, that match the specifier. :param iterable: An iterable that can contain version strings and :class:`Version` instances. The items in the iterable will be filtered according to the specifier. :param prereleases: Whether or not to allow prereleases in the returned iterator. 
If set to ``None`` (the default), it will be intelligently decide whether to allow prereleases or not (based on the :attr:`prereleases` attribute, and whether the only versions matching are prereleases). This method is smarter than just ``filter(Specifier().contains, [...])`` because it implements the rule from :pep:`440` that a prerelease item SHOULD be accepted if no other versions match the given specifier. >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) ['1.3'] >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) ['1.2.3', '1.3', <Version('1.4')>] >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) ['1.5a1'] >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] """ yielded = False found_prereleases = [] kw = {"prereleases": prereleases if prereleases is not None else True} # Attempt to iterate over all the values in the iterable and if any of # them match, yield them. for version in iterable: parsed_version = _coerce_version(version) if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later in case nothing # else matches this specifier. if parsed_version.is_prerelease and not ( prereleases or self.prereleases ): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the beginning. else: yielded = True yield version # Now that we've iterated over everything, determine if we've yielded # any values, and if we have not and we have any prereleases stored up # then we will go ahead and yield the prereleases. if not yielded and found_prereleases: for version in found_prereleases: yield version _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") def _version_split(version: str) -> List[str]: result: List[str] = [] for item in version.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) else: result.append(item) return result def _is_not_suffix(segment: str) -> bool: return not any( segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") ) def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: left_split, right_split = [], [] # Get the release segment of our versions left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions left_split.append(left[len(left_split[0]) :]) right_split.append(right[len(right_split[0]) :]) # Insert our padding left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) class SpecifierSet(BaseSpecifier): """This class abstracts handling of a set of version specifiers. It can be passed a single specifier (``>=3.0``), a comma-separated list of specifiers (``>=3.0,!=3.1``), or no specifier at all. """ def __init__( self, specifiers: str = "", prereleases: Optional[bool] = None ) -> None: """Initialize a SpecifierSet instance. :param specifiers: The string representation of a specifier or a comma-separated list of specifiers which will be parsed and normalized before use. 
:param prereleases: This tells the SpecifierSet if it should accept prerelease versions if applicable or not. The default of ``None`` will autodetect it from the given specifiers. :raises InvalidSpecifier: If the given ``specifiers`` are not parseable than this exception will be raised. """ # Split on `,` to break each individual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a # Specifier. parsed: Set[Specifier] = set() for specifier in split_specifiers: parsed.add(Specifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. self._prereleases = prereleases @property def prereleases(self) -> Optional[bool]: # If we have been given an explicit prerelease modifier, then we'll # pass that through here. if self._prereleases is not None: return self._prereleases # If we don't have any specifiers, and we don't have a forced value, # then we'll just return None since we don't know if this should have # pre-releases or not. if not self._specs: return None # Otherwise we'll see if any of the given specifiers accept # prereleases, if any of them do we'll return True, otherwise False. return any(s.prereleases for s in self._specs) @prereleases.setter def prereleases(self, value: bool) -> None: self._prereleases = value def __repr__(self) -> str: """A representation of the specifier set that shows all internal state. Note that the ordering of the individual specifiers within the set may not match the input string. >>> SpecifierSet('>=1.0.0,!=2.0.0') <SpecifierSet('!=2.0.0,>=1.0.0')> >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)> >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)> """ pre = ( f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) return f"<SpecifierSet({str(self)!r}{pre})>" def __str__(self) -> str: """A string representation of the specifier set that can be round-tripped. Note that the ordering of the individual specifiers within the set may not match the input string. >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) '!=1.0.1,>=1.0.0' >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) '!=1.0.1,>=1.0.0' """ return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self) -> int: return hash(self._specs) def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": """Return a SpecifierSet which is a combination of the two sets. :param other: The other object to combine with. 
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')> >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')> """ if isinstance(other, str): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented specifier = SpecifierSet() specifier._specs = frozenset(self._specs | other._specs) if self._prereleases is None and other._prereleases is not None: specifier._prereleases = other._prereleases elif self._prereleases is not None and other._prereleases is None: specifier._prereleases = self._prereleases elif self._prereleases == other._prereleases: specifier._prereleases = self._prereleases else: raise ValueError( "Cannot combine SpecifierSets with True and False prerelease " "overrides." ) return specifier def __eq__(self, other: object) -> bool: """Whether or not the two SpecifierSet-like objects are equal. :param other: The other object to check against. The value of :attr:`prereleases` is ignored. >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") True >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) True >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" True >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") False >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") False """ if isinstance(other, (str, Specifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs == other._specs def __len__(self) -> int: """Returns the number of specifiers in this specifier set.""" return len(self._specs) def __iter__(self) -> Iterator[Specifier]: """ Returns an iterator over all the underlying :class:`Specifier` instances in this specifier set. >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>] """ return iter(self._specs) def __contains__(self, item: UnparsedVersion) -> bool: """Return whether or not the item is contained in this specifier. :param item: The item to check for. This is used for the ``in`` operator and behaves the same as :meth:`contains` with no ``prereleases`` argument passed. >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") True >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") True >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") False >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") False >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) True """ return self.contains(item) def contains( self, item: UnparsedVersion, prereleases: Optional[bool] = None, installed: Optional[bool] = None, ) -> bool: """Return whether or not the item is contained in this SpecifierSet. :param item: The item to check for, which can be a version string or a :class:`Version` instance. :param prereleases: Whether or not to match prereleases with this SpecifierSet. If set to ``None`` (the default), it uses :attr:`prereleases` to determine whether or not prereleases are allowed. 
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") False >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") False >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) True """ # Ensure that our item is a Version instance. if not isinstance(item, Version): item = Version(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # We can determine if we're going to allow pre-releases by looking to # see if any of the underlying items supports them. If none of them do # and this item is a pre-release then we do not allow it and we can # short circuit that here. # Note: This means that 1.0.dev1 would not be contained in something # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 if not prereleases and item.is_prerelease: return False if installed and item.is_prerelease: item = Version(item.base_version) # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None ) -> Iterator[UnparsedVersionVar]: """Filter items in the given iterable, that match the specifiers in this set. :param iterable: An iterable that can contain version strings and :class:`Version` instances. The items in the iterable will be filtered according to the specifier. :param prereleases: Whether or not to allow prereleases in the returned iterator. If set to ``None`` (the default), it will be intelligently decide whether to allow prereleases or not (based on the :attr:`prereleases` attribute, and whether the only versions matching are prereleases). This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` because it implements the rule from :pep:`440` that a prerelease item SHOULD be accepted if no other versions match the given specifier. >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) ['1.3'] >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) ['1.3', <Version('1.4')>] >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) [] >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] An "empty" SpecifierSet will filter items based on the presence of prerelease versions in the set. >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) ['1.3'] >>> list(SpecifierSet("").filter(["1.5a1"])) ['1.5a1'] >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] """ # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. 
if prereleases is None: prereleases = self.prereleases # If we have any specifiers, then we want to wrap our iterable in the # filter method for each one, this will act as a logical AND amongst # each specifier. if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) return iter(iterable) # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final # releases. else: filtered: List[UnparsedVersionVar] = [] found_prereleases: List[UnparsedVersionVar] = [] for item in iterable: parsed_version = _coerce_version(item) # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases if parsed_version.is_prerelease and not prereleases: if not filtered: found_prereleases.append(item) else: filtered.append(item) # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: return iter(found_prereleases) return iter(filtered)
39,206
Python
37.857284
88
0.564837
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/markers.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. import operator import os import platform import sys from typing import Any, Callable, Dict, List, Optional, Tuple, Union from ._parser import ( MarkerAtom, MarkerList, Op, Value, Variable, parse_marker as _parse_marker, ) from ._tokenizer import ParserSyntaxError from .specifiers import InvalidSpecifier, Specifier from .utils import canonicalize_name __all__ = [ "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", "Marker", "default_environment", ] Operator = Callable[[str, str], bool] class InvalidMarker(ValueError): """ An invalid marker was found, users should refer to PEP 508. """ class UndefinedComparison(ValueError): """ An invalid operation was attempted on a value that doesn't support it. """ class UndefinedEnvironmentName(ValueError): """ A name was attempted to be used that does not exist inside of the environment. """ def _normalize_extra_values(results: Any) -> Any: """ Normalize extra values. """ if isinstance(results[0], tuple): lhs, op, rhs = results[0] if isinstance(lhs, Variable) and lhs.value == "extra": normalized_extra = canonicalize_name(rhs.value) rhs = Value(normalized_extra) elif isinstance(rhs, Variable) and rhs.value == "extra": normalized_extra = canonicalize_name(lhs.value) lhs = Value(normalized_extra) results[0] = lhs, op, rhs return results def _format_marker( marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True ) -> str: assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself its own list. In that case we want to skip # the rest of this function so that we don't get extraneous () on the # outside. 
if ( isinstance(marker, list) and len(marker) == 1 and isinstance(marker[0], (list, tuple)) ): return _format_marker(marker[0]) if isinstance(marker, list): inner = (_format_marker(m, first=False) for m in marker) if first: return " ".join(inner) else: return "(" + " ".join(inner) + ")" elif isinstance(marker, tuple): return " ".join([m.serialize() for m in marker]) else: return marker _operators: Dict[str, Operator] = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _eval_op(lhs: str, op: Op, rhs: str) -> bool: try: spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: pass else: return spec.contains(lhs, prereleases=True) oper: Optional[Operator] = _operators.get(op.serialize()) if oper is None: raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") return oper(lhs, rhs) def _normalize(*values: str, key: str) -> Tuple[str, ...]: # PEP 685 – Comparison of extra names for optional distribution dependencies # https://peps.python.org/pep-0685/ # > When comparing extra names, tools MUST normalize the names being # > compared using the semantics outlined in PEP 503 for names if key == "extra": return tuple(canonicalize_name(v) for v in values) # other environment markers don't have such standards return values def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: groups: List[List[bool]] = [[]] for marker in markers: assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(_evaluate_markers(marker, environment)) elif isinstance(marker, tuple): lhs, op, rhs = marker if isinstance(lhs, Variable): environment_key = lhs.value lhs_value = environment[environment_key] rhs_value = rhs.value else: lhs_value = lhs.value environment_key = rhs.value rhs_value = environment[environment_key] lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: assert marker in ["and", "or"] if marker == "or": groups.append([]) return any(all(item) for item in groups) def format_full_version(info: "sys._version_info") -> str: version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel if kind != "final": version += kind[0] + str(info.serial) return version def default_environment() -> Dict[str, str]: iver = format_full_version(sys.implementation.version) implementation_name = sys.implementation.name return { "implementation_name": implementation_name, "implementation_version": iver, "os_name": os.name, "platform_machine": platform.machine(), "platform_release": platform.release(), "platform_system": platform.system(), "platform_version": platform.version(), "python_full_version": platform.python_version(), "platform_python_implementation": platform.python_implementation(), "python_version": ".".join(platform.python_version_tuple()[:2]), "sys_platform": sys.platform, } class Marker: def __init__(self, marker: str) -> None: # Note: We create a Marker object without calling this constructor in # packaging.requirements.Requirement. If any additional logic is # added here, make sure to mirror/adapt Requirement. 
try: self._markers = _normalize_extra_values(_parse_marker(marker)) # The attribute `_markers` can be described in terms of a recursive type: # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] # # For example, the following expression: # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") # # is parsed into: # [ # (<Variable('python_version')>, <Op('>')>, <Value('3.6')>), # 'and', # [ # (<Variable('python_version')>, <Op('==')>, <Value('3.6')>), # 'or', # (<Variable('os_name')>, <Op('==')>, <Value('unix')>) # ] # ] except ParserSyntaxError as e: raise InvalidMarker(str(e)) from e def __str__(self) -> str: return _format_marker(self._markers) def __repr__(self) -> str: return f"<Marker('{self}')>" def __hash__(self) -> int: return hash((self.__class__.__name__, str(self))) def __eq__(self, other: Any) -> bool: if not isinstance(other, Marker): return NotImplemented return str(self) == str(other) def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process. """ current_environment = default_environment() current_environment["extra"] = "" if environment is not None: current_environment.update(environment) # The API used to allow setting extra to None. We need to handle this # case for backwards compatibility. if current_environment["extra"] is None: current_environment["extra"] = "" return _evaluate_markers(self._markers, current_environment)
8,206
Python
31.438735
88
0.597977
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/__init__.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" __version__ = "23.1" __author__ = "Donald Stufft and individual contributors" __email__ = "[email protected]" __license__ = "BSD-2-Clause or Apache-2.0" __copyright__ = "2014-2019 %s" % __author__
501
Python
30.374998
79
0.682635
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/version.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. """ .. testsetup:: from packaging.version import parse, Version """ import collections import itertools import re from typing import Any, Callable, Optional, SupportsInt, Tuple, Union from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType __all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] InfiniteTypes = Union[InfinityType, NegativeInfinityType] PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] SubLocalType = Union[InfiniteTypes, int, str] LocalType = Union[ NegativeInfinityType, Tuple[ Union[ SubLocalType, Tuple[SubLocalType, str], Tuple[NegativeInfinityType, SubLocalType], ], ..., ], ] CmpKey = Tuple[ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType ] VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"] ) def parse(version: str) -> "Version": """Parse the given version string. >>> parse('1.0.dev1') <Version('1.0.dev1')> :param version: The version string to parse. :raises InvalidVersion: When the version string is not a valid version. """ return Version(version) class InvalidVersion(ValueError): """Raised when a version string is not a valid version. >>> Version("invalid") Traceback (most recent call last): ... packaging.version.InvalidVersion: Invalid version: 'invalid' """ class _BaseVersion: _key: Tuple[Any, ...] def __hash__(self) -> int: return hash(self._key) # Please keep the duplicated `isinstance` check # in the six comparisons hereunder # unless you find a way to avoid adding overhead function calls. def __lt__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key < other._key def __le__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key <= other._key def __eq__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key == other._key def __ge__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key >= other._key def __gt__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key > other._key def __ne__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key != other._key # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse _VERSION_PATTERN = r""" v? (?: (?:(?P<epoch>[0-9]+)!)? # epoch (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment (?P<pre> # pre-release [-_\.]? (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) [-_\.]? (?P<pre_n>[0-9]+)? )? (?P<post> # post release (?:-(?P<post_n1>[0-9]+)) | (?: [-_\.]? (?P<post_l>post|rev|r) [-_\.]? (?P<post_n2>[0-9]+)? ) )? (?P<dev> # dev release [-_\.]? (?P<dev_l>dev) [-_\.]? (?P<dev_n>[0-9]+)? )? ) (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version """ VERSION_PATTERN = _VERSION_PATTERN """ A string containing the regular expression used to match a valid version. The pattern is not anchored at either end, and is intended for embedding in larger expressions (for example, matching a version number as part of a file name). 
The regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE`` flags set. :meta hide-value: """ class Version(_BaseVersion): """This class abstracts handling of a project's versions. A :class:`Version` instance is comparison aware and can be compared and sorted using the standard Python interfaces. >>> v1 = Version("1.0a5") >>> v2 = Version("1.0") >>> v1 <Version('1.0a5')> >>> v2 <Version('1.0')> >>> v1 < v2 True >>> v1 == v2 False >>> v1 > v2 False >>> v1 >= v2 False >>> v1 <= v2 True """ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) _key: CmpKey def __init__(self, version: str) -> None: """Initialize a Version object. :param version: The string representation of a version which will be parsed and normalized before use. :raises InvalidVersion: If the ``version`` does not conform to PEP 440 in any way then this exception will be raised. """ # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2") ), dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self) -> str: """A representation of the Version that shows all internal state. >>> Version('1.0.0') <Version('1.0.0')> """ return f"<Version('{self}')>" def __str__(self) -> str: """A string representation of the version that can be rounded-tripped. >>> str(Version("1.0a5")) '1.0a5' """ parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) # Pre-release if self.pre is not None: parts.append("".join(str(x) for x in self.pre)) # Post-release if self.post is not None: parts.append(f".post{self.post}") # Development release if self.dev is not None: parts.append(f".dev{self.dev}") # Local version segment if self.local is not None: parts.append(f"+{self.local}") return "".join(parts) @property def epoch(self) -> int: """The epoch of the version. >>> Version("2.0.0").epoch 0 >>> Version("1!2.0.0").epoch 1 """ _epoch: int = self._version.epoch return _epoch @property def release(self) -> Tuple[int, ...]: """The components of the "release" segment of the version. >>> Version("1.2.3").release (1, 2, 3) >>> Version("2.0.0").release (2, 0, 0) >>> Version("1!2.0.0.post0").release (2, 0, 0) Includes trailing zeroes but not the epoch or any pre-release / development / post-release suffixes. """ _release: Tuple[int, ...] = self._version.release return _release @property def pre(self) -> Optional[Tuple[str, int]]: """The pre-release segment of the version. >>> print(Version("1.2.3").pre) None >>> Version("1.2.3a1").pre ('a', 1) >>> Version("1.2.3b1").pre ('b', 1) >>> Version("1.2.3rc1").pre ('rc', 1) """ _pre: Optional[Tuple[str, int]] = self._version.pre return _pre @property def post(self) -> Optional[int]: """The post-release number of the version. 
>>> print(Version("1.2.3").post) None >>> Version("1.2.3.post1").post 1 """ return self._version.post[1] if self._version.post else None @property def dev(self) -> Optional[int]: """The development number of the version. >>> print(Version("1.2.3").dev) None >>> Version("1.2.3.dev1").dev 1 """ return self._version.dev[1] if self._version.dev else None @property def local(self) -> Optional[str]: """The local version segment of the version. >>> print(Version("1.2.3").local) None >>> Version("1.2.3+abc").local 'abc' """ if self._version.local: return ".".join(str(x) for x in self._version.local) else: return None @property def public(self) -> str: """The public portion of the version. >>> Version("1.2.3").public '1.2.3' >>> Version("1.2.3+abc").public '1.2.3' >>> Version("1.2.3+abc.dev1").public '1.2.3' """ return str(self).split("+", 1)[0] @property def base_version(self) -> str: """The "base version" of the version. >>> Version("1.2.3").base_version '1.2.3' >>> Version("1.2.3+abc").base_version '1.2.3' >>> Version("1!1.2.3+abc.dev1").base_version '1!1.2.3' The "base version" is the public version of the project without any pre or post release markers. """ parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) return "".join(parts) @property def is_prerelease(self) -> bool: """Whether this version is a pre-release. >>> Version("1.2.3").is_prerelease False >>> Version("1.2.3a1").is_prerelease True >>> Version("1.2.3b1").is_prerelease True >>> Version("1.2.3rc1").is_prerelease True >>> Version("1.2.3dev1").is_prerelease True """ return self.dev is not None or self.pre is not None @property def is_postrelease(self) -> bool: """Whether this version is a post-release. >>> Version("1.2.3").is_postrelease False >>> Version("1.2.3.post1").is_postrelease True """ return self.post is not None @property def is_devrelease(self) -> bool: """Whether this version is a development release. >>> Version("1.2.3").is_devrelease False >>> Version("1.2.3.dev1").is_devrelease True """ return self.dev is not None @property def major(self) -> int: """The first item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").major 1 """ return self.release[0] if len(self.release) >= 1 else 0 @property def minor(self) -> int: """The second item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").minor 2 >>> Version("1").minor 0 """ return self.release[1] if len(self.release) >= 2 else 0 @property def micro(self) -> int: """The third item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").micro 3 >>> Version("1").micro 0 """ return self.release[2] if len(self.release) >= 3 else 0 def _parse_letter_version( letter: str, number: Union[str, bytes, SupportsInt] ) -> Optional[Tuple[str, int]]: if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" elif letter in ["rev", "r"]: letter = "post" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 
1.0-1) letter = "post" return letter, int(number) return None _local_version_separators = re.compile(r"[\._-]") def _parse_local_version(local: str) -> Optional[LocalType]: """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) return None def _cmpkey( epoch: int, release: Tuple[int, ...], pre: Optional[Tuple[str, int]], post: Optional[Tuple[str, int]], dev: Optional[Tuple[str, int]], local: Optional[Tuple[SubLocalType]], ) -> CmpKey: # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll reverse the list, drop all the now-leading # zeros until we come to something non-zero, then re-reverse the rest back # into the correct order, make it a tuple, and use that for our sorting key. _release = tuple( reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. # We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: _pre: PrePostDevType = NegativeInfinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: _pre = Infinity else: _pre = pre # Versions without a post segment should sort before those with one. if post is None: _post: PrePostDevType = NegativeInfinity else: _post = post # Versions without a development segment should sort after those with one. if dev is None: _dev: PrePostDevType = Infinity else: _dev = dev if local is None: # Versions without a local segment should sort before those with one. _local: LocalType = NegativeInfinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP 440. # - Alphanumeric segments sort before numeric segments # - Alphanumeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly _local = tuple( (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local ) return epoch, _release, _pre, _post, _dev, _local
size: 16,326 | lang: Python | avg_line_length: 27.897345 | max_line_length: 88 | alphanum_fraction: 0.554086
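Editorial note: a minimal usage sketch of the Version API defined above (not part of the vendored file; the import path is an assumption based on where this file sits in the vendored tree).

# Hypothetical demo of the Version class above.
from pkg_resources._vendor.packaging.version import InvalidVersion, Version

v = Version("1!2.0.post1.dev3+ubuntu.1")
print(v.epoch, v.release, v.post, v.dev, v.local)
# -> 1 (2, 0) 1 3 ubuntu.1
print(v.public)        # 1!2.0.post1.dev3
print(v.base_version)  # 1!2.0

# PEP 440 ordering: dev releases sort before pre-releases, pre-releases
# before final releases, and final releases before post-releases.
assert Version("1.0.dev1") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")

try:
    Version("not a version")
except InvalidVersion as exc:
    print(exc)  # Invalid version: 'not a version'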
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/utils.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import re
from typing import FrozenSet, NewType, Tuple, Union, cast

from .tags import Tag, parse_tag
from .version import InvalidVersion, Version

BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)


class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found; users should refer to PEP 427.
    """


class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found; users should refer to the packaging
    user guide.
    """


_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")


def canonicalize_name(name: str) -> NormalizedName:
    # This is taken from PEP 503.
    value = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, value)


def canonicalize_version(
    version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version

    parts = []

    # Epoch
    if parsed.epoch != 0:
        parts.append(f"{parsed.epoch}!")

    # Release segment
    release_segment = ".".join(str(x) for x in parsed.release)
    if strip_trailing_zero:
        # NB: This strips trailing '.0's to normalize
        release_segment = re.sub(r"(\.0)+$", "", release_segment)
    parts.append(release_segment)

    # Pre-release
    if parsed.pre is not None:
        parts.append("".join(str(x) for x in parsed.pre))

    # Post-release
    if parsed.post is not None:
        parts.append(f".post{parsed.post}")

    # Development release
    if parsed.dev is not None:
        parts.append(f".dev{parsed.dev}")

    # Local version segment
    if parsed.local is not None:
        parts.append(f"+{parsed.local}")

    return "".join(parts)


def parse_wheel_filename(
    filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)
    version = Version(parts[1])
    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)


def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")

    name = canonicalize_name(name_part)
    version = Version(version_part)
    return (name, version)
size: 4,355 | lang: Python | avg_line_length: 29.676056 | max_line_length: 88 | alphanum_fraction: 0.631228
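A quick sketch of the helpers above (editorial addition; the import path is assumed from the vendored layout).

from pkg_resources._vendor.packaging.utils import (
    canonicalize_name,
    canonicalize_version,
    parse_sdist_filename,
    parse_wheel_filename,
)

print(canonicalize_name("Django_REST.framework"))  # django-rest-framework
print(canonicalize_version("1.2.0"))               # 1.2 (trailing zero stripped)
print(canonicalize_version("1.2.0", strip_trailing_zero=False))  # 1.2.0

name, version, build, tags = parse_wheel_filename("pip-23.1.2-py3-none-any.whl")
print(name, version, build)  # pip 23.1.2 ()

print(parse_sdist_filename("pip-23.1.2.tar.gz"))   # ('pip', <Version('23.1.2')>)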
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_manylinux.py
import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple

from ._elffile import EIClass, EIData, ELFFile, EMachine

EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400


# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
    try:
        with open(path, "rb") as f:
            yield ELFFile(f)
    except (OSError, TypeError, ValueError):
        yield None


def _is_linux_armhf(executable: str) -> bool:
    # hard-float ABI can be detected from the ELF header of the running
    # process
    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.Arm
            and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
            and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
        )


def _is_linux_i686(executable: str) -> bool:
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.I386
        )


def _have_compatible_abi(executable: str, arch: str) -> bool:
    if arch == "armv7l":
        return _is_linux_armhf(executable)
    if arch == "i686":
        return _is_linux_i686(executable)
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}


# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)


class _GLibCVersion(NamedTuple):
    major: int
    minor: int


def _glibc_version_string_confstr() -> Optional[str]:
    """
    Primary implementation of glibc_version_string using os.confstr.
    """
    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
    # to be broken or missing. This strategy is used in the standard library
    # platform module.
    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
    try:
        # Should be a string like "glibc 2.17".
        version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
        assert version_string is not None
        _, version = version_string.rsplit()
    except (AssertionError, AttributeError, OSError, ValueError):
        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
        return None
    return version


def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can't proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str


def _glibc_version_string() -> Optional[str]:
    """Returns glibc version string, or None if not using glibc."""
    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()


def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
    """Parse glibc version.

    We use a regexp instead of str.split because we want to discard any
    random junk that might come after the minor version -- this might happen
    in patched/forked versions of glibc (e.g. Linaro's version of glibc
    uses version strings like "2.20-2014.11"). See gh-3588.
    """
    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if not m:
        warnings.warn(
            f"Expected glibc version with 2 components major.minor,"
            f" got: {version_str}",
            RuntimeWarning,
        )
        return -1, -1
    return int(m.group("major")), int(m.group("minor"))


@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
    version_str = _glibc_version_string()
    if version_str is None:
        return (-1, -1)
    return _parse_glibc_version(version_str)


# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux  # noqa
    except ImportError:
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        return True
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True


_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}


def platform_tags(linux: str, arch: str) -> Iterator[str]:
    if not _have_compatible_abi(sys.executable, arch):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions oldest supported is (x, 0).
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)
size: 8,926 | lang: Python | avg_line_length: 36.041494 | max_line_length: 88 | alphanum_fraction: 0.647995
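These helpers are private to packaging, but the tag generator can be previewed directly; an illustrative sketch for a glibc-based x86-64 Linux host (editorial addition; the import path is assumed from the vendored layout, and the generator yields nothing on non-glibc systems).

import itertools

from pkg_resources._vendor.packaging import _manylinux

# First few manylinux tags the current interpreter would accept, newest first,
# e.g. manylinux_2_35_x86_64 ... manylinux2014_x86_64 ... manylinux1_x86_64.
for tag in itertools.islice(_manylinux.platform_tags("linux_x86_64", "x86_64"), 5):
    print(tag)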
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_musllinux.py
"""PEP 656 support. This module implements logic to detect if the currently running Python is linked against musl, and what musl version is used. """ import functools import re import subprocess import sys from typing import Iterator, NamedTuple, Optional from ._elffile import ELFFile class _MuslVersion(NamedTuple): major: int minor: int def _parse_musl_version(output: str) -> Optional[_MuslVersion]: lines = [n for n in (n.strip() for n in output.splitlines()) if n] if len(lines) < 2 or lines[0][:4] != "musl": return None m = re.match(r"Version (\d+)\.(\d+)", lines[1]) if not m: return None return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) @functools.lru_cache() def _get_musl_version(executable: str) -> Optional[_MuslVersion]: """Detect currently-running musl runtime version. This is done by checking the specified executable's dynamic linking information, and invoking the loader to parse its output for a version string. If the loader is musl, the output would be something like:: musl libc (x86_64) Version 1.2.2 Dynamic Program Loader """ try: with open(executable, "rb") as f: ld = ELFFile(f).interpreter except (OSError, TypeError, ValueError): return None if ld is None or "musl" not in ld: return None proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) return _parse_musl_version(proc.stderr) def platform_tags(arch: str) -> Iterator[str]: """Generate musllinux tags compatible to the current platform. :param arch: Should be the part of platform tag after the ``linux_`` prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a prerequisite for the current platform to be musllinux-compatible. :returns: An iterator of compatible musllinux tags. """ sys_musl = _get_musl_version(sys.executable) if sys_musl is None: # Python not dynamically linked against musl. return for minor in range(sys_musl.minor, -1, -1): yield f"musllinux_{sys_musl.major}_{minor}_{arch}" if __name__ == "__main__": # pragma: no cover import sysconfig plat = sysconfig.get_platform() assert plat.startswith("linux-"), "not linux" print("plat:", plat) print("musl:", _get_musl_version(sys.executable)) print("tags:", end=" ") for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): print(t, end="\n ")
size: 2,524 | lang: Python | avg_line_length: 30.172839 | max_line_length: 80 | alphanum_fraction: 0.650555
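An illustrative sketch (editorial addition; import path assumed from the vendored layout). The generator is empty unless the interpreter is dynamically linked against musl.

from pkg_resources._vendor.packaging import _musllinux

# On an Alpine-style system this prints musllinux_1_2_x86_64,
# musllinux_1_1_x86_64, ... down to musllinux_1_0_x86_64.
for tag in _musllinux.platform_tags("x86_64"):
    print(tag)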
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/tags.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. import logging import platform import subprocess import sys import sysconfig from importlib.machinery import EXTENSION_SUFFIXES from typing import ( Dict, FrozenSet, Iterable, Iterator, List, Optional, Sequence, Tuple, Union, cast, ) from . import _manylinux, _musllinux logger = logging.getLogger(__name__) PythonVersion = Sequence[int] MacVersion = Tuple[int, int] INTERPRETER_SHORT_NAMES: Dict[str, str] = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy", } _32_BIT_INTERPRETER = sys.maxsize <= 2**32 class Tag: """ A representation of the tag triple for a wheel. Instances are considered immutable and thus are hashable. Equality checking is also supported. """ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] def __init__(self, interpreter: str, abi: str, platform: str) -> None: self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() # The __hash__ of every single element in a Set[Tag] will be evaluated each time # that a set calls its `.disjoint()` method, which may be called hundreds of # times when scanning a page of links for packages with tags matching that # Set[Tag]. Pre-computing the value here produces significant speedups for # downstream consumers. self._hash = hash((self._interpreter, self._abi, self._platform)) @property def interpreter(self) -> str: return self._interpreter @property def abi(self) -> str: return self._abi @property def platform(self) -> str: return self._platform def __eq__(self, other: object) -> bool: if not isinstance(other, Tag): return NotImplemented return ( (self._hash == other._hash) # Short-circuit ASAP for perf reasons. and (self._platform == other._platform) and (self._abi == other._abi) and (self._interpreter == other._interpreter) ) def __hash__(self) -> int: return self._hash def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: return f"<{self} @ {id(self)}>" def parse_tag(tag: str) -> FrozenSet[Tag]: """ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. Returning a set is required due to the possibility that the tag is a compressed tag set. """ tags = set() interpreters, abis, platforms = tag.split("-") for interpreter in interpreters.split("."): for abi in abis.split("."): for platform_ in platforms.split("."): tags.add(Tag(interpreter, abi, platform_)) return frozenset(tags) def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: value: Union[int, str, None] = sysconfig.get_config_var(name) if value is None and warn: logger.debug( "Config variable '%s' is unset, Python ABI tag may be incorrect", name ) return value def _normalize_string(string: str) -> str: return string.replace(".", "_").replace("-", "_").replace(" ", "_") def _abi3_applies(python_version: PythonVersion) -> bool: """ Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2. """ return len(python_version) > 1 and tuple(python_version) >= (3, 2) def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. 
abis = [] version = _version_nodot(py_version[:2]) debug = pymalloc = ucs4 = "" with_debug = _get_config_var("Py_DEBUG", warn) has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled # extension modules is the best option. # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 has_ext = "_d.pyd" in EXTENSION_SUFFIXES if with_debug or (with_debug is None and (has_refcount or has_ext)): debug = "d" if py_version < (3, 8): with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) if with_pymalloc or with_pymalloc is None: pymalloc = "m" if py_version < (3, 3): unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) if unicode_size == 4 or ( unicode_size is None and sys.maxunicode == 0x10FFFF ): ucs4 = "u" elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. abis.append(f"cp{version}") abis.insert( 0, "cp{version}{debug}{pymalloc}{ucs4}".format( version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 ), ) return abis def cpython_tags( python_version: Optional[PythonVersion] = None, abis: Optional[Iterable[str]] = None, platforms: Optional[Iterable[str]] = None, *, warn: bool = False, ) -> Iterator[Tag]: """ Yields the tags for a CPython interpreter. The tags consist of: - cp<python_version>-<abi>-<platform> - cp<python_version>-abi3-<platform> - cp<python_version>-none-<platform> - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2. If python_version only specifies a major version then user-provided ABIs and the 'none' ABItag will be used. If 'abi3' or 'none' are specified in 'abis' then they will be yielded at their normal position and not at the beginning. """ if not python_version: python_version = sys.version_info[:2] interpreter = f"cp{_version_nodot(python_version[:2])}" if abis is None: if len(python_version) > 1: abis = _cpython_abis(python_version, warn) else: abis = [] abis = list(abis) # 'abi3' and 'none' are explicitly handled later. for explicit_abi in ("abi3", "none"): try: abis.remove(explicit_abi) except ValueError: pass platforms = list(platforms or platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) if _abi3_applies(python_version): yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) if _abi3_applies(python_version): for minor_version in range(python_version[1] - 1, 1, -1): for platform_ in platforms: interpreter = "cp{version}".format( version=_version_nodot((python_version[0], minor_version)) ) yield Tag(interpreter, "abi3", platform_) def _generic_abi() -> List[str]: """ Return the ABI tag based on EXT_SUFFIX. """ # The following are examples of `EXT_SUFFIX`. 
# We want to keep the parts which are related to the ABI and remove the # parts which are related to the platform: # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 # - mac: '.cpython-310-darwin.so' => cp310 # - win: '.cp310-win_amd64.pyd' => cp310 # - win: '.pyd' => cp37 (uses _cpython_abis()) # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' # => graalpy_38_native ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") parts = ext_suffix.split(".") if len(parts) < 3: # CPython3.7 and earlier uses ".pyd" on Windows. return _cpython_abis(sys.version_info[:2]) soabi = parts[1] if soabi.startswith("cpython"): # non-windows abi = "cp" + soabi.split("-")[1] elif soabi.startswith("cp"): # windows abi = soabi.split("-")[0] elif soabi.startswith("pypy"): abi = "-".join(soabi.split("-")[:2]) elif soabi.startswith("graalpy"): abi = "-".join(soabi.split("-")[:3]) elif soabi: # pyston, ironpython, others? abi = soabi else: return [] return [_normalize_string(abi)] def generic_tags( interpreter: Optional[str] = None, abis: Optional[Iterable[str]] = None, platforms: Optional[Iterable[str]] = None, *, warn: bool = False, ) -> Iterator[Tag]: """ Yields the tags for a generic interpreter. The tags consist of: - <interpreter>-<abi>-<platform> The "none" ABI will be added if it was not explicitly provided. """ if not interpreter: interp_name = interpreter_name() interp_version = interpreter_version(warn=warn) interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() else: abis = list(abis) platforms = list(platforms or platform_tags()) if "none" not in abis: abis.append("none") for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: """ Yields Python versions in descending order. After the latest version, the major-only version will be yielded, and then all previous versions of that major version. """ if len(py_version) > 1: yield f"py{_version_nodot(py_version[:2])}" yield f"py{py_version[0]}" if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): yield f"py{_version_nodot((py_version[0], minor))}" def compatible_tags( python_version: Optional[PythonVersion] = None, interpreter: Optional[str] = None, platforms: Optional[Iterable[str]] = None, ) -> Iterator[Tag]: """ Yields the sequence of tags that are compatible with a specific version of Python. The tags consist of: - py*-none-<platform> - <interpreter>-none-any # ... if `interpreter` is provided. 
- py*-none-any """ if not python_version: python_version = sys.version_info[:2] platforms = list(platforms or platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) if interpreter: yield Tag(interpreter, "none", "any") for version in _py_interpreter_range(python_version): yield Tag(version, "none", "any") def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: if not is_32bit: return arch if arch.startswith("ppc"): return "ppc" return "i386" def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): return [] formats.extend(["intel", "fat64", "fat32"]) elif cpu_arch == "i386": if version < (10, 4): return [] formats.extend(["intel", "fat32", "fat"]) elif cpu_arch == "ppc64": # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? if version > (10, 5) or version < (10, 4): return [] formats.append("fat64") elif cpu_arch == "ppc": if version > (10, 6): return [] formats.extend(["fat32", "fat"]) if cpu_arch in {"arm64", "x86_64"}: formats.append("universal2") if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: formats.append("universal") return formats def mac_platforms( version: Optional[MacVersion] = None, arch: Optional[str] = None ) -> Iterator[str]: """ Yields the platform tags for a macOS system. The `version` parameter is a two-item tuple specifying the macOS version to generate platform tags for. The `arch` parameter is the CPU architecture to generate platform tags for. Both parameters default to the appropriate value for the current system. """ version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) if version == (10, 16): # When built against an older macOS SDK, Python will report macOS 10.16 # instead of the real version. version_str = subprocess.run( [ sys.executable, "-sS", "-c", "import platform; print(platform.mac_ver()[0])", ], check=True, env={"SYSTEM_VERSION_COMPAT": "0"}, stdout=subprocess.PIPE, universal_newlines=True, ).stdout version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: version = version if arch is None: arch = _mac_arch(cpu_arch) else: arch = arch if (10, 0) <= version and version < (11, 0): # Prior to Mac OS 11, each yearly release of Mac OS bumped the # "minor" version number. The major version was always 10. for minor_version in range(version[1], -1, -1): compat_version = 10, minor_version binary_formats = _mac_binary_formats(compat_version, arch) for binary_format in binary_formats: yield "macosx_{major}_{minor}_{binary_format}".format( major=10, minor=minor_version, binary_format=binary_format ) if version >= (11, 0): # Starting with Mac OS 11, each yearly release bumps the major version # number. The minor versions are now the midyear updates. for major_version in range(version[0], 10, -1): compat_version = major_version, 0 binary_formats = _mac_binary_formats(compat_version, arch) for binary_format in binary_formats: yield "macosx_{major}_{minor}_{binary_format}".format( major=major_version, minor=0, binary_format=binary_format ) if version >= (11, 0): # Mac OS 11 on x86_64 is compatible with binaries from previous releases. # Arm64 support was introduced in 11.0, so no Arm binaries from previous # releases exist. 
# # However, the "universal2" binary format can have a # macOS version earlier than 11.0 when the x86_64 part of the binary supports # that version of macOS. if arch == "x86_64": for minor_version in range(16, 3, -1): compat_version = 10, minor_version binary_formats = _mac_binary_formats(compat_version, arch) for binary_format in binary_formats: yield "macosx_{major}_{minor}_{binary_format}".format( major=compat_version[0], minor=compat_version[1], binary_format=binary_format, ) else: for minor_version in range(16, 3, -1): compat_version = 10, minor_version binary_format = "universal2" yield "macosx_{major}_{minor}_{binary_format}".format( major=compat_version[0], minor=compat_version[1], binary_format=binary_format, ) def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: linux = _normalize_string(sysconfig.get_platform()) if is_32bit: if linux == "linux_x86_64": linux = "linux_i686" elif linux == "linux_aarch64": linux = "linux_armv7l" _, arch = linux.split("_", 1) yield from _manylinux.platform_tags(linux, arch) yield from _musllinux.platform_tags(arch) yield linux def _generic_platforms() -> Iterator[str]: yield _normalize_string(sysconfig.get_platform()) def platform_tags() -> Iterator[str]: """ Provides the platform tags for this installation. """ if platform.system() == "Darwin": return mac_platforms() elif platform.system() == "Linux": return _linux_platforms() else: return _generic_platforms() def interpreter_name() -> str: """ Returns the name of the running interpreter. Some implementations have a reserved, two-letter abbreviation which will be returned when appropriate. """ name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name def interpreter_version(*, warn: bool = False) -> str: """ Returns the version of the running interpreter. """ version = _get_config_var("py_version_nodot", warn=warn) if version: version = str(version) else: version = _version_nodot(sys.version_info[:2]) return version def _version_nodot(version: PythonVersion) -> str: return "".join(map(str, version)) def sys_tags(*, warn: bool = False) -> Iterator[Tag]: """ Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. """ interp_name = interpreter_name() if interp_name == "cp": yield from cpython_tags(warn=warn) else: yield from generic_tags() if interp_name == "pp": interp = "pp3" elif interp_name == "cp": interp = "cp" + interpreter_version(warn=warn) else: interp = None yield from compatible_tags(interpreter=interp)
size: 18,106 | lang: Python | avg_line_length: 32.102377 | max_line_length: 88 | alphanum_fraction: 0.591958
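A short sketch of the public entry points above (editorial addition; import path assumed from the vendored layout).

from pkg_resources._vendor.packaging.tags import parse_tag, sys_tags

# Highest-priority tag triple for the running interpreter.
best = next(sys_tags())
print(best.interpreter, best.abi, best.platform)

# A compressed tag set expands to the full cross product.
print(sorted(str(t) for t in parse_tag("py2.py3-none-any")))
# ['py2-none-any', 'py3-none-any']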
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/metadata.py
import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import sys
import typing
from typing import Dict, List, Optional, Tuple, Union, cast

if sys.version_info >= (3, 8):  # pragma: no cover
    from typing import TypedDict
else:  # pragma: no cover
    if typing.TYPE_CHECKING:
        from typing_extensions import TypedDict
    else:
        try:
            from typing_extensions import TypedDict
        except ImportError:

            class TypedDict:
                def __init_subclass__(*_args, **_kwargs):
                    pass


# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# format offers some very basic primitives in *some* way then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
    """A dictionary of raw core metadata.

    Each field in core metadata maps to a key of this dictionary (when data is
    provided). The key is lower-case and underscores are used instead of dashes
    compared to the equivalent core metadata field. Any core metadata field that
    can be specified multiple times or can hold multiple values in a single
    field has a key with a plural name.

    Core metadata fields that can be specified multiple times are stored as a
    list or dict depending on which is appropriate for the field. Any fields
    which hold multiple values in a single field are stored as a list.
    """

    # Metadata 1.0 - PEP 241
    metadata_version: str
    name: str
    version: str
    platforms: List[str]
    summary: str
    description: str
    keywords: List[str]
    home_page: str
    author: str
    author_email: str
    license: str

    # Metadata 1.1 - PEP 314
    supported_platforms: List[str]
    download_url: str
    classifiers: List[str]
    requires: List[str]
    provides: List[str]
    obsoletes: List[str]

    # Metadata 1.2 - PEP 345
    maintainer: str
    maintainer_email: str
    requires_dist: List[str]
    provides_dist: List[str]
    obsoletes_dist: List[str]
    requires_python: str
    requires_external: List[str]
    project_urls: Dict[str, str]

    # Metadata 2.0
    # PEP 426 attempted to completely revamp the metadata format
    # but got stuck without ever being able to build consensus on
    # it and ultimately ended up withdrawn.
    #
    # However, a number of tools had started emitting METADATA with
    # `2.0` Metadata-Version, so for historical reasons, this version
    # was skipped.

    # Metadata 2.1 - PEP 566
    description_content_type: str
    provides_extra: List[str]

    # Metadata 2.2 - PEP 643
    dynamic: List[str]

    # Metadata 2.3 - PEP 685
    # No new fields were added in PEP 685, just some edge cases were
    # tightened up to provide better interoperability.


_STRING_FIELDS = {
    "author",
    "author_email",
    "description",
    "description_content_type",
    "download_url",
    "home_page",
    "license",
    "maintainer",
    "maintainer_email",
    "metadata_version",
    "name",
    "requires_python",
    "summary",
    "version",
}

_LIST_STRING_FIELDS = {
    "classifiers",
    "dynamic",
    "obsoletes",
    "obsoletes_dist",
    "platforms",
    "provides",
    "provides_dist",
    "provides_extra",
    "requires",
    "requires_dist",
    "requires_external",
    "supported_platforms",
}


def _parse_keywords(data: str) -> List[str]:
    """Split a string of comma-separated keywords into a list of keywords."""
    return [k.strip() for k in data.split(",")]


def _parse_project_urls(data: List[str]) -> Dict[str, str]:
    """Parse a list of label/URL string pairings separated by a comma."""
    urls = {}
    for pair in data:
        # Our logic is slightly tricky here as we want to try and do
        # *something* reasonable with malformed data.
        #
        # The main thing that we have to worry about, is data that does
        # not have a ',' at all to split the label from the value. There
        # isn't a singular right answer here, and we will fail validation
        # later on (if the caller is validating) so it doesn't *really*
        # matter, but since the missing value has to be an empty str
        # and our return value is dict[str, str], if we let the key
        # be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
        #
        # The other potential issue is that it's possible to have the
        # same label multiple times in the metadata, with no solid "right"
        # answer with what to do in that case. As such, we'll do the only
        # thing we can, which is treat the field as unparseable and add it
        # to our list of unparsed fields.
        parts = [p.strip() for p in pair.split(",", 1)]
        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items

        # TODO: The spec doesn't say anything about if the keys should be
        #       considered case sensitive or not... logically they should
        #       be case-preserving and case-insensitive, but doing that
        #       would open up more cases where we might have duplicate
        #       entries.
        label, url = parts
        if label in urls:
            # The label already exists in our set of urls, so this field
            # is unparseable, and we can just add the whole thing to our
            # unparseable data and stop processing it.
            raise KeyError("duplicate labels in project urls")
        urls[label] = url

    return urls


def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
    """Get the body of the message."""
    # If our source is a str, then our caller has managed encodings for us,
    # and we don't need to deal with it.
    if isinstance(source, str):
        payload: str = msg.get_payload()
        return payload
    # If our source is a bytes, then we're managing the encoding and we need
    # to deal with it.
    else:
        bpayload: bytes = msg.get_payload(decode=True)
        try:
            return bpayload.decode("utf8", "strict")
        except UnicodeDecodeError:
            raise ValueError("payload in an invalid encoding")


# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.

# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}


def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
    """Parse a distribution's metadata.

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately.
    Finally, any fields that are expected to appear only once but are repeated
    are included in this dict.
    """
    raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
    unparsed: Dict[str, List[str]] = {}

    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name)

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all() ),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibake happens, this key is
            # going into the unparsed dict anyways.
            if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: List[Tuple[bytes, Optional[str]]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation, we've encountered a key that
            # we don't know what it means, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_STRING_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls field is implemented in the metadata spec as a list
        # of specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))
        )
    else:
        if payload:
            # Check to see if we've already got a description, if so then both
            # it, and this body move to unparseable.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to a metadata, because a TypedDict only supports
    # literal key names, but we're computing our key names on purpose; the way
    # this function is implemented guarantees our `TypedDict` can only have
    # valid key names.
    return cast(RawMetadata, raw), unparsed
size: 16,397 | lang: Python | avg_line_length: 39.092909 | max_line_length: 88 | alphanum_fraction: 0.631152
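A minimal sketch of parse_email (editorial addition; import path assumed from the vendored layout).

from pkg_resources._vendor.packaging.metadata import parse_email

raw, unparsed = parse_email(
    "Metadata-Version: 2.1\n"
    "Name: example\n"
    "Version: 1.0\n"
    "Keywords: packaging,metadata\n"
    "\n"
    "The long description goes in the message body.\n"
)
print(raw["name"], raw["version"])  # example 1.0
print(raw["keywords"])              # ['packaging', 'metadata']
print(raw["description"])           # The long description goes in the message body.
print(unparsed)                     # {}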
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_elffile.py
""" ELF file parser. This provides a class ``ELFFile`` that parses an ELF executable in a similar interface to ``ZipFile``. Only the read interface is implemented. Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html """ import enum import os import struct from typing import IO, Optional, Tuple class ELFInvalid(ValueError): pass class EIClass(enum.IntEnum): C32 = 1 C64 = 2 class EIData(enum.IntEnum): Lsb = 1 Msb = 2 class EMachine(enum.IntEnum): I386 = 3 S390 = 22 Arm = 40 X8664 = 62 AArc64 = 183 class ELFFile: """ Representation of an ELF executable. """ def __init__(self, f: IO[bytes]) -> None: self._f = f try: ident = self._read("16B") except struct.error: raise ELFInvalid("unable to parse identification") magic = bytes(ident[:4]) if magic != b"\x7fELF": raise ELFInvalid(f"invalid magic: {magic!r}") self.capacity = ident[4] # Format for program header (bitness). self.encoding = ident[5] # Data structure encoding (endianness). try: # e_fmt: Format for program header. # p_fmt: Format for section header. # p_idx: Indexes to find p_type, p_offset, and p_filesz. e_fmt, self._p_fmt, self._p_idx = { (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB. (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB. (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. }[(self.capacity, self.encoding)] except KeyError: raise ELFInvalid( f"unrecognized capacity ({self.capacity}) or " f"encoding ({self.encoding})" ) try: ( _, self.machine, # Architecture type. _, _, self._e_phoff, # Offset of program header. _, self.flags, # Processor-specific flags. _, self._e_phentsize, # Size of section. self._e_phnum, # Number of sections. ) = self._read(e_fmt) except struct.error as e: raise ELFInvalid("unable to parse machine and section information") from e def _read(self, fmt: str) -> Tuple[int, ...]: return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) @property def interpreter(self) -> Optional[str]: """ The path recorded in the ``PT_INTERP`` section header. """ for index in range(self._e_phnum): self._f.seek(self._e_phoff + self._e_phentsize * index) try: data = self._read(self._p_fmt) except struct.error: continue if data[self._p_idx[0]] != 3: # Not PT_INTERP. continue self._f.seek(data[self._p_idx[1]]) return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") return None
size: 3,266 | lang: Python | avg_line_length: 28.972477 | max_line_length: 86 | alphanum_fraction: 0.527863
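An illustrative sketch (editorial addition; import path assumed from the vendored layout). The fields are only meaningful when the target really is an ELF binary, e.g. a Python executable on Linux.

import sys

from pkg_resources._vendor.packaging._elffile import ELFFile, ELFInvalid

try:
    with open(sys.executable, "rb") as f:
        elf = ELFFile(f)
        # e.g. capacity=2 (64-bit), encoding=1 (LSB), machine=62 (x86-64)
        print(elf.capacity, elf.encoding, elf.machine)
        print(elf.interpreter)  # e.g. /lib64/ld-linux-x86-64.so.2
except ELFInvalid:
    print("not an ELF executable")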
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_parser.py
"""Handwritten parser of dependency specifiers. The docstring for each __parse_* function contains ENBF-inspired grammar representing the implementation. """ import ast from typing import Any, List, NamedTuple, Optional, Tuple, Union from ._tokenizer import DEFAULT_RULES, Tokenizer class Node: def __init__(self, value: str) -> None: self.value = value def __str__(self) -> str: return self.value def __repr__(self) -> str: return f"<{self.__class__.__name__}('{self}')>" def serialize(self) -> str: raise NotImplementedError class Variable(Node): def serialize(self) -> str: return str(self) class Value(Node): def serialize(self) -> str: return f'"{self}"' class Op(Node): def serialize(self) -> str: return str(self) MarkerVar = Union[Variable, Value] MarkerItem = Tuple[MarkerVar, Op, MarkerVar] # MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] # MarkerList = List[Union["MarkerList", MarkerAtom, str]] # mypy does not support recursive type definition # https://github.com/python/mypy/issues/731 MarkerAtom = Any MarkerList = List[Any] class ParsedRequirement(NamedTuple): name: str url: str extras: List[str] specifier: str marker: Optional[MarkerList] # -------------------------------------------------------------------------------------- # Recursive descent parser for dependency specifier # -------------------------------------------------------------------------------------- def parse_requirement(source: str) -> ParsedRequirement: return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: """ requirement = WS? IDENTIFIER WS? extras WS? requirement_details """ tokenizer.consume("WS") name_token = tokenizer.expect( "IDENTIFIER", expected="package name at the start of dependency specifier" ) name = name_token.text tokenizer.consume("WS") extras = _parse_extras(tokenizer) tokenizer.consume("WS") url, specifier, marker = _parse_requirement_details(tokenizer) tokenizer.expect("END", expected="end of dependency specifier") return ParsedRequirement(name, url, extras, specifier, marker) def _parse_requirement_details( tokenizer: Tokenizer, ) -> Tuple[str, str, Optional[MarkerList]]: """ requirement_details = AT URL (WS requirement_marker?)? | specifier WS? (requirement_marker)? """ specifier = "" url = "" marker = None if tokenizer.check("AT"): tokenizer.read() tokenizer.consume("WS") url_start = tokenizer.position url = tokenizer.expect("URL", expected="URL after @").text if tokenizer.check("END", peek=True): return (url, specifier, marker) tokenizer.expect("WS", expected="whitespace after URL") # The input might end after whitespace. if tokenizer.check("END", peek=True): return (url, specifier, marker) marker = _parse_requirement_marker( tokenizer, span_start=url_start, after="URL and whitespace" ) else: specifier_start = tokenizer.position specifier = _parse_specifier(tokenizer) tokenizer.consume("WS") if tokenizer.check("END", peek=True): return (url, specifier, marker) marker = _parse_requirement_marker( tokenizer, span_start=specifier_start, after=( "version specifier" if specifier else "name and no valid version specifier" ), ) return (url, specifier, marker) def _parse_requirement_marker( tokenizer: Tokenizer, *, span_start: int, after: str ) -> MarkerList: """ requirement_marker = SEMICOLON marker WS? 
""" if not tokenizer.check("SEMICOLON"): tokenizer.raise_syntax_error( f"Expected end or semicolon (after {after})", span_start=span_start, ) tokenizer.read() marker = _parse_marker(tokenizer) tokenizer.consume("WS") return marker def _parse_extras(tokenizer: Tokenizer) -> List[str]: """ extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? """ if not tokenizer.check("LEFT_BRACKET", peek=True): return [] with tokenizer.enclosing_tokens( "LEFT_BRACKET", "RIGHT_BRACKET", around="extras", ): tokenizer.consume("WS") extras = _parse_extras_list(tokenizer) tokenizer.consume("WS") return extras def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: """ extras_list = identifier (wsp* ',' wsp* identifier)* """ extras: List[str] = [] if not tokenizer.check("IDENTIFIER"): return extras extras.append(tokenizer.read().text) while True: tokenizer.consume("WS") if tokenizer.check("IDENTIFIER", peek=True): tokenizer.raise_syntax_error("Expected comma between extra names") elif not tokenizer.check("COMMA"): break tokenizer.read() tokenizer.consume("WS") extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") extras.append(extra_token.text) return extras def _parse_specifier(tokenizer: Tokenizer) -> str: """ specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS | WS? version_many WS? """ with tokenizer.enclosing_tokens( "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", around="version specifier", ): tokenizer.consume("WS") parsed_specifiers = _parse_version_many(tokenizer) tokenizer.consume("WS") return parsed_specifiers def _parse_version_many(tokenizer: Tokenizer) -> str: """ version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? """ parsed_specifiers = "" while tokenizer.check("SPECIFIER"): span_start = tokenizer.position parsed_specifiers += tokenizer.read().text if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): tokenizer.raise_syntax_error( ".* suffix can only be used with `==` or `!=` operators", span_start=span_start, span_end=tokenizer.position + 1, ) if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): tokenizer.raise_syntax_error( "Local version label can only be used with `==` or `!=` operators", span_start=span_start, span_end=tokenizer.position, ) tokenizer.consume("WS") if not tokenizer.check("COMMA"): break parsed_specifiers += tokenizer.read().text tokenizer.consume("WS") return parsed_specifiers # -------------------------------------------------------------------------------------- # Recursive descent parser for marker expression # -------------------------------------------------------------------------------------- def parse_marker(source: str) -> MarkerList: return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) def _parse_marker(tokenizer: Tokenizer) -> MarkerList: """ marker = marker_atom (BOOLOP marker_atom)+ """ expression = [_parse_marker_atom(tokenizer)] while tokenizer.check("BOOLOP"): token = tokenizer.read() expr_right = _parse_marker_atom(tokenizer) expression.extend((token.text, expr_right)) return expression def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: """ marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? | WS? marker_item WS? 
""" tokenizer.consume("WS") if tokenizer.check("LEFT_PARENTHESIS", peek=True): with tokenizer.enclosing_tokens( "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", around="marker expression", ): tokenizer.consume("WS") marker: MarkerAtom = _parse_marker(tokenizer) tokenizer.consume("WS") else: marker = _parse_marker_item(tokenizer) tokenizer.consume("WS") return marker def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: """ marker_item = WS? marker_var WS? marker_op WS? marker_var WS? """ tokenizer.consume("WS") marker_var_left = _parse_marker_var(tokenizer) tokenizer.consume("WS") marker_op = _parse_marker_op(tokenizer) tokenizer.consume("WS") marker_var_right = _parse_marker_var(tokenizer) tokenizer.consume("WS") return (marker_var_left, marker_op, marker_var_right) def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: """ marker_var = VARIABLE | QUOTED_STRING """ if tokenizer.check("VARIABLE"): return process_env_var(tokenizer.read().text.replace(".", "_")) elif tokenizer.check("QUOTED_STRING"): return process_python_str(tokenizer.read().text) else: tokenizer.raise_syntax_error( message="Expected a marker variable or quoted string" ) def process_env_var(env_var: str) -> Variable: if ( env_var == "platform_python_implementation" or env_var == "python_implementation" ): return Variable("platform_python_implementation") else: return Variable(env_var) def process_python_str(python_str: str) -> Value: value = ast.literal_eval(python_str) return Value(str(value)) def _parse_marker_op(tokenizer: Tokenizer) -> Op: """ marker_op = IN | NOT IN | OP """ if tokenizer.check("IN"): tokenizer.read() return Op("in") elif tokenizer.check("NOT"): tokenizer.read() tokenizer.expect("WS", expected="whitespace after 'not'") tokenizer.expect("IN", expected="'in' after 'not'") return Op("not in") elif tokenizer.check("OP"): return Op(tokenizer.read().text) else: return tokenizer.raise_syntax_error( "Expected marker operator, one of " "<=, <, !=, ==, >=, >, ~=, ===, in, not in" )
10,194
Python
27.799435
88
0.593388
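The parser above is internal; its output is normally consumed through the public Requirement class. A minimal sketch, assuming the vendored copy is reachable through pkg_resources.extern (a standalone packaging install works the same via packaging.requirements):

from pkg_resources.extern.packaging.requirements import Requirement

req = Requirement('requests[security] >= 2.8.1 ; python_version < "3.11"')
print(req.name)            # requests
print(sorted(req.extras))  # ['security']
print(str(req.specifier))  # >=2.8.1
print(str(req.marker))     # python_version < "3.11"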
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/abc.py
import abc import io import itertools import pathlib from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional from ._compat import runtime_checkable, Protocol, StrPath __all__ = ["ResourceReader", "Traversable", "TraversableResources"] class ResourceReader(metaclass=abc.ABCMeta): """Abstract base class for loaders to provide resource reading support.""" @abc.abstractmethod def open_resource(self, resource: Text) -> BinaryIO: """Return an opened, file-like object for binary reading. The 'resource' argument is expected to represent only a file name. If the resource cannot be found, FileNotFoundError is raised. """ # This deliberately raises FileNotFoundError instead of # NotImplementedError so that if this method is accidentally called, # it'll still do the right thing. raise FileNotFoundError @abc.abstractmethod def resource_path(self, resource: Text) -> Text: """Return the file system path to the specified resource. The 'resource' argument is expected to represent only a file name. If the resource does not exist on the file system, raise FileNotFoundError. """ # This deliberately raises FileNotFoundError instead of # NotImplementedError so that if this method is accidentally called, # it'll still do the right thing. raise FileNotFoundError @abc.abstractmethod def is_resource(self, path: Text) -> bool: """Return True if the named 'path' is a resource. Files are resources, directories are not. """ raise FileNotFoundError @abc.abstractmethod def contents(self) -> Iterable[str]: """Return an iterable of entries in `package`.""" raise FileNotFoundError class TraversalError(Exception): pass @runtime_checkable class Traversable(Protocol): """ An object with a subset of pathlib.Path methods suitable for traversing directories and opening files. Any exceptions that occur when accessing the backing resource may propagate unaltered. """ @abc.abstractmethod def iterdir(self) -> Iterator["Traversable"]: """ Yield Traversable objects in self """ def read_bytes(self) -> bytes: """ Read contents of self as bytes """ with self.open('rb') as strm: return strm.read() def read_text(self, encoding: Optional[str] = None) -> str: """ Read contents of self as text """ with self.open(encoding=encoding) as strm: return strm.read() @abc.abstractmethod def is_dir(self) -> bool: """ Return True if self is a directory """ @abc.abstractmethod def is_file(self) -> bool: """ Return True if self is a file """ def joinpath(self, *descendants: StrPath) -> "Traversable": """ Return Traversable resolved with any descendants applied. Each descendant should be a path segment relative to self and each may contain multiple levels separated by ``posixpath.sep`` (``/``). """ if not descendants: return self names = itertools.chain.from_iterable( path.parts for path in map(pathlib.PurePosixPath, descendants) ) target = next(names) matches = ( traversable for traversable in self.iterdir() if traversable.name == target ) try: match = next(matches) except StopIteration: raise TraversalError( "Target not found during traversal.", target, list(names) ) return match.joinpath(*names) def __truediv__(self, child: StrPath) -> "Traversable": """ Return Traversable child in self """ return self.joinpath(child) @abc.abstractmethod def open(self, mode='r', *args, **kwargs): """ mode may be 'r' or 'rb' to open as text or binary. Return a handle suitable for reading (same as pathlib.Path.open). When opening as text, accepts encoding parameters such as those accepted by io.TextIOWrapper. 
""" @property @abc.abstractmethod def name(self) -> str: """ The base name of this object without any parent references. """ class TraversableResources(ResourceReader): """ The required interface for providing traversable resources. """ @abc.abstractmethod def files(self) -> "Traversable": """Return a Traversable object for the loaded package.""" def open_resource(self, resource: StrPath) -> io.BufferedReader: return self.files().joinpath(resource).open('rb') def resource_path(self, resource: Any) -> NoReturn: raise FileNotFoundError(resource) def is_resource(self, path: StrPath) -> bool: return self.files().joinpath(path).is_file() def contents(self) -> Iterator[str]: return (item.name for item in self.files().iterdir())
5,140
Python
29.064327
87
0.628405
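The stdlib importlib.resources implements this same Traversable protocol, so its files() entry point is a convenient way to exercise iterdir, joinpath, and read_text from the ABC above. A minimal sketch, assuming Python 3.9 or later; "json" is just an example package.

from importlib.resources import files  # stdlib counterpart of this vendored ABC

pkg = files("json")  # any importable package works
print([entry.name for entry in pkg.iterdir() if entry.is_file()])
print((pkg / "__init__.py").read_text(encoding="utf-8")[:30])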
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/_itertools.py
from itertools import filterfalse from typing import ( Callable, Iterable, Iterator, Optional, Set, TypeVar, Union, ) # Type and type variable definitions _T = TypeVar('_T') _U = TypeVar('_U') def unique_everseen( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None ) -> Iterator[_T]: "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen: Set[Union[_T, _U]] = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element
884
Python
23.583333
78
0.580317
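The doctest comments above translate directly into runnable calls. A minimal sketch; the import path is an assumption based on the vendored location of this module.

from pkg_resources.extern.importlib_resources._itertools import unique_everseen

print(list(unique_everseen("AAAABBBCCDAABBB")))         # ['A', 'B', 'C', 'D']
print(list(unique_everseen("ABBCcAD", key=str.lower)))  # ['A', 'B', 'C', 'D']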
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/_compat.py
# flake8: noqa import abc import os import sys import pathlib from contextlib import suppress from typing import Union if sys.version_info >= (3, 10): from zipfile import Path as ZipPath # type: ignore else: from ..zipp import Path as ZipPath # type: ignore try: from typing import runtime_checkable # type: ignore except ImportError: def runtime_checkable(cls): # type: ignore return cls try: from typing import Protocol # type: ignore except ImportError: Protocol = abc.ABC # type: ignore class TraversableResourcesLoader: """ Adapt loaders to provide TraversableResources and other compatibility. Used primarily for Python 3.9 and earlier where the native loaders do not yet implement TraversableResources. """ def __init__(self, spec): self.spec = spec @property def path(self): return self.spec.origin def get_resource_reader(self, name): from . import readers, _adapters def _zip_reader(spec): with suppress(AttributeError): return readers.ZipReader(spec.loader, spec.name) def _namespace_reader(spec): with suppress(AttributeError, ValueError): return readers.NamespaceReader(spec.submodule_search_locations) def _available_reader(spec): with suppress(AttributeError): return spec.loader.get_resource_reader(spec.name) def _native_reader(spec): reader = _available_reader(spec) return reader if hasattr(reader, 'files') else None def _file_reader(spec): try: path = pathlib.Path(self.path) except TypeError: return None if path.exists(): return readers.FileReader(self) return ( # native reader if it supplies 'files' _native_reader(self.spec) or # local ZipReader if a zip module _zip_reader(self.spec) or # local NamespaceReader if a namespace module _namespace_reader(self.spec) or # local FileReader _file_reader(self.spec) # fallback - adapt the spec ResourceReader to TraversableReader or _adapters.CompatibilityFiles(self.spec) ) def wrap_spec(package): """ Construct a package spec with traversable compatibility on the spec/loader/reader. Supersedes _adapters.wrap_spec to use TraversableResourcesLoader from above for older Python compatibility (<3.10). """ from . import _adapters return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) if sys.version_info >= (3, 9): StrPath = Union[str, os.PathLike[str]] else: # PathLike is only subscriptable at runtime in 3.9+ StrPath = Union[str, "os.PathLike[str]"]
2,925
Python
25.844036
84
0.627009
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/__init__.py
"""Read resources contained within a package.""" from ._common import ( as_file, files, Package, ) from ._legacy import ( contents, open_binary, read_binary, open_text, read_text, is_resource, path, Resource, ) from .abc import ResourceReader __all__ = [ 'Package', 'Resource', 'ResourceReader', 'as_file', 'contents', 'files', 'is_resource', 'open_binary', 'open_text', 'path', 'read_binary', 'read_text', ]
506
Python
12.702702
48
0.55336
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/_common.py
import os import pathlib import tempfile import functools import contextlib import types import importlib import inspect import warnings import itertools from typing import Union, Optional, cast from .abc import ResourceReader, Traversable from ._compat import wrap_spec Package = Union[types.ModuleType, str] Anchor = Package def package_to_anchor(func): """ Replace 'package' parameter as 'anchor' and warn about the change. Other errors should fall through. >>> files('a', 'b') Traceback (most recent call last): TypeError: files() takes from 0 to 1 positional arguments but 2 were given """ undefined = object() @functools.wraps(func) def wrapper(anchor=undefined, package=undefined): if package is not undefined: if anchor is not undefined: return func(anchor, package) warnings.warn( "First parameter to files is renamed to 'anchor'", DeprecationWarning, stacklevel=2, ) return func(package) elif anchor is undefined: return func() return func(anchor) return wrapper @package_to_anchor def files(anchor: Optional[Anchor] = None) -> Traversable: """ Get a Traversable resource for an anchor. """ return from_package(resolve(anchor)) def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: """ Return the package's loader if it's a ResourceReader. """ # We can't use # a issubclass() check here because apparently abc.'s __subclasscheck__() # hook wants to create a weak reference to the object, but # zipimport.zipimporter does not support weak references, resulting in a # TypeError. That seems terrible. spec = package.__spec__ reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore if reader is None: return None return reader(spec.name) # type: ignore @functools.singledispatch def resolve(cand: Optional[Anchor]) -> types.ModuleType: return cast(types.ModuleType, cand) @resolve.register def _(cand: str) -> types.ModuleType: return importlib.import_module(cand) @resolve.register def _(cand: None) -> types.ModuleType: return resolve(_infer_caller().f_globals['__name__']) def _infer_caller(): """ Walk the stack and find the frame of the first caller not in this module. """ def is_this_file(frame_info): return frame_info.filename == __file__ def is_wrapper(frame_info): return frame_info.function == 'wrapper' not_this_file = itertools.filterfalse(is_this_file, inspect.stack()) # also exclude 'wrapper' due to singledispatch in the call stack callers = itertools.filterfalse(is_wrapper, not_this_file) return next(callers).frame def from_package(package: types.ModuleType): """ Return a Traversable object for the given package. """ spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contextmanager def _tempfile( reader, suffix='', # gh-93353: Keep a reference to call os.remove() in late Python # finalization. *, _os_remove=os.remove, ): # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' # blocks due to the need to close the temporary file to work on Windows # properly. fd, raw_path = tempfile.mkstemp(suffix=suffix) try: try: os.write(fd, reader()) finally: os.close(fd) del reader yield pathlib.Path(raw_path) finally: try: _os_remove(raw_path) except FileNotFoundError: pass def _temp_file(path): return _tempfile(path.read_bytes, suffix=path.name) def _is_present_dir(path: Traversable) -> bool: """ Some Traversables implement ``is_dir()`` to raise an exception (i.e. ``FileNotFoundError``) when the directory doesn't exist. 
    This function wraps that call to always return a boolean and only return
    True if there's a dir and it exists.
    """
    with contextlib.suppress(FileNotFoundError):
        return path.is_dir()
    return False


@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects.
    """
    yield path


@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
    """
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
    """
    with dir as result:
        yield pathlib.Path(result)


@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
    """
    assert path.is_dir()
    with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
        yield _write_contents(temp_dir, path)


def _write_contents(target, source):
    child = target.joinpath(source.name)
    if source.is_dir():
        child.mkdir()
        for item in source.iterdir():
            _write_contents(child, item)
    else:
        child.write_bytes(source.read_bytes())
    return child
5,457
Python
25.240384
79
0.658604
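files() and as_file() above mirror the stdlib API. A minimal sketch using the stdlib names (Python 3.9+ assumed); as_file guarantees a concrete filesystem path even when the package ships inside a zip.

from importlib.resources import as_file, files

resource = files("json").joinpath("__init__.py")
with as_file(resource) as path:        # temp copy only if not already on disk
    print(path.suffix, path.exists())  # .py True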
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/_legacy.py
import functools import os import pathlib import types import warnings from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any from . import _common Package = Union[types.ModuleType, str] Resource = str def deprecated(func): @functools.wraps(func) def wrapper(*args, **kwargs): warnings.warn( f"{func.__name__} is deprecated. Use files() instead. " "Refer to https://importlib-resources.readthedocs.io" "/en/latest/using.html#migrating-from-legacy for migration advice.", DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapper def normalize_path(path: Any) -> str: """Normalize a path by ensuring it is a string. If the resulting string contains path separators, an exception is raised. """ str_path = str(path) parent, file_name = os.path.split(str_path) if parent: raise ValueError(f'{path!r} must be only a file name') return file_name @deprecated def open_binary(package: Package, resource: Resource) -> BinaryIO: """Return a file-like object opened for binary reading of the resource.""" return (_common.files(package) / normalize_path(resource)).open('rb') @deprecated def read_binary(package: Package, resource: Resource) -> bytes: """Return the binary contents of the resource.""" return (_common.files(package) / normalize_path(resource)).read_bytes() @deprecated def open_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> TextIO: """Return a file-like object opened for text reading of the resource.""" return (_common.files(package) / normalize_path(resource)).open( 'r', encoding=encoding, errors=errors ) @deprecated def read_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> str: """Return the decoded string of the resource. The decoding-related arguments have the same semantics as those of bytes.decode(). """ with open_text(package, resource, encoding, errors) as fp: return fp.read() @deprecated def contents(package: Package) -> Iterable[str]: """Return an iterable of entries in `package`. Note that not all entries are resources. Specifically, directories are not considered resources. Use `is_resource()` on each entry returned here to check if it is a resource or not. """ return [path.name for path in _common.files(package).iterdir()] @deprecated def is_resource(package: Package, name: str) -> bool: """True if `name` is a resource inside `package`. Directories are *not* resources. """ resource = normalize_path(name) return any( traversable.name == resource and traversable.is_file() for traversable in _common.files(package).iterdir() ) @deprecated def path( package: Package, resource: Resource, ) -> ContextManager[pathlib.Path]: """A context manager providing a file path object to the resource. If the resource does not already exist on its own on the file system, a temporary file will be created. If the file was created, the file will be deleted upon exiting the context manager (no exception is raised if the file was deleted prior to the context manager exiting). """ return _common.as_file(_common.files(package) / normalize_path(resource))
3,481
Python
27.776859
80
0.673657
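Each deprecated helper above is a thin shim over files(). This sketch shows a legacy call and its documented replacement side by side; it assumes the stdlib read_text shim, which exists on Python 3.9 through 3.12.

import warnings
from importlib.resources import files, read_text

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    legacy = read_text("json", "__init__.py")

modern = files("json").joinpath("__init__.py").read_text(encoding="utf-8")
assert legacy == modern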
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/_adapters.py
from contextlib import suppress from io import TextIOWrapper from . import abc class SpecLoaderAdapter: """ Adapt a package spec to adapt the underlying loader. """ def __init__(self, spec, adapter=lambda spec: spec.loader): self.spec = spec self.loader = adapter(spec) def __getattr__(self, name): return getattr(self.spec, name) class TraversableResourcesLoader: """ Adapt a loader to provide TraversableResources. """ def __init__(self, spec): self.spec = spec def get_resource_reader(self, name): return CompatibilityFiles(self.spec)._native() def _io_wrapper(file, mode='r', *args, **kwargs): if mode == 'r': return TextIOWrapper(file, *args, **kwargs) elif mode == 'rb': return file raise ValueError( "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) ) class CompatibilityFiles: """ Adapter for an existing or non-existent resource reader to provide a compatibility .files(). """ class SpecPath(abc.Traversable): """ Path tied to a module spec. Can be read and exposes the resource reader children. """ def __init__(self, spec, reader): self._spec = spec self._reader = reader def iterdir(self): if not self._reader: return iter(()) return iter( CompatibilityFiles.ChildPath(self._reader, path) for path in self._reader.contents() ) def is_file(self): return False is_dir = is_file def joinpath(self, other): if not self._reader: return CompatibilityFiles.OrphanPath(other) return CompatibilityFiles.ChildPath(self._reader, other) @property def name(self): return self._spec.name def open(self, mode='r', *args, **kwargs): return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) class ChildPath(abc.Traversable): """ Path tied to a resource reader child. Can be read but doesn't expose any meaningful children. """ def __init__(self, reader, name): self._reader = reader self._name = name def iterdir(self): return iter(()) def is_file(self): return self._reader.is_resource(self.name) def is_dir(self): return not self.is_file() def joinpath(self, other): return CompatibilityFiles.OrphanPath(self.name, other) @property def name(self): return self._name def open(self, mode='r', *args, **kwargs): return _io_wrapper( self._reader.open_resource(self.name), mode, *args, **kwargs ) class OrphanPath(abc.Traversable): """ Orphan path, not tied to a module spec or resource reader. Can't be read and doesn't expose any meaningful children. """ def __init__(self, *path_parts): if len(path_parts) < 1: raise ValueError('Need at least one path part to construct a path') self._path = path_parts def iterdir(self): return iter(()) def is_file(self): return False is_dir = is_file def joinpath(self, other): return CompatibilityFiles.OrphanPath(*self._path, other) @property def name(self): return self._path[-1] def open(self, mode='r', *args, **kwargs): raise FileNotFoundError("Can't open orphan path") def __init__(self, spec): self.spec = spec @property def _reader(self): with suppress(AttributeError): return self.spec.loader.get_resource_reader(self.spec.name) def _native(self): """ Return the native reader if it supports files(). """ reader = self._reader return reader if hasattr(reader, 'files') else self def __getattr__(self, attr): return getattr(self._reader, attr) def files(self): return CompatibilityFiles.SpecPath(self.spec, self._reader) def wrap_spec(package): """ Construct a package spec with traversable compatibility on the spec/loader/reader. """ return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
4,504
Python
25.345029
87
0.571714
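_io_wrapper above is the whole text/binary dispatch: mode 'rb' passes the binary stream through and mode 'r' wraps it. A self-contained sketch of that behavior:

import io

raw = io.BytesIO(b"hello")                      # what open_resource() returns
text = io.TextIOWrapper(raw, encoding="utf-8")  # what mode='r' produces
print(text.read())                              # hello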
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/simple.py
""" Interface adapters for low-level readers. """ import abc import io import itertools from typing import BinaryIO, List from .abc import Traversable, TraversableResources class SimpleReader(abc.ABC): """ The minimum, low-level interface required from a resource provider. """ @property @abc.abstractmethod def package(self) -> str: """ The name of the package for which this reader loads resources. """ @abc.abstractmethod def children(self) -> List['SimpleReader']: """ Obtain an iterable of SimpleReader for available child containers (e.g. directories). """ @abc.abstractmethod def resources(self) -> List[str]: """ Obtain available named resources for this virtual package. """ @abc.abstractmethod def open_binary(self, resource: str) -> BinaryIO: """ Obtain a File-like for a named resource. """ @property def name(self): return self.package.split('.')[-1] class ResourceContainer(Traversable): """ Traversable container for a package's resources via its reader. """ def __init__(self, reader: SimpleReader): self.reader = reader def is_dir(self): return True def is_file(self): return False def iterdir(self): files = (ResourceHandle(self, name) for name in self.reader.resources) dirs = map(ResourceContainer, self.reader.children()) return itertools.chain(files, dirs) def open(self, *args, **kwargs): raise IsADirectoryError() class ResourceHandle(Traversable): """ Handle to a named resource in a ResourceReader. """ def __init__(self, parent: ResourceContainer, name: str): self.parent = parent self.name = name # type: ignore def is_file(self): return True def is_dir(self): return False def open(self, mode='r', *args, **kwargs): stream = self.parent.reader.open_binary(self.name) if 'b' not in mode: stream = io.TextIOWrapper(*args, **kwargs) return stream def joinpath(self, name): raise RuntimeError("Cannot traverse into a resource") class TraversableReader(TraversableResources, SimpleReader): """ A TraversableResources based on SimpleReader. Resource providers may derive from this class to provide the TraversableResources interface by supplying the SimpleReader interface. """ def files(self): return ResourceContainer(self)
2,576
Python
23.084112
78
0.63354
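A provider only has to satisfy the four abstract SimpleReader methods above. A minimal in-memory sketch; the DictReader name and its data are illustrative, and the import path is an assumption based on the vendored location.

import io

from pkg_resources.extern.importlib_resources.simple import SimpleReader

class DictReader(SimpleReader):
    """Hypothetical in-memory provider; name and data are illustrative."""

    def __init__(self, package, data):
        self._package, self._data = package, data

    @property
    def package(self):
        return self._package

    def children(self):
        return []  # no child containers in this toy provider

    def resources(self):
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

reader = DictReader("demo", {"greeting.txt": b"hello"})
print(reader.name)                                # demo
print(reader.open_binary("greeting.txt").read())  # b'hello'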
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/importlib_resources/readers.py
import collections import pathlib import operator from . import abc from ._itertools import unique_everseen from ._compat import ZipPath def remove_duplicates(items): return iter(collections.OrderedDict.fromkeys(items)) class FileReader(abc.TraversableResources): def __init__(self, loader): self.path = pathlib.Path(loader.path).parent def resource_path(self, resource): """ Return the file system path to prevent `resources.path()` from creating a temporary copy. """ return str(self.path.joinpath(resource)) def files(self): return self.path class ZipReader(abc.TraversableResources): def __init__(self, loader, module): _, _, name = module.rpartition('.') self.prefix = loader.prefix.replace('\\', '/') + name + '/' self.archive = loader.archive def open_resource(self, resource): try: return super().open_resource(resource) except KeyError as exc: raise FileNotFoundError(exc.args[0]) def is_resource(self, path): # workaround for `zipfile.Path.is_file` returning true # for non-existent paths. target = self.files().joinpath(path) return target.is_file() and target.exists() def files(self): return ZipPath(self.archive, self.prefix) class MultiplexedPath(abc.Traversable): """ Given a series of Traversable objects, implement a merged version of the interface across all objects. Useful for namespace packages which may be multihomed at a single name. """ def __init__(self, *paths): self._paths = list(map(pathlib.Path, remove_duplicates(paths))) if not self._paths: message = 'MultiplexedPath must contain at least one path' raise FileNotFoundError(message) if not all(path.is_dir() for path in self._paths): raise NotADirectoryError('MultiplexedPath only supports directories') def iterdir(self): files = (file for path in self._paths for file in path.iterdir()) return unique_everseen(files, key=operator.attrgetter('name')) def read_bytes(self): raise FileNotFoundError(f'{self} is not a file') def read_text(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') def is_dir(self): return True def is_file(self): return False def joinpath(self, *descendants): try: return super().joinpath(*descendants) except abc.TraversalError: # One of the paths did not resolve (a directory does not exist). # Just return something that will not exist. return self._paths[0].joinpath(*descendants) def open(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') @property def name(self): return self._paths[0].name def __repr__(self): paths = ', '.join(f"'{path}'" for path in self._paths) return f'MultiplexedPath({paths})' class NamespaceReader(abc.TraversableResources): def __init__(self, namespace_path): if 'NamespacePath' not in str(namespace_path): raise ValueError('Invalid path') self.path = MultiplexedPath(*list(namespace_path)) def resource_path(self, resource): """ Return the file system path to prevent `resources.path()` from creating a temporary copy. """ return str(self.path.joinpath(resource)) def files(self): return self.path
3,581
Python
28.603306
81
0.63055
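MultiplexedPath above merges several directories under one Traversable, which is what a multihomed namespace package needs. A small sketch with two temp dirs; the import path is an assumption based on the vendored location.

import pathlib
import tempfile

from pkg_resources.extern.importlib_resources.readers import MultiplexedPath

a, b = tempfile.mkdtemp(), tempfile.mkdtemp()
pathlib.Path(a, "x.txt").write_text("from a")
pathlib.Path(b, "y.txt").write_text("from b")

merged = MultiplexedPath(a, b)
print(sorted(item.name for item in merged.iterdir()))  # ['x.txt', 'y.txt']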
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/jaraco/functools.py
import functools import time import inspect import collections import types import itertools import warnings import pkg_resources.extern.more_itertools from typing import Callable, TypeVar CallableT = TypeVar("CallableT", bound=Callable[..., object]) def compose(*funcs): """ Compose any number of unary functions into a single unary function. >>> import textwrap >>> expected = str.strip(textwrap.dedent(compose.__doc__)) >>> strip_and_dedent = compose(str.strip, textwrap.dedent) >>> strip_and_dedent(compose.__doc__) == expected True Compose also allows the innermost function to take arbitrary arguments. >>> round_three = lambda x: round(x, ndigits=3) >>> f = compose(round_three, int.__truediv__) >>> [f(3*x, x+1) for x in range(1,10)] [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] """ def compose_two(f1, f2): return lambda *args, **kwargs: f1(f2(*args, **kwargs)) return functools.reduce(compose_two, funcs) def method_caller(method_name, *args, **kwargs): """ Return a function that will call a named method on the target object with optional positional and keyword arguments. >>> lower = method_caller('lower') >>> lower('MyString') 'mystring' """ def call_method(target): func = getattr(target, method_name) return func(*args, **kwargs) return call_method def once(func): """ Decorate func so it's only ever called the first time. This decorator can ensure that an expensive or non-idempotent function will not be expensive on subsequent calls and is idempotent. >>> add_three = once(lambda a: a+3) >>> add_three(3) 6 >>> add_three(9) 6 >>> add_three('12') 6 To reset the stored value, simply clear the property ``saved_result``. >>> del add_three.saved_result >>> add_three(9) 12 >>> add_three(8) 12 Or invoke 'reset()' on it. >>> add_three.reset() >>> add_three(-3) 0 >>> add_three(0) 0 """ @functools.wraps(func) def wrapper(*args, **kwargs): if not hasattr(wrapper, 'saved_result'): wrapper.saved_result = func(*args, **kwargs) return wrapper.saved_result wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') return wrapper def method_cache( method: CallableT, cache_wrapper: Callable[ [CallableT], CallableT ] = functools.lru_cache(), # type: ignore[assignment] ) -> CallableT: """ Wrap lru_cache to support storing the cache data in the object instances. Abstracts the common paradigm where the method explicitly saves an underscore-prefixed protected property on first call and returns that subsequently. >>> class MyClass: ... calls = 0 ... ... @method_cache ... def method(self, value): ... self.calls += 1 ... return value >>> a = MyClass() >>> a.method(3) 3 >>> for x in range(75): ... res = a.method(x) >>> a.calls 75 Note that the apparent behavior will be exactly like that of lru_cache except that the cache is stored on each instance, so values in one instance will not flush values from another, and when an instance is deleted, so are the cached values for that instance. >>> b = MyClass() >>> for x in range(35): ... res = b.method(x) >>> b.calls 35 >>> a.method(0) 0 >>> a.calls 75 Note that if method had been decorated with ``functools.lru_cache()``, a.calls would have been 76 (due to the cached value of 0 having been flushed by the 'b' instance). Clear the cache with ``.cache_clear()`` >>> a.method.cache_clear() Same for a method that hasn't yet been called. 
>>> c = MyClass() >>> c.method.cache_clear() Another cache wrapper may be supplied: >>> cache = functools.lru_cache(maxsize=2) >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) >>> a = MyClass() >>> a.method2() 3 Caution - do not subsequently wrap the method with another decorator, such as ``@property``, which changes the semantics of the function. See also http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ for another implementation and additional justification. """ def wrapper(self: object, *args: object, **kwargs: object) -> object: # it's the first call, replace the method with a cached, bound method bound_method: CallableT = types.MethodType( # type: ignore[assignment] method, self ) cached_method = cache_wrapper(bound_method) setattr(self, method.__name__, cached_method) return cached_method(*args, **kwargs) # Support cache clear even before cache has been created. wrapper.cache_clear = lambda: None # type: ignore[attr-defined] return ( # type: ignore[return-value] _special_method_cache(method, cache_wrapper) or wrapper ) def _special_method_cache(method, cache_wrapper): """ Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 """ name = method.__name__ special_names = '__getattr__', '__getitem__' if name not in special_names: return wrapper_name = '__cached' + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy def apply(transform): """ Decorate a function with a transform function that is invoked on results returned from the decorated function. >>> @apply(reversed) ... def get_numbers(start): ... "doc for get_numbers" ... return range(start, start+3) >>> list(get_numbers(4)) [6, 5, 4] >>> get_numbers.__doc__ 'doc for get_numbers' """ def wrap(func): return functools.wraps(func)(compose(transform, func)) return wrap def result_invoke(action): r""" Decorate a function with an action function that is invoked on the results returned from the decorated function (for its side-effect), then return the original result. >>> @result_invoke(print) ... def add_two(a, b): ... return a + b >>> x = add_two(2, 3) 5 >>> x 5 """ def wrap(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) action(result) return result return wrapper return wrap def invoke(f, *args, **kwargs): """ Call a function for its side effect after initialization. The benefit of using the decorator instead of simply invoking a function after defining it is that it makes explicit the author's intent for the function to be called immediately. Whereas if one simply calls the function immediately, it's less obvious if that was intentional or incidental. It also avoids repeating the name - the two actions, defining the function and calling it immediately are modeled separately, but linked by the decorator construct. The benefit of having a function construct (opposed to just invoking some behavior inline) is to serve as a scope in which the behavior occurs. 
It avoids polluting the global namespace with local variables, provides an anchor on which to attach documentation (docstring), keeps the behavior logically separated (instead of conceptually separated or not separated at all), and provides potential to re-use the behavior for testing or other purposes. This function is named as a pithy way to communicate, "call this function primarily for its side effect", or "while defining this function, also take it aside and call it". It exists because there's no Python construct for "define and call" (nor should there be, as decorators serve this need just fine). The behavior happens immediately and synchronously. >>> @invoke ... def func(): print("called") called >>> func() called Use functools.partial to pass parameters to the initial call >>> @functools.partial(invoke, name='bingo') ... def func(name): print("called with", name) called with bingo """ f(*args, **kwargs) return f def call_aside(*args, **kwargs): """ Deprecated name for invoke. """ warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning) return invoke(*args, **kwargs) class Throttler: """ Rate-limit a function (or other callable) """ def __init__(self, func, max_rate=float('Inf')): if isinstance(func, Throttler): func = func.func self.func = func self.max_rate = max_rate self.reset() def reset(self): self.last_called = 0 def __call__(self, *args, **kwargs): self._wait() return self.func(*args, **kwargs) def _wait(self): "ensure at least 1/max_rate seconds from last call" elapsed = time.time() - self.last_called must_wait = 1 / self.max_rate - elapsed time.sleep(max(0, must_wait)) self.last_called = time.time() def __get__(self, obj, type=None): return first_invoke(self._wait, functools.partial(self.func, obj)) def first_invoke(func1, func2): """ Return a function that when invoked will invoke func1 without any parameters (for its side-effect) and then invoke func2 with whatever parameters were passed, returning its result. """ def wrapper(*args, **kwargs): func1() return func2(*args, **kwargs) return wrapper def retry_call(func, cleanup=lambda: None, retries=0, trap=()): """ Given a callable func, trap the indicated exceptions for up to 'retries' times, invoking cleanup on the exception. On the final attempt, allow any exceptions to propagate. """ attempts = itertools.count() if retries == float('inf') else range(retries) for attempt in attempts: try: return func() except trap: cleanup() return func() def retry(*r_args, **r_kwargs): """ Decorator wrapper for retry_call. Accepts arguments to retry_call except func and then returns a decorator for the decorated function. Ex: >>> @retry(retries=3) ... def my_func(a, b): ... "this is my funk" ... print(a, b) >>> my_func.__doc__ 'this is my funk' """ def decorate(func): @functools.wraps(func) def wrapper(*f_args, **f_kwargs): bound = functools.partial(func, *f_args, **f_kwargs) return retry_call(bound, *r_args, **r_kwargs) return wrapper return decorate def print_yielded(func): """ Convert a generator into a function that prints all yielded elements >>> @print_yielded ... def x(): ... 
yield 3; yield None >>> x() 3 None """ print_all = functools.partial(map, print) print_results = compose(more_itertools.consume, print_all, func) return functools.wraps(func)(print_results) def pass_none(func): """ Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None) """ @functools.wraps(func) def wrapper(param, *args, **kwargs): if param is not None: return func(param, *args, **kwargs) return wrapper def assign_params(func, namespace): """ Assign parameters from namespace where func solicits. >>> def func(x, y=3): ... print(x, y) >>> assigned = assign_params(func, dict(x=2, z=4)) >>> assigned() 2 3 The usual errors are raised if a function doesn't receive its required parameters: >>> assigned = assign_params(func, dict(y=3, z=4)) >>> assigned() Traceback (most recent call last): TypeError: func() ...argument... It even works on methods: >>> class Handler: ... def meth(self, arg): ... print(arg) >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() crystal """ sig = inspect.signature(func) params = sig.parameters.keys() call_ns = {k: namespace[k] for k in params if k in namespace} return functools.partial(func, **call_ns) def save_method_args(method): """ Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () """ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) def wrapper(self, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) return method(self, *args, **kwargs) return wrapper def except_(*exceptions, replace=None, use=None): """ Replace the indicated exceptions, if raised, with the indicated literal replacement or evaluated expression (if present). >>> safe_int = except_(ValueError)(int) >>> safe_int('five') >>> safe_int('5') 5 Specify a literal replacement with ``replace``. >>> safe_int_r = except_(ValueError, replace=0)(int) >>> safe_int_r('five') 0 Provide an expression to ``use`` to pass through particular parameters. >>> safe_int_pt = except_(ValueError, use='args[0]')(int) >>> safe_int_pt('five') 'five' """ def decorate(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except exceptions: try: return eval(use) except TypeError: return replace return wrapper return decorate
15,056
Python
26.032316
88
0.613443
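compose and once above are the workhorses of this module. A short sketch, assuming both are importable through pkg_resources.extern as other vendored modules do:

from pkg_resources.extern.jaraco.functools import compose, once

strip_and_upper = compose(str.upper, str.strip)  # applies right to left
print(strip_and_upper("  hello  "))              # HELLO

setup = once(lambda: print("expensive setup"))
setup()  # expensive setup
setup()  # no output; the saved result is returned instead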
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/jaraco/context.py
import os import subprocess import contextlib import functools import tempfile import shutil import operator import warnings @contextlib.contextmanager def pushd(dir): """ >>> tmp_path = getfixture('tmp_path') >>> with pushd(tmp_path): ... assert os.getcwd() == os.fspath(tmp_path) >>> assert os.getcwd() != os.fspath(tmp_path) """ orig = os.getcwd() os.chdir(dir) try: yield dir finally: os.chdir(orig) @contextlib.contextmanager def tarball_context(url, target_dir=None, runner=None, pushd=pushd): """ Get a tarball, extract it, change to that directory, yield, then clean up. `runner` is the function to invoke commands. `pushd` is a context manager for changing the directory. """ if target_dir is None: target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') if runner is None: runner = functools.partial(subprocess.check_call, shell=True) else: warnings.warn("runner parameter is deprecated", DeprecationWarning) # In the tar command, use --strip-components=1 to strip the first path and # then # use -C to cause the files to be extracted to {target_dir}. This ensures # that we always know where the files were extracted. runner('mkdir {target_dir}'.format(**vars())) try: getter = 'wget {url} -O -' extract = 'tar x{compression} --strip-components=1 -C {target_dir}' cmd = ' | '.join((getter, extract)) runner(cmd.format(compression=infer_compression(url), **vars())) with pushd(target_dir): yield target_dir finally: runner('rm -Rf {target_dir}'.format(**vars())) def infer_compression(url): """ Given a URL or filename, infer the compression code for tar. >>> infer_compression('http://foo/bar.tar.gz') 'z' >>> infer_compression('http://foo/bar.tgz') 'z' >>> infer_compression('file.bz') 'j' >>> infer_compression('file.xz') 'J' """ # cheat and just assume it's the last two characters compression_indicator = url[-2:] mapping = dict(gz='z', bz='j', xz='J') # Assume 'z' (gzip) if no match return mapping.get(compression_indicator, 'z') @contextlib.contextmanager def temp_dir(remover=shutil.rmtree): """ Create a temporary directory context. Pass a custom remover to override the removal behavior. >>> import pathlib >>> with temp_dir() as the_dir: ... assert os.path.isdir(the_dir) ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents') >>> assert not os.path.exists(the_dir) """ temp_dir = tempfile.mkdtemp() try: yield temp_dir finally: remover(temp_dir) @contextlib.contextmanager def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): """ Check out the repo indicated by url. If dest_ctx is supplied, it should be a context manager to yield the target directory for the check out. """ exe = 'git' if 'git' in url else 'hg' with dest_ctx() as repo_dir: cmd = [exe, 'clone', url, repo_dir] if branch: cmd.extend(['--branch', branch]) devnull = open(os.path.devnull, 'w') stdout = devnull if quiet else None subprocess.check_call(cmd, stdout=stdout) yield repo_dir @contextlib.contextmanager def null(): """ A null context suitable to stand in for a meaningful context. >>> with null() as value: ... assert value is None """ yield class ExceptionTrap: """ A context manager that will catch certain exceptions and provide an indication they occurred. >>> with ExceptionTrap() as trap: ... raise Exception() >>> bool(trap) True >>> with ExceptionTrap() as trap: ... pass >>> bool(trap) False >>> with ExceptionTrap(ValueError) as trap: ... 
raise ValueError("1 + 1 is not 3") >>> bool(trap) True >>> trap.value ValueError('1 + 1 is not 3') >>> trap.tb <traceback object at ...> >>> with ExceptionTrap(ValueError) as trap: ... raise Exception() Traceback (most recent call last): ... Exception >>> bool(trap) False """ exc_info = None, None, None def __init__(self, exceptions=(Exception,)): self.exceptions = exceptions def __enter__(self): return self @property def type(self): return self.exc_info[0] @property def value(self): return self.exc_info[1] @property def tb(self): return self.exc_info[2] def __exit__(self, *exc_info): type = exc_info[0] matches = type and issubclass(type, self.exceptions) if matches: self.exc_info = exc_info return matches def __bool__(self): return bool(self.type) def raises(self, func, *, _test=bool): """ Wrap func and replace the result with the truth value of the trap (True if an exception occurred). First, give the decorator an alias to support Python 3.8 Syntax. >>> raises = ExceptionTrap(ValueError).raises Now decorate a function that always fails. >>> @raises ... def fail(): ... raise ValueError('failed') >>> fail() True """ @functools.wraps(func) def wrapper(*args, **kwargs): with ExceptionTrap(self.exceptions) as trap: func(*args, **kwargs) return _test(trap) return wrapper def passes(self, func): """ Wrap func and replace the result with the truth value of the trap (True if no exception). First, give the decorator an alias to support Python 3.8 Syntax. >>> passes = ExceptionTrap(ValueError).passes Now decorate a function that always fails. >>> @passes ... def fail(): ... raise ValueError('failed') >>> fail() False """ return self.raises(func, _test=operator.not_) class suppress(contextlib.suppress, contextlib.ContextDecorator): """ A version of contextlib.suppress with decorator support. >>> @suppress(KeyError) ... def key_error(): ... {}[''] >>> key_error() """ class on_interrupt(contextlib.ContextDecorator): """ Replace a KeyboardInterrupt with SystemExit(1) >>> def do_interrupt(): ... raise KeyboardInterrupt() >>> on_interrupt('error')(do_interrupt)() Traceback (most recent call last): ... SystemExit: 1 >>> on_interrupt('error', code=255)(do_interrupt)() Traceback (most recent call last): ... SystemExit: 255 >>> on_interrupt('suppress')(do_interrupt)() >>> with __import__('pytest').raises(KeyboardInterrupt): ... on_interrupt('ignore')(do_interrupt)() """ def __init__( self, action='error', # py3.7 compat # /, code=1, ): self.action = action self.code = code def __enter__(self): return self def __exit__(self, exctype, excinst, exctb): if exctype is not KeyboardInterrupt or self.action == 'ignore': return elif self.action == 'error': raise SystemExit(self.code) from excinst return self.action == 'suppress'
7,460
Python
24.816609
85
0.586863
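ExceptionTrap above records whether a matching exception fired without re-raising it. A minimal usage sketch, assuming the class is importable through pkg_resources.extern:

from pkg_resources.extern.jaraco.context import ExceptionTrap

trap = ExceptionTrap(ZeroDivisionError)
with trap:
    1 / 0
print(bool(trap), trap.type.__name__)  # True ZeroDivisionError

with ExceptionTrap(ZeroDivisionError) as clean:
    pass
print(bool(clean))  # False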
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/jaraco/text/__init__.py
import re import itertools import textwrap import functools try: from importlib.resources import files # type: ignore except ImportError: # pragma: nocover from pkg_resources.extern.importlib_resources import files # type: ignore from pkg_resources.extern.jaraco.functools import compose, method_cache from pkg_resources.extern.jaraco.context import ExceptionTrap def substitution(old, new): """ Return a function that will perform a substitution on a string """ return lambda s: s.replace(old, new) def multi_substitution(*substitutions): """ Take a sequence of pairs specifying substitutions, and create a function that performs those substitutions. >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') 'baz' """ substitutions = itertools.starmap(substitution, substitutions) # compose function applies last function first, so reverse the # substitutions to get the expected order. substitutions = reversed(tuple(substitutions)) return compose(*substitutions) class FoldedCase(str): """ A case insensitive string class; behaves just like str except compares equal when the only variation is case. >>> s = FoldedCase('hello world') >>> s == 'Hello World' True >>> 'Hello World' == s True >>> s != 'Hello World' False >>> s.index('O') 4 >>> s.split('O') ['hell', ' w', 'rld'] >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) ['alpha', 'Beta', 'GAMMA'] Sequence membership is straightforward. >>> "Hello World" in [s] True >>> s in ["Hello World"] True You may test for set inclusion, but candidate and elements must both be folded. >>> FoldedCase("Hello World") in {s} True >>> s in {FoldedCase("Hello World")} True String inclusion works as long as the FoldedCase object is on the right. >>> "hello" in FoldedCase("Hello World") True But not if the FoldedCase object is on the left: >>> FoldedCase('hello') in 'Hello World' False In that case, use ``in_``: >>> FoldedCase('hello').in_('Hello World') True >>> FoldedCase('hello') > FoldedCase('Hello') False """ def __lt__(self, other): return self.lower() < other.lower() def __gt__(self, other): return self.lower() > other.lower() def __eq__(self, other): return self.lower() == other.lower() def __ne__(self, other): return self.lower() != other.lower() def __hash__(self): return hash(self.lower()) def __contains__(self, other): return super().lower().__contains__(other.lower()) def in_(self, other): "Does self appear in other?" return self in FoldedCase(other) # cache lower since it's likely to be called frequently. @method_cache def lower(self): return super().lower() def index(self, sub): return self.lower().index(sub.lower()) def split(self, splitter=' ', maxsplit=0): pattern = re.compile(re.escape(splitter), re.I) return pattern.split(self, maxsplit) # Python 3.8 compatibility _unicode_trap = ExceptionTrap(UnicodeDecodeError) @_unicode_trap.passes def is_decodable(value): r""" Return True if the supplied value is decodable (using the default encoding). >>> is_decodable(b'\xff') False >>> is_decodable(b'\x32') True """ value.decode() def is_binary(value): r""" Return True if the value appears to be binary (that is, it's a byte string and isn't decodable). >>> is_binary(b'\xff') True >>> is_binary('\xff') False """ return isinstance(value, bytes) and not is_decodable(value) def trim(s): r""" Trim something like a docstring to remove the whitespace that is common due to indentation and formatting. 
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n") 'foo = bar\n\tbar = baz' """ return textwrap.dedent(s).strip() def wrap(s): """ Wrap lines of text, retaining existing newlines as paragraph markers. >>> print(wrap(lorem_ipsum)) Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. <BLANKLINE> Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst. """ paragraphs = s.splitlines() wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) return '\n\n'.join(wrapped) def unwrap(s): r""" Given a multi-line string, return an unwrapped version. >>> wrapped = wrap(lorem_ipsum) >>> wrapped.count('\n') 20 >>> unwrapped = unwrap(wrapped) >>> unwrapped.count('\n') 1 >>> print(unwrapped) Lorem ipsum dolor sit amet, consectetur adipiscing ... Curabitur pretium tincidunt lacus. Nulla gravida orci ... """ paragraphs = re.split(r'\n\n+', s) cleaned = (para.replace('\n', ' ') for para in paragraphs) return '\n'.join(cleaned) class Splitter(object): """object that will split a string with the given arguments for each call >>> s = Splitter(',') >>> s('hello, world, this is your, master calling') ['hello', ' world', ' this is your', ' master calling'] """ def __init__(self, *args): self.args = args def __call__(self, s): return s.split(*self.args) def indent(string, prefix=' ' * 4): """ >>> indent('foo') ' foo' """ return prefix + string class WordSet(tuple): """ Given an identifier, return the words that identifier represents, whether in camel case, underscore-separated, etc. >>> WordSet.parse("camelCase") ('camel', 'Case') >>> WordSet.parse("under_sep") ('under', 'sep') Acronyms should be retained >>> WordSet.parse("firstSNL") ('first', 'SNL') >>> WordSet.parse("you_and_I") ('you', 'and', 'I') >>> WordSet.parse("A simple test") ('A', 'simple', 'test') Multiple caps should not interfere with the first cap of another word. >>> WordSet.parse("myABCClass") ('my', 'ABC', 'Class') The result is a WordSet, so you can get the form you need. >>> WordSet.parse("myABCClass").underscore_separated() 'my_ABC_Class' >>> WordSet.parse('a-command').camel_case() 'ACommand' >>> WordSet.parse('someIdentifier').lowered().space_separated() 'some identifier' Slices of the result should return another WordSet. 
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() 'out_of_context' >>> WordSet.from_class_name(WordSet()).lowered().space_separated() 'word set' >>> example = WordSet.parse('figured it out') >>> example.headless_camel_case() 'figuredItOut' >>> example.dash_separated() 'figured-it-out' """ _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') def capitalized(self): return WordSet(word.capitalize() for word in self) def lowered(self): return WordSet(word.lower() for word in self) def camel_case(self): return ''.join(self.capitalized()) def headless_camel_case(self): words = iter(self) first = next(words).lower() new_words = itertools.chain((first,), WordSet(words).camel_case()) return ''.join(new_words) def underscore_separated(self): return '_'.join(self) def dash_separated(self): return '-'.join(self) def space_separated(self): return ' '.join(self) def trim_right(self, item): """ Remove the item from the end of the set. >>> WordSet.parse('foo bar').trim_right('foo') ('foo', 'bar') >>> WordSet.parse('foo bar').trim_right('bar') ('foo',) >>> WordSet.parse('').trim_right('bar') () """ return self[:-1] if self and self[-1] == item else self def trim_left(self, item): """ Remove the item from the beginning of the set. >>> WordSet.parse('foo bar').trim_left('foo') ('bar',) >>> WordSet.parse('foo bar').trim_left('bar') ('foo', 'bar') >>> WordSet.parse('').trim_left('bar') () """ return self[1:] if self and self[0] == item else self def trim(self, item): """ >>> WordSet.parse('foo bar').trim('foo') ('bar',) """ return self.trim_left(item).trim_right(item) def __getitem__(self, item): result = super(WordSet, self).__getitem__(item) if isinstance(item, slice): result = WordSet(result) return result @classmethod def parse(cls, identifier): matches = cls._pattern.finditer(identifier) return WordSet(match.group(0) for match in matches) @classmethod def from_class_name(cls, subject): return cls.parse(subject.__class__.__name__) # for backward compatibility words = WordSet.parse def simple_html_strip(s): r""" Remove HTML from the string `s`. >>> str(simple_html_strip('')) '' >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise')) A stormy day in paradise >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.')) Somebody tell the truth. >>> print(simple_html_strip('What about<br/>\nmultiple lines?')) What about multiple lines? """ html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL) texts = (match.group(3) or '' for match in html_stripper.finditer(s)) return ''.join(texts) class SeparatedValues(str): """ A string separated by a separator. Overrides __iter__ for getting the values. >>> list(SeparatedValues('a,b,c')) ['a', 'b', 'c'] Whitespace is stripped and empty values are discarded. >>> list(SeparatedValues(' a, b , c, ')) ['a', 'b', 'c'] """ separator = ',' def __iter__(self): parts = self.split(self.separator) return filter(None, (part.strip() for part in parts)) class Stripper: r""" Given a series of lines, find the common prefix and strip it from them. >>> lines = [ ... 'abcdefg\n', ... 'abc\n', ... 'abcde\n', ... ] >>> res = Stripper.strip_prefix(lines) >>> res.prefix 'abc' >>> list(res.lines) ['defg\n', '\n', 'de\n'] If no prefix is common, nothing should be stripped. >>> lines = [ ... 'abcd\n', ... '1234\n', ... 
] >>> res = Stripper.strip_prefix(lines) >>> res.prefix '' >>> list(res.lines) ['abcd\n', '1234\n'] """ def __init__(self, prefix, lines): self.prefix = prefix self.lines = map(self, lines) @classmethod def strip_prefix(cls, lines): prefix_lines, lines = itertools.tee(lines) prefix = functools.reduce(cls.common_prefix, prefix_lines) return cls(prefix, lines) def __call__(self, line): if not self.prefix: return line null, prefix, rest = line.partition(self.prefix) return rest @staticmethod def common_prefix(s1, s2): """ Return the common prefix of two lines. """ index = min(len(s1), len(s2)) while s1[:index] != s2[:index]: index -= 1 return s1[:index] def remove_prefix(text, prefix): """ Remove the prefix from the text if it exists. >>> remove_prefix('underwhelming performance', 'underwhelming ') 'performance' >>> remove_prefix('something special', 'sample') 'something special' """ null, prefix, rest = text.rpartition(prefix) return rest def remove_suffix(text, suffix): """ Remove the suffix from the text if it exists. >>> remove_suffix('name.git', '.git') 'name' >>> remove_suffix('something special', 'sample') 'something special' """ rest, suffix, null = text.partition(suffix) return rest def normalize_newlines(text): r""" Replace alternate newlines with the canonical newline. >>> normalize_newlines('Lorem Ipsum\u2029') 'Lorem Ipsum\n' >>> normalize_newlines('Lorem Ipsum\r\n') 'Lorem Ipsum\n' >>> normalize_newlines('Lorem Ipsum\x85') 'Lorem Ipsum\n' """ newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029'] pattern = '|'.join(newlines) return re.sub(pattern, '\n', text) def _nonblank(str): return str and not str.startswith('#') @functools.singledispatch def yield_lines(iterable): r""" Yield valid lines of a string or iterable. >>> list(yield_lines('')) [] >>> list(yield_lines(['foo', 'bar'])) ['foo', 'bar'] >>> list(yield_lines('foo\nbar')) ['foo', 'bar'] >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) ['foo', 'baz #comment'] >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) ['foo', 'bar', 'baz', 'bing'] """ return itertools.chain.from_iterable(map(yield_lines, iterable)) @yield_lines.register(str) def _(text): return filter(_nonblank, map(str.strip, text.splitlines())) def drop_comment(line): """ Drop comments. >>> drop_comment('foo # bar') 'foo' A hash without a space may be in a URL. >>> drop_comment('http://example.com/foo#bar') 'http://example.com/foo#bar' """ return line.partition(' #')[0] def join_continuation(lines): r""" Join lines continued by a trailing backslash. >>> list(join_continuation(['foo \\', 'bar', 'baz'])) ['foobar', 'baz'] >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) ['foobarbaz'] Not sure why, but... The character preceding the backslash is also elided. >>> list(join_continuation(['goo\\', 'dly'])) ['godly'] A terrible idea, but... If no line is available to continue, suppress the lines. >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) ['foo'] """ lines = iter(lines) for item in lines: while item.endswith('\\'): try: item = item[:-2].strip() + next(lines) except StopIteration: return yield item
15,526
Python
24.878333
78
0.598673
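A short usage sketch of the text helpers in the file above. The import path is an assumption: upstream the module is published as jaraco.text, although here it ships vendored inside pkg_resources; the expected values mirror the file's own doctests.

# Exercise a few of the helpers defined above (import path assumed).
from jaraco.text import FoldedCase, Splitter, WordSet, trim

# FoldedCase compares and hashes case-insensitively.
assert FoldedCase('Hello World') == 'hello world'
assert 'WORLD' in FoldedCase('hello world')

# WordSet parses an identifier and re-emits it in another convention.
assert WordSet.parse('myABCClass').underscore_separated() == 'my_ABC_Class'

# Splitter is str.split with the arguments fixed up front.
assert Splitter(',')('a,b,c') == ['a', 'b', 'c']

# trim() dedents and strips docstring-style text.
assert trim('\n\tfoo = bar\n') == 'foo = bar'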
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/extern/__init__.py
import importlib.util import sys class VendorImporter: """ A PEP 302 meta path importer for finding optionally-vendored or otherwise naturally-installed packages from root_name. """ def __init__(self, root_name, vendored_names=(), vendor_pkg=None): self.root_name = root_name self.vendored_names = set(vendored_names) self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') @property def search_path(self): """ Search first the vendor package then as a natural package. """ yield self.vendor_pkg + '.' yield '' def _module_matches_namespace(self, fullname): """Figure out if the target module is vendored.""" root, base, target = fullname.partition(self.root_name + '.') return not root and any(map(target.startswith, self.vendored_names)) def load_module(self, fullname): """ Iterate over the search path to locate and load fullname. """ root, base, target = fullname.partition(self.root_name + '.') for prefix in self.search_path: try: extant = prefix + target __import__(extant) mod = sys.modules[extant] sys.modules[fullname] = mod return mod except ImportError: pass else: raise ImportError( "The '{target}' package is required; " "normally this is bundled with this package so if you get " "this warning, consult the packager of your " "distribution.".format(**locals()) ) def create_module(self, spec): return self.load_module(spec.name) def exec_module(self, module): pass def find_spec(self, fullname, path=None, target=None): """Return a module spec for vendored names.""" return ( importlib.util.spec_from_loader(fullname, self) if self._module_matches_namespace(fullname) else None ) def install(self): """ Install this importer into sys.meta_path if not already present. """ if self not in sys.meta_path: sys.meta_path.append(self) names = ( 'packaging', 'platformdirs', 'jaraco', 'importlib_resources', 'more_itertools', ) VendorImporter(__name__, names).install()
2,442
Python
29.160493
78
0.57371
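To make the resolution order concrete, here is a small sketch of how a VendorImporter maps dotted names; the 'myapp' root used below is purely illustrative, not a real package.

# Illustrative only: a VendorImporter rooted at a hypothetical
# 'myapp.extern' package, vendoring just 'packaging'.
from pkg_resources.extern import VendorImporter

importer = VendorImporter('myapp.extern', vendored_names={'packaging'})

# The vendored copy is tried first, then the naturally-installed one.
assert list(importer.search_path) == ['myapp._vendor.', '']

# Only names under the root that begin with a vendored name are claimed.
assert importer._module_matches_namespace('myapp.extern.packaging.version')
assert not importer._module_matches_namespace('myapp.extern.requests')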
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/_distutils_hack/__init__.py
# don't import any costly modules import sys import os is_pypy = '__pypy__' in sys.builtin_module_names def warn_distutils_present(): if 'distutils' not in sys.modules: return if is_pypy and sys.version_info < (3, 7): # PyPy for 3.6 unconditionally imports distutils, so bypass the warning # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 return import warnings warnings.warn( "Distutils was imported before Setuptools, but importing Setuptools " "also replaces the `distutils` module in `sys.modules`. This may lead " "to undesirable behaviors or errors. To avoid these issues, avoid " "using distutils directly, ensure that setuptools is installed in the " "traditional way (e.g. not an editable install), and/or make sure " "that setuptools is always imported before distutils." ) def clear_distutils(): if 'distutils' not in sys.modules: return import warnings warnings.warn("Setuptools is replacing distutils.") mods = [ name for name in sys.modules if name == "distutils" or name.startswith("distutils.") ] for name in mods: del sys.modules[name] def enabled(): """ Allow selection of distutils by environment variable. """ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') return which == 'local' def ensure_local_distutils(): import importlib clear_distutils() # With the DistutilsMetaFinder in place, # perform an import to cause distutils to be # loaded from setuptools._distutils. Ref #2906. with shim(): importlib.import_module('distutils') # check that submodules load as expected core = importlib.import_module('distutils.core') assert '_distutils' in core.__file__, core.__file__ assert 'setuptools._distutils.log' not in sys.modules def do_override(): """ Ensure that the local copy of distutils is preferred over stdlib. See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 for more motivation. """ if enabled(): warn_distutils_present() ensure_local_distutils() class _TrivialRe: def __init__(self, *patterns): self._patterns = patterns def match(self, string): return all(pat in string for pat in self._patterns) class DistutilsMetaFinder: def find_spec(self, fullname, path, target=None): # optimization: only consider top level modules and those # found in the CPython test suite. if path is not None and not fullname.startswith('test.'): return method_name = 'spec_for_{fullname}'.format(**locals()) method = getattr(self, method_name, lambda: None) return method() def spec_for_distutils(self): if self.is_cpython(): return import importlib import importlib.abc import importlib.util try: mod = importlib.import_module('setuptools._distutils') except Exception: # There are a couple of cases where setuptools._distutils # may not be present: # - An older Setuptools without a local distutils is # taking precedence. Ref #2957. # - Path manipulation during sitecustomize removes # setuptools from the path but only after the hook # has been loaded. Ref #2980. # In either case, fall back to stdlib behavior. return class DistutilsLoader(importlib.abc.Loader): def create_module(self, spec): mod.__name__ = 'distutils' return mod def exec_module(self, module): pass return importlib.util.spec_from_loader( 'distutils', DistutilsLoader(), origin=mod.__file__ ) @staticmethod def is_cpython(): """ Suppress supplying distutils for CPython (build and tests). Ref #2965 and #3007. """ return os.path.isfile('pybuilddir.txt') def spec_for_pip(self): """ Ensure stdlib distutils when running under pip. See pypa/pip#8761 for rationale. 
""" if sys.version_info >= (3, 12) or self.pip_imported_during_build(): return clear_distutils() self.spec_for_distutils = lambda: None @classmethod def pip_imported_during_build(cls): """ Detect if pip is being imported in a build script. Ref #2355. """ import traceback return any( cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None) ) @staticmethod def frame_file_is_setup(frame): """ Return True if the indicated frame suggests a setup.py file. """ # some frames may not have __file__ (#2940) return frame.f_globals.get('__file__', '').endswith('setup.py') def spec_for_sensitive_tests(self): """ Ensure stdlib distutils when running select tests under CPython. python/cpython#91169 """ clear_distutils() self.spec_for_distutils = lambda: None sensitive_tests = ( [ 'test.test_distutils', 'test.test_peg_generator', 'test.test_importlib', ] if sys.version_info < (3, 10) else [ 'test.test_distutils', ] ) for name in DistutilsMetaFinder.sensitive_tests: setattr( DistutilsMetaFinder, f'spec_for_{name}', DistutilsMetaFinder.spec_for_sensitive_tests, ) DISTUTILS_FINDER = DistutilsMetaFinder() def add_shim(): DISTUTILS_FINDER in sys.meta_path or insert_shim() class shim: def __enter__(self): insert_shim() def __exit__(self, exc, value, tb): _remove_shim() def insert_shim(): sys.meta_path.insert(0, DISTUTILS_FINDER) def _remove_shim(): try: sys.meta_path.remove(DISTUTILS_FINDER) except ValueError: pass if sys.version_info < (3, 12): # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632) remove_shim = _remove_shim
6,299
Python
26.631579
119
0.610414
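A sketch of how the hook behaves at runtime. Whether distutils actually resolves to setuptools._distutils depends on the installed setuptools and on whether distutils was already imported, so treat the final print as indicative only.

# With SETUPTOOLS_USE_DISTUTILS at its default ('local'), the override
# is enabled; the shim context manager installs the meta-path finder
# only for the duration of the block.
import os
import _distutils_hack

os.environ.setdefault('SETUPTOOLS_USE_DISTUTILS', 'local')
assert _distutils_hack.enabled()

with _distutils_hack.shim():
    import distutils  # resolved through DistutilsMetaFinder

# When setuptools._distutils is importable, this points inside setuptools.
print(distutils.__file__)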
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/_distutils_hack/override.py
__import__('_distutils_hack').do_override()
44
Python
21.499989
43
0.659091
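The module above is intentionally a single statement: importing it is the whole API, and the import's side effect is do_override(). A spelled-out equivalent, assuming setuptools is available (the override loads setuptools._distutils):

# Equivalent to `import _distutils_hack.override`, written long-hand.
import _distutils_hack

_distutils_hack.do_override()
# In practice the import is triggered very early (for example from a .pth
# file processed at interpreter startup), so the override is in place
# before any user code imports distutils.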
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/exceptions.py
''' Custom exceptions raised by pytz. ''' __all__ = [ 'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError', 'NonExistentTimeError', ] class Error(Exception): '''Base class for all exceptions raised by the pytz library''' class UnknownTimeZoneError(KeyError, Error): '''Exception raised when pytz is passed an unknown timezone. >>> isinstance(UnknownTimeZoneError(), LookupError) True This class is actually a subclass of KeyError to provide backwards compatibility with code relying on the undocumented behavior of earlier pytz releases. >>> isinstance(UnknownTimeZoneError(), KeyError) True And also a subclass of pytz.exceptions.Error, as are other pytz exceptions. >>> isinstance(UnknownTimeZoneError(), Error) True ''' pass class InvalidTimeError(Error): '''Base class for invalid time exceptions.''' class AmbiguousTimeError(InvalidTimeError): '''Exception raised when attempting to create an ambiguous wallclock time. At the end of a DST transition period, a particular wallclock time will occur twice (once before the clocks are set back, once after). Both possibilities may be correct, unless further information is supplied. See DstTzInfo.normalize() for more info ''' class NonExistentTimeError(InvalidTimeError): '''Exception raised when attempting to create a wallclock time that cannot exist. At the start of a DST transition period, the wallclock time jumps forward. The instants jumped over never occur. '''
1,571
Python
25.2
78
0.723106
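Since every exception above derives from pytz.exceptions.Error, a single handler can cover all pytz failures; a minimal sketch:

# UnknownTimeZoneError keeps KeyError as a base for backwards
# compatibility, and Error as the common pytz base.
import pytz
from pytz.exceptions import Error, UnknownTimeZoneError

try:
    pytz.timezone('Mars/Olympus_Mons')  # no such zone
except UnknownTimeZoneError as exc:
    assert isinstance(exc, KeyError) and isinstance(exc, Error)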
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/tzinfo.py
'''Base classes and helpers for building zone specific tzinfo classes''' from datetime import datetime, timedelta, tzinfo from bisect import bisect_right try: set except NameError: from sets import Set as set import pytz from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError __all__ = [] _timedelta_cache = {} def memorized_timedelta(seconds): '''Create only one instance of each distinct timedelta''' try: return _timedelta_cache[seconds] except KeyError: delta = timedelta(seconds=seconds) _timedelta_cache[seconds] = delta return delta _epoch = datetime.utcfromtimestamp(0) _datetime_cache = {0: _epoch} def memorized_datetime(seconds): '''Create only one instance of each distinct datetime''' try: return _datetime_cache[seconds] except KeyError: # NB. We can't just do datetime.utcfromtimestamp(seconds) as this # fails with negative values under Windows (Bug #90096) dt = _epoch + timedelta(seconds=seconds) _datetime_cache[seconds] = dt return dt _ttinfo_cache = {} def memorized_ttinfo(*args): '''Create only one instance of each distinct tuple''' try: return _ttinfo_cache[args] except KeyError: ttinfo = ( memorized_timedelta(args[0]), memorized_timedelta(args[1]), args[2] ) _ttinfo_cache[args] = ttinfo return ttinfo _notime = memorized_timedelta(0) def _to_seconds(td): '''Convert a timedelta to seconds''' return td.seconds + td.days * 24 * 60 * 60 class BaseTzInfo(tzinfo): # Overridden in subclass _utcoffset = None _tzname = None zone = None def __str__(self): return self.zone class StaticTzInfo(BaseTzInfo): '''A timezone that has a constant offset from UTC These timezones are rare, as most locations have changed their offset at some point in their history ''' def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if dt.tzinfo is not None and dt.tzinfo is not self: raise ValueError('fromutc: dt.tzinfo is not self') return (dt + self._utcoffset).replace(tzinfo=self) def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return _notime def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._tzname def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime. This is normally a no-op, as StaticTzInfo timezones never have ambiguous cases to correct: >>> from pytz import timezone >>> gmt = timezone('GMT') >>> isinstance(gmt, StaticTzInfo) True >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt) >>> gmt.normalize(dt) is dt True The supported method of converting between timezones is to use datetime.astimezone(). 
Currently normalize() also works: >>> la = timezone('America/Los_Angeles') >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> gmt.normalize(dt).strftime(fmt) '2011-05-07 08:02:03 GMT (+0000)' ''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def __repr__(self): return '<StaticTzInfo %r>' % (self.zone,) def __reduce__(self): # Special pickle so the zone remains a singleton and to cope with # database changes. return pytz._p, (self.zone,) class DstTzInfo(BaseTzInfo): '''A timezone that has a variable offset from UTC The offset might change if daylight saving time comes into effect, or at a point in history when the region decides to change its timezone definition. ''' # Overridden in subclass # Sorted list of DST transition times, UTC _utc_transition_times = None # [(utcoffset, dstoffset, tzname)] corresponding to # _utc_transition_times entries _transition_info = None zone = None # Set in __init__ _tzinfos = None _dst = None # DST offset def __init__(self, _inf=None, _tzinfos=None): if _inf: self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = _inf else: _tzinfos = {} self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = ( self._transition_info[0]) _tzinfos[self._transition_info[0]] = self for inf in self._transition_info[1:]: if inf not in _tzinfos: _tzinfos[inf] = self.__class__(inf, _tzinfos) def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if (dt.tzinfo is not None and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos): raise ValueError('fromutc: dt.tzinfo is not self') dt = dt.replace(tzinfo=None) idx = max(0, bisect_right(self._utc_transition_times, dt) - 1) inf = self._transition_info[idx] return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf]) def normalize(self, dt): '''Correct the timezone information on the given datetime If date arithmetic crosses DST boundaries, the tzinfo is not magically adjusted. This method normalizes the tzinfo to the correct one. To test, first we need to do some setup >>> from pytz import timezone >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' We next create a datetime right on an end-of-DST transition point, the instant when the wallclocks are wound back one hour. >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' Now, if we subtract a few minutes from it, note that the timezone information has not changed. >>> before = loc_dt - timedelta(minutes=10) >>> before.strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' But we can fix that by calling the normalize method: >>> before = eastern.normalize(before) >>> before.strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' The supported method of converting between timezones is to use datetime.astimezone(). Currently, normalize() also works: >>> th = timezone('Asia/Bangkok') >>> am = timezone('Europe/Amsterdam') >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> am.normalize(dt).strftime(fmt) '2011-05-06 20:02:03 CEST (+0200)' ''' if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') # Convert dt in localtime to UTC offset = dt.tzinfo._utcoffset dt = dt.replace(tzinfo=None) dt = dt - offset # convert it back, and return it return self.fromutc(dt) def localize(self, dt, is_dst=False): '''Convert naive time to local time.
This method should be used to construct localtimes, rather than passing a tzinfo argument to a datetime constructor. is_dst is used to determine the correct timezone in the ambiguous period at the end of daylight saving time. >>> from pytz import timezone >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> amdam = timezone('Europe/Amsterdam') >>> dt = datetime(2004, 10, 31, 2, 0, 0) >>> loc_dt1 = amdam.localize(dt, is_dst=True) >>> loc_dt2 = amdam.localize(dt, is_dst=False) >>> loc_dt1.strftime(fmt) '2004-10-31 02:00:00 CEST (+0200)' >>> loc_dt2.strftime(fmt) '2004-10-31 02:00:00 CET (+0100)' >>> str(loc_dt2 - loc_dt1) '1:00:00' Use is_dst=None to raise an AmbiguousTimeError for ambiguous times at the end of daylight saving time >>> try: ... loc_dt1 = amdam.localize(dt, is_dst=None) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous is_dst defaults to False >>> amdam.localize(dt) == amdam.localize(dt, False) True is_dst is also used to determine the correct timezone in the wallclock times jumped over at the start of daylight saving time. >>> pacific = timezone('US/Pacific') >>> dt = datetime(2008, 3, 9, 2, 0, 0) >>> ploc_dt1 = pacific.localize(dt, is_dst=True) >>> ploc_dt2 = pacific.localize(dt, is_dst=False) >>> ploc_dt1.strftime(fmt) '2008-03-09 02:00:00 PDT (-0700)' >>> ploc_dt2.strftime(fmt) '2008-03-09 02:00:00 PST (-0800)' >>> str(ploc_dt2 - ploc_dt1) '1:00:00' Use is_dst=None to raise a NonExistentTimeError for these skipped times. >>> try: ... loc_dt1 = pacific.localize(dt, is_dst=None) ... except NonExistentTimeError: ... print('Non-existent') Non-existent ''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') # Find the two best possibilities. possible_loc_dt = set() for delta in [timedelta(days=-1), timedelta(days=1)]: loc_dt = dt + delta idx = max(0, bisect_right( self._utc_transition_times, loc_dt) - 1) inf = self._transition_info[idx] tzinfo = self._tzinfos[inf] loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo)) if loc_dt.replace(tzinfo=None) == dt: possible_loc_dt.add(loc_dt) if len(possible_loc_dt) == 1: return possible_loc_dt.pop() # If there are no possibly correct timezones, we are attempting # to convert a time that never happened - the time period jumped # during the start-of-DST transition period. if len(possible_loc_dt) == 0: # If we refuse to guess, raise an exception. if is_dst is None: raise NonExistentTimeError(dt) # If we are forcing the pre-DST side of the DST transition, we # obtain the correct timezone by winding the clock forward a few # hours. elif is_dst: return self.localize( dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6) # If we are forcing the post-DST side of the DST transition, we # obtain the correct timezone by winding the clock back. else: return self.localize( dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6) # If we get this far, we have multiple possible timezones - this # is an ambiguous case occurring during the end-of-DST transition. # If told to be strict, raise an exception since we have an # ambiguous case if is_dst is None: raise AmbiguousTimeError(dt) # Filter out the possibilities that don't match the requested # is_dst filtered_possible_loc_dt = [ p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst ] # Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1: return filtered_possible_loc_dt[0] if len(filtered_possible_loc_dt) == 0: filtered_possible_loc_dt = list(possible_loc_dt) # If we get this far, we are in a weird timezone transition # where the clocks have been wound back but is_dst is the same # in both (e.g. Europe/Warsaw 1915 when they switched to CET). # At this point, we just have to guess unless we allow more # hints to be passed in (such as the UTC offset or abbreviation), # but that is just getting silly. # # Choose the earliest (by UTC) applicable timezone if is_dst=True # Choose the latest (by UTC) applicable timezone if is_dst=False # i.e., behave like end-of-DST transition dates = {} # utc -> local for local_dt in filtered_possible_loc_dt: utc_time = ( local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset) assert utc_time not in dates dates[utc_time] = local_dt return dates[[min, max][not is_dst](dates)] def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> str(tz.utcoffset(ambiguous, is_dst=False)) '-1 day, 20:30:00' >>> str(tz.utcoffset(ambiguous, is_dst=True)) '-1 day, 21:30:00' >>> try: ... tz.utcoffset(ambiguous) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._utcoffset else: return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> str(tz.dst(normal)) '1:00:00' >>> str(tz.dst(normal, is_dst=False)) '1:00:00' >>> str(tz.dst(normal, is_dst=True)) '1:00:00' >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> str(tz.dst(ambiguous, is_dst=False)) '0:00:00' >>> str(tz.dst(ambiguous, is_dst=True)) '1:00:00' >>> try: ... tz.dst(ambiguous) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._dst else: return self._dst def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> tz.tzname(normal) 'NDT' >>> tz.tzname(normal, is_dst=False) 'NDT' >>> tz.tzname(normal, is_dst=True) 'NDT' >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.tzname(ambiguous, is_dst=False) 'NST' >>> tz.tzname(ambiguous, is_dst=True) 'NDT' >>> try: ... tz.tzname(ambiguous) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous ''' if dt is None: return self.zone elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._tzname else: return self._tzname def __repr__(self): if self._dst: dst = 'DST' else: dst = 'STD' if self._utcoffset > _notime: return '<DstTzInfo %r %s+%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) else: return '<DstTzInfo %r %s%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) def __reduce__(self): # Special pickle so the zone remains a singleton and to cope with # database changes.
return pytz._p, ( self.zone, _to_seconds(self._utcoffset), _to_seconds(self._dst), self._tzname ) def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None): """Factory function for unpickling pytz tzinfo instances. This is shared for both StaticTzInfo and DstTzInfo instances, because database changes could cause a zone's implementation to switch between these two base classes and we can't break pickles on a pytz version upgrade. """ # Raises a KeyError if zone no longer exists, which should never happen # and would be a bug. tz = pytz.timezone(zone) # A StaticTzInfo - just return it if utcoffset is None: return tz # This pickle was created from a DstTzInfo. We need to # determine which of the list of tzinfo instances for this zone # to use in order to restore the state of any datetime instances using # it correctly. utcoffset = memorized_timedelta(utcoffset) dstoffset = memorized_timedelta(dstoffset) try: return tz._tzinfos[(utcoffset, dstoffset, tzname)] except KeyError: # The particular state requested in this timezone no longer exists. # This indicates a corrupt pickle, or the timezone database has been # corrected violently enough to make this particular # (utcoffset,dstoffset) no longer exist in the zone, or the # abbreviation has been changed. pass # See if we can find an entry differing only by tzname. Abbreviations # get changed from the initial guess by the database maintainers to # match reality when this information is discovered. for localized_tz in tz._tzinfos.values(): if (localized_tz._utcoffset == utcoffset and localized_tz._dst == dstoffset): return localized_tz # This (utcoffset, dstoffset) information has been removed from the # zone. Add it back. This might occur when the database maintainers have # corrected incorrect information. datetime instances using this # incorrect information will continue to do so, exactly as they were # before being pickled. This is purely an overly paranoid safety net - I # doubt this will ever be needed in real life. inf = (utcoffset, dstoffset, tzname) tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos) return tz._tzinfos[inf]
19,272
Python
32.344291
78
0.584164
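A condensed, runnable restatement of the doctests above, showing the localize()/normalize() workflow around a fall-back DST transition; the expected strings are taken directly from the docstrings in this file.

from datetime import datetime, timedelta
import pytz

eastern = pytz.timezone('US/Eastern')
fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

# 01:00 on 2002-10-27 occurs twice; is_dst selects the EST reading.
loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 0, 0), is_dst=False)
assert loc_dt.strftime(fmt) == '2002-10-27 01:00:00 EST (-0500)'

# Arithmetic across the transition leaves a stale tzinfo behind...
before = loc_dt - timedelta(minutes=10)
assert before.strftime(fmt) == '2002-10-27 00:50:00 EST (-0500)'

# ...and normalize() repairs it to the DST side of the boundary.
before = eastern.normalize(before)
assert before.strftime(fmt) == '2002-10-27 01:50:00 EDT (-0400)'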
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/__init__.py
''' datetime.tzinfo timezone definitions generated from the Olson timezone database: ftp://elsie.nci.nih.gov/pub/tz*.tar.gz See the datetime section of the Python Library Reference for information on how to use these modules. ''' import sys import datetime import os.path from pytz.exceptions import AmbiguousTimeError from pytz.exceptions import InvalidTimeError from pytz.exceptions import NonExistentTimeError from pytz.exceptions import UnknownTimeZoneError from pytz.lazy import LazyDict, LazyList, LazySet # noqa from pytz.tzinfo import unpickler, BaseTzInfo from pytz.tzfile import build_tzinfo # The IANA (nee Olson) database is updated several times a year. OLSON_VERSION = '2022g' VERSION = '2022.7.1' # pip compatible version number. __version__ = VERSION OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling __all__ = [ 'timezone', 'utc', 'country_timezones', 'country_names', 'AmbiguousTimeError', 'InvalidTimeError', 'NonExistentTimeError', 'UnknownTimeZoneError', 'all_timezones', 'all_timezones_set', 'common_timezones', 'common_timezones_set', 'BaseTzInfo', 'FixedOffset', ] if sys.version_info[0] > 2: # Python 3.x # Python 3.x doesn't have unicode(), making writing code # for Python 2.3 and Python 3.x a pain. unicode = str def ascii(s): r""" >>> ascii('Hello') 'Hello' >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnicodeEncodeError: ... """ if type(s) == bytes: s = s.decode('ASCII') else: s.encode('ASCII') # Raise an exception if not ASCII return s # But the string - not a byte string. else: # Python 2.x def ascii(s): r""" >>> ascii('Hello') 'Hello' >>> ascii(u'Hello') 'Hello' >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnicodeEncodeError: ... """ return s.encode('ASCII') def open_resource(name): """Open a resource from the zoneinfo subdir for reading. Uses the pkg_resources module if available and no standard file is found at the calculated location. It is possible to specify a different location for the zoneinfo subdir by using the PYTZ_TZDATADIR environment variable. """ name_parts = name.lstrip('/').split('/') for part in name_parts: if part == os.path.pardir or os.sep in part: raise ValueError('Bad path segment: %r' % part) zoneinfo_dir = os.environ.get('PYTZ_TZDATADIR', None) if zoneinfo_dir is not None: filename = os.path.join(zoneinfo_dir, *name_parts) else: filename = os.path.join(os.path.dirname(__file__), 'zoneinfo', *name_parts) if not os.path.exists(filename): # http://bugs.launchpad.net/bugs/383171 - we avoid using this # unless absolutely necessary to help when a broken version of # pkg_resources is installed. try: from pkg_resources import resource_stream except ImportError: resource_stream = None if resource_stream is not None: return resource_stream(__name__, 'zoneinfo/' + name) return open(filename, 'rb') def resource_exists(name): """Return True if the given resource exists""" try: if os.environ.get('PYTZ_SKIPEXISTSCHECK', ''): # In "standard" distributions, we can assume that # all the listed timezones are present. As an # import-speed optimization, you can set the # PYTZ_SKIPEXISTSCHECK flag to skip checking # for the presence of the resource file on disk.
return True open_resource(name).close() return True except IOError: return False _tzinfo_cache = {} def timezone(zone): r''' Return a datetime.tzinfo implementation for the given timezone >>> from datetime import datetime, timedelta >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> eastern.zone 'US/Eastern' >>> timezone(unicode('US/Eastern')) is eastern True >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:10:00 EST (-0500)' Raises UnknownTimeZoneError if passed an unknown zone. >>> try: ... timezone('Asia/Shangri-La') ... except UnknownTimeZoneError: ... print('Unknown') Unknown >>> try: ... timezone(unicode('\N{TRADE MARK SIGN}')) ... except UnknownTimeZoneError: ... print('Unknown') Unknown ''' if zone is None: raise UnknownTimeZoneError(None) if zone.upper() == 'UTC': return utc try: zone = ascii(zone) except UnicodeEncodeError: # All valid timezones are ASCII raise UnknownTimeZoneError(zone) zone = _case_insensitive_zone_lookup(_unmunge_zone(zone)) if zone not in _tzinfo_cache: if zone in all_timezones_set: # noqa fp = open_resource(zone) try: _tzinfo_cache[zone] = build_tzinfo(zone, fp) finally: fp.close() else: raise UnknownTimeZoneError(zone) return _tzinfo_cache[zone] def _unmunge_zone(zone): """Undo the time zone name munging done by older versions of pytz.""" return zone.replace('_plus_', '+').replace('_minus_', '-') _all_timezones_lower_to_standard = None def _case_insensitive_zone_lookup(zone): """case-insensitively matching timezone, else return zone unchanged""" global _all_timezones_lower_to_standard if _all_timezones_lower_to_standard is None: _all_timezones_lower_to_standard = dict((tz.lower(), tz) for tz in _all_timezones_unchecked) # noqa return _all_timezones_lower_to_standard.get(zone.lower()) or zone # noqa ZERO = datetime.timedelta(0) HOUR = datetime.timedelta(hours=1) class UTC(BaseTzInfo): """UTC Optimized UTC implementation. It unpickles using the single module global instance defined beneath this class declaration. """ zone = "UTC" _utcoffset = ZERO _dst = ZERO _tzname = zone def fromutc(self, dt): if dt.tzinfo is None: return self.localize(dt) return super(utc.__class__, self).fromutc(dt) def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO def __reduce__(self): return _UTC, () def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def __repr__(self): return "<UTC>" def __str__(self): return "UTC" UTC = utc = UTC() # UTC is a singleton def _UTC(): """Factory function for utc unpickling. Makes sure that unpickling a utc instance always returns the same module global. 
These examples belong in the UTC class above, but that class is obscured; they could live in the README.rst instead, but we are not depending on Python 2.4, so integrating the README.rst examples with the unit tests is not trivial. >>> import datetime, pickle >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) >>> naive = dt.replace(tzinfo=None) >>> p = pickle.dumps(dt, 1) >>> naive_p = pickle.dumps(naive, 1) >>> len(p) - len(naive_p) 17 >>> new = pickle.loads(p) >>> new == dt True >>> new is dt False >>> new.tzinfo is dt.tzinfo True >>> utc is UTC is timezone('UTC') True >>> utc is timezone('GMT') False """ return utc _UTC.__safe_for_unpickling__ = True def _p(*args): """Factory function for unpickling pytz tzinfo instances. Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle by shortening the path. """ return unpickler(*args) _p.__safe_for_unpickling__ = True class _CountryTimezoneDict(LazyDict): """Map ISO 3166 country code to a list of timezone names commonly used in that country. iso3166_code is the two-letter code used to identify the country. >>> def print_list(list_of_strings): ... 'We use a helper so doctests work under Python 2.3 -> 3.x' ... for s in list_of_strings: ... print(s) >>> print_list(country_timezones['nz']) Pacific/Auckland Pacific/Chatham >>> print_list(country_timezones['ch']) Europe/Zurich >>> print_list(country_timezones['CH']) Europe/Zurich >>> print_list(country_timezones[unicode('ch')]) Europe/Zurich >>> print_list(country_timezones['XXX']) Traceback (most recent call last): ... KeyError: 'XXX' Previously, this information was exposed as a function rather than a dictionary. This is still supported:: >>> print_list(country_timezones('nz')) Pacific/Auckland Pacific/Chatham """ def __call__(self, iso3166_code): """Backwards compatibility.""" return self[iso3166_code] def _fill(self): data = {} zone_tab = open_resource('zone.tab') try: for line in zone_tab: line = line.decode('UTF-8') if line.startswith('#'): continue code, coordinates, zone = line.split(None, 4)[:3] if zone not in all_timezones_set: # noqa continue try: data[code].append(zone) except KeyError: data[code] = [zone] self.data = data finally: zone_tab.close() country_timezones = _CountryTimezoneDict() class _CountryNameDict(LazyDict): '''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au']) Australia ''' def _fill(self): data = {} zone_tab = open_resource('iso3166.tab') try: for line in zone_tab.readlines(): line = line.decode('UTF-8') if line.startswith('#'): continue code, name = line.split(None, 1) data[code] = name.strip() self.data = data finally: zone_tab.close() country_names = _CountryNameDict() # Time-zone info based solely on fixed offsets class _FixedOffset(datetime.tzinfo): zone = None # to match the standard pytz API def __init__(self, minutes): if abs(minutes) >= 1440: raise ValueError("absolute offset is too large", minutes) self._minutes = minutes self._offset = datetime.timedelta(minutes=minutes) def utcoffset(self, dt): return self._offset def __reduce__(self): return FixedOffset, (self._minutes, ) def dst(self, dt): return ZERO def tzname(self, dt): return None def __repr__(self): return 'pytz.FixedOffset(%d)' % self._minutes def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def FixedOffset(offset, _tzinfos={}): """Return a fixed-offset timezone based on a number of minutes. >>> one = FixedOffset(-330) >>> one pytz.FixedOffset(-330) >>> str(one.utcoffset(datetime.datetime.now())) '-1 day, 18:30:00' >>> str(one.dst(datetime.datetime.now())) '0:00:00' >>> two = FixedOffset(1380) >>> two pytz.FixedOffset(1380) >>> str(two.utcoffset(datetime.datetime.now())) '23:00:00' >>> str(two.dst(datetime.datetime.now())) '0:00:00' The resulting datetime.timedelta must lie strictly between -1 day and +1 day. >>> FixedOffset(1440) Traceback (most recent call last): ... ValueError: ('absolute offset is too large', 1440) >>> FixedOffset(-1440) Traceback (most recent call last): ... ValueError: ('absolute offset is too large', -1440) An offset of 0 is special-cased to return UTC. >>> FixedOffset(0) is UTC True There should always be only one instance of a FixedOffset per timedelta. This should be true for multiple creation calls. >>> FixedOffset(-330) is one True >>> FixedOffset(1380) is two True It should also be true for pickling. >>> import pickle >>> pickle.loads(pickle.dumps(one)) is one True >>> pickle.loads(pickle.dumps(two)) is two True """ if offset == 0: return UTC info = _tzinfos.get(offset) if info is None: # We haven't seen this one before, so we need to save it.
# Use setdefault to avoid a race condition and make sure we have # only one info = _tzinfos.setdefault(offset, _FixedOffset(offset)) return info FixedOffset.__safe_for_unpickling__ = True def _test(): import doctest sys.path.insert(0, os.pardir) import pytz return doctest.testmod(pytz) if __name__ == '__main__': _test() _all_timezones_unchecked = \ ['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Asmera', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Juba', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Timbuktu', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/ComodRivadavia', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Salta', 'America/Argentina/San_Juan', 'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Atka', 'America/Bahia', 'America/Bahia_Banderas', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Buenos_Aires', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Catamarca', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Ciudad_Juarez', 'America/Coral_Harbour', 'America/Cordoba', 'America/Costa_Rica', 'America/Creston', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Ensenada', 'America/Fort_Nelson', 'America/Fort_Wayne', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Indianapolis', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Jujuy', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Knox_IN', 'America/Kralendijk', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Louisville', 'America/Lower_Princes', 'America/Maceio', 'America/Managua', 
'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Matamoros', 'America/Mazatlan', 'America/Mendoza', 'America/Menominee', 'America/Merida', 'America/Metlakatla', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Beulah', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Nuuk', 'America/Ojinaga', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Acre', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Punta_Arenas', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Rosario', 'America/Santa_Isabel', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Shiprock', 'America/Sitka', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Virgin', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Macquarie', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/South_Pole', 'Antarctica/Syowa', 'Antarctica/Troll', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Ashkhabad', 'Asia/Atyrau', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Barnaul', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Calcutta', 'Asia/Chita', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Chungking', 'Asia/Colombo', 'Asia/Dacca', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Famagusta', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Hebron', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Istanbul', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Kathmandu', 'Asia/Katmandu', 'Asia/Khandyga', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macao', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novokuznetsk', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qostanay', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Saigon', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Srednekolymsk', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Tel_Aviv', 'Asia/Thimbu', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Tomsk', 'Asia/Ujung_Pandang', 'Asia/Ulaanbaatar', 'Asia/Ulan_Bator', 'Asia/Urumqi', 'Asia/Ust-Nera', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yangon', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faeroe', 'Atlantic/Faroe', 'Atlantic/Jan_Mayen', 'Atlantic/Madeira', 
'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/ACT', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Canberra', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/LHI', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/NSW', 'Australia/North', 'Australia/Perth', 'Australia/Queensland', 'Australia/South', 'Australia/Sydney', 'Australia/Tasmania', 'Australia/Victoria', 'Australia/West', 'Australia/Yancowinna', 'Brazil/Acre', 'Brazil/DeNoronha', 'Brazil/East', 'Brazil/West', 'CET', 'CST6CDT', 'Canada/Atlantic', 'Canada/Central', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Canada/Saskatchewan', 'Canada/Yukon', 'Chile/Continental', 'Chile/EasterIsland', 'Cuba', 'EET', 'EST', 'EST5EDT', 'Egypt', 'Eire', 'Etc/GMT', 'Etc/GMT+0', 'Etc/GMT+1', 'Etc/GMT+10', 'Etc/GMT+11', 'Etc/GMT+12', 'Etc/GMT+2', 'Etc/GMT+3', 'Etc/GMT+4', 'Etc/GMT+5', 'Etc/GMT+6', 'Etc/GMT+7', 'Etc/GMT+8', 'Etc/GMT+9', 'Etc/GMT-0', 'Etc/GMT-1', 'Etc/GMT-10', 'Etc/GMT-11', 'Etc/GMT-12', 'Etc/GMT-13', 'Etc/GMT-14', 'Etc/GMT-2', 'Etc/GMT-3', 'Etc/GMT-4', 'Etc/GMT-5', 'Etc/GMT-6', 'Etc/GMT-7', 'Etc/GMT-8', 'Etc/GMT-9', 'Etc/GMT0', 'Etc/Greenwich', 'Etc/UCT', 'Etc/UTC', 'Etc/Universal', 'Etc/Zulu', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Astrakhan', 'Europe/Athens', 'Europe/Belfast', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Busingen', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Kirov', 'Europe/Kyiv', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Nicosia', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Saratov', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Tiraspol', 'Europe/Ulyanovsk', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GB', 'GB-Eire', 'GMT', 'GMT+0', 'GMT-0', 'GMT0', 'Greenwich', 'HST', 'Hongkong', 'Iceland', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Iran', 'Israel', 'Jamaica', 'Japan', 'Kwajalein', 'Libya', 'MET', 'MST', 'MST7MDT', 'Mexico/BajaNorte', 'Mexico/BajaSur', 'Mexico/General', 'NZ', 'NZ-CHAT', 'Navajo', 'PRC', 'PST8PDT', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Bougainville', 'Pacific/Chatham', 'Pacific/Chuuk', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kanton', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 
'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Pohnpei', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Samoa', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'Pacific/Yap', 'Poland', 'Portugal', 'ROC', 'ROK', 'Singapore', 'Turkey', 'UCT', 'US/Alaska', 'US/Aleutian', 'US/Arizona', 'US/Central', 'US/East-Indiana', 'US/Eastern', 'US/Hawaii', 'US/Indiana-Starke', 'US/Michigan', 'US/Mountain', 'US/Pacific', 'US/Samoa', 'UTC', 'Universal', 'W-SU', 'WET', 'Zulu'] all_timezones = LazyList( tz for tz in _all_timezones_unchecked if resource_exists(tz)) all_timezones_set = LazySet(all_timezones) common_timezones = \ ['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Juba', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Salta', 'America/Argentina/San_Juan', 'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Bahia', 'America/Bahia_Banderas', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Ciudad_Juarez', 'America/Costa_Rica', 'America/Creston', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Fort_Nelson', 'America/Fortaleza', 'America/Glace_Bay', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Kralendijk', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 
'America/Lower_Princes', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Matamoros', 'America/Mazatlan', 'America/Menominee', 'America/Merida', 'America/Metlakatla', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Beulah', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Nuuk', 'America/Ojinaga', 'America/Panama', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Punta_Arenas', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Sitka', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Macquarie', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/Syowa', 'Antarctica/Troll', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Atyrau', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Barnaul', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Chita', 'Asia/Choibalsan', 'Asia/Colombo', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Famagusta', 'Asia/Gaza', 'Asia/Hebron', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kathmandu', 'Asia/Khandyga', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novokuznetsk', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qostanay', 'Asia/Qyzylorda', 'Asia/Riyadh', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Srednekolymsk', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Tomsk', 'Asia/Ulaanbaatar', 'Asia/Urumqi', 'Asia/Ust-Nera', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yangon', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faroe', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/Perth', 'Australia/Sydney', 'Canada/Atlantic', 'Canada/Central', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Astrakhan', 
'Europe/Athens', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Busingen', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kirov', 'Europe/Kyiv', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Saratov', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Ulyanovsk', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zurich', 'GMT', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Bougainville', 'Pacific/Chatham', 'Pacific/Chuuk', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Kanton', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Pohnpei', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Wake', 'Pacific/Wallis', 'US/Alaska', 'US/Arizona', 'US/Central', 'US/Eastern', 'US/Hawaii', 'US/Mountain', 'US/Pacific', 'UTC'] common_timezones = LazyList( tz for tz in common_timezones if tz in all_timezones) common_timezones_set = LazySet(common_timezones)
35,127
Python
21.561336
108
0.649073
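The record above ends pytz's __init__.py: the full IANA name list and the curated common_timezones are wrapped in LazyList/LazySet so the per-zone resource_exists() checks run only on first access. A minimal usage sketch, assuming a standard pytz install (output varies with the bundled zoneinfo database):

import pytz

print(len(pytz.all_timezones))                     # first access triggers the lazy fill
print('Europe/Kyiv' in pytz.common_timezones_set)  # fast set membership test
print(pytz.common_timezones[:3])                   # LazyList supports normal slicing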
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/tzfile.py
''' $Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $ ''' from datetime import datetime from struct import unpack, calcsize from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo from pytz.tzinfo import memorized_datetime, memorized_timedelta def _byte_string(s): """Cast a string or byte string to an ASCII byte string.""" return s.encode('ASCII') _NULL = _byte_string('\0') def _std_string(s): """Cast a string or byte string to an ASCII string.""" return str(s.decode('ASCII')) def build_tzinfo(zone, fp): head_fmt = '>4s c 15x 6l' head_size = calcsize(head_fmt) (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt) = unpack(head_fmt, fp.read(head_size)) # Make sure it is a tzfile(5) file assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic) # Read out the transition times, localtime indices and ttinfo structures. data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict( timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt) data_size = calcsize(data_fmt) data = unpack(data_fmt, fp.read(data_size)) # make sure we unpacked the right number of values assert len(data) == 2 * timecnt + 3 * typecnt + 1 transitions = [memorized_datetime(trans) for trans in data[:timecnt]] lindexes = list(data[timecnt:2 * timecnt]) ttinfo_raw = data[2 * timecnt:-1] tznames_raw = data[-1] del data # Process ttinfo into separate structs ttinfo = [] tznames = {} i = 0 while i < len(ttinfo_raw): # have we looked up this timezone name yet? tzname_offset = ttinfo_raw[i + 2] if tzname_offset not in tznames: nul = tznames_raw.find(_NULL, tzname_offset) if nul < 0: nul = len(tznames_raw) tznames[tzname_offset] = _std_string( tznames_raw[tzname_offset:nul]) ttinfo.append((ttinfo_raw[i], bool(ttinfo_raw[i + 1]), tznames[tzname_offset])) i += 3 # Now build the timezone object if len(ttinfo) == 1 or len(transitions) == 0: ttinfo[0][0], ttinfo[0][2] cls = type(zone, (StaticTzInfo,), dict( zone=zone, _utcoffset=memorized_timedelta(ttinfo[0][0]), _tzname=ttinfo[0][2])) else: # Early dates use the first standard time ttinfo i = 0 while ttinfo[i][1]: i += 1 if ttinfo[i] == ttinfo[lindexes[0]]: transitions[0] = datetime.min else: transitions.insert(0, datetime.min) lindexes.insert(0, i) # calculate transition info transition_info = [] for i in range(len(transitions)): inf = ttinfo[lindexes[i]] utcoffset = inf[0] if not inf[1]: dst = 0 else: for j in range(i - 1, -1, -1): prev_inf = ttinfo[lindexes[j]] if not prev_inf[1]: break dst = inf[0] - prev_inf[0] # dst offset # Bad dst? Look further. DST > 24 hours happens when # a timzone has moved across the international dateline. if dst <= 0 or dst > 3600 * 3: for j in range(i + 1, len(transitions)): stdinf = ttinfo[lindexes[j]] if not stdinf[1]: dst = inf[0] - stdinf[0] if dst > 0: break # Found a useful std time. tzname = inf[2] # Round utcoffset and dst to the nearest minute or the # datetime library will complain. Conversions to these timezones # might be up to plus or minus 30 seconds out, but it is # the best we can do. 
utcoffset = int((utcoffset + 30) // 60) * 60 dst = int((dst + 30) // 60) * 60 transition_info.append(memorized_ttinfo(utcoffset, dst, tzname)) cls = type(zone, (DstTzInfo,), dict( zone=zone, _utc_transition_times=transitions, _transition_info=transition_info)) return cls() if __name__ == '__main__': import os.path from pprint import pprint base = os.path.join(os.path.dirname(__file__), 'zoneinfo') tz = build_tzinfo('Australia/Melbourne', open(os.path.join(base, 'Australia', 'Melbourne'), 'rb')) tz = build_tzinfo('US/Eastern', open(os.path.join(base, 'US', 'Eastern'), 'rb')) pprint(tz._utc_transition_times)
4,723
Python
34.253731
79
0.545628
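build_tzinfo() in the file above turns a binary tzfile(5) blob into a StaticTzInfo or DstTzInfo class. A hedged sketch of calling it directly, assuming a Unix-style zoneinfo database at /usr/share/zoneinfo (the path is illustrative, not part of pytz):

from datetime import datetime
from pytz.tzfile import build_tzinfo

with open('/usr/share/zoneinfo/Europe/London', 'rb') as f:
    tz = build_tzinfo('Europe/London', f)

print(tz.zone)                              # 'Europe/London'
print(tz.utcoffset(datetime(2020, 7, 1)))   # 1:00:00 (BST in effect)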
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/lazy.py
from threading import RLock try: from collections.abc import Mapping as DictMixin except ImportError: # Python < 3.3 try: from UserDict import DictMixin # Python 2 except ImportError: # Python 3.0-3.3 from collections import Mapping as DictMixin # With lazy loading, we might end up with multiple threads triggering # it at the same time. We need a lock. _fill_lock = RLock() class LazyDict(DictMixin): """Dictionary populated on first use.""" data = None def __getitem__(self, key): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return self.data[key.upper()] def __contains__(self, key): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return key in self.data def __iter__(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return iter(self.data) def __len__(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return len(self.data) def keys(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return self.data.keys() class LazyList(list): """List populated on first use.""" _props = [ '__str__', '__repr__', '__unicode__', '__hash__', '__sizeof__', '__cmp__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', 'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__', '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__', '__getitem__', '__setitem__', '__delitem__', '__iter__', '__reversed__', '__getslice__', '__setslice__', '__delslice__'] def __new__(cls, fill_iter=None): if fill_iter is None: return list() # We need a new class as we will be dynamically messing with its # methods. class LazyList(list): pass fill_iter = [fill_iter] def lazy(name): def _lazy(self, *args, **kw): _fill_lock.acquire() try: if len(fill_iter) > 0: list.extend(self, fill_iter.pop()) for method_name in cls._props: delattr(LazyList, method_name) finally: _fill_lock.release() return getattr(list, name)(self, *args, **kw) return _lazy for name in cls._props: setattr(LazyList, name, lazy(name)) new_list = LazyList() return new_list # Not all versions of Python declare the same magic methods. # Filter out properties that don't exist in this version of Python # from the list. 
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)] class LazySet(set): """Set populated on first use.""" _props = ( '__str__', '__repr__', '__unicode__', '__hash__', '__sizeof__', '__cmp__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', '__contains__', '__len__', '__nonzero__', '__getitem__', '__setitem__', '__delitem__', '__iter__', '__sub__', '__and__', '__xor__', '__or__', '__rsub__', '__rand__', '__rxor__', '__ror__', '__isub__', '__iand__', '__ixor__', '__ior__', 'add', 'clear', 'copy', 'difference', 'difference_update', 'discard', 'intersection', 'intersection_update', 'isdisjoint', 'issubset', 'issuperset', 'pop', 'remove', 'symmetric_difference', 'symmetric_difference_update', 'union', 'update') def __new__(cls, fill_iter=None): if fill_iter is None: return set() class LazySet(set): pass fill_iter = [fill_iter] def lazy(name): def _lazy(self, *args, **kw): _fill_lock.acquire() try: if len(fill_iter) > 0: for i in fill_iter.pop(): set.add(self, i) for method_name in cls._props: delattr(LazySet, method_name) finally: _fill_lock.release() return getattr(set, name)(self, *args, **kw) return _lazy for name in cls._props: setattr(LazySet, name, lazy(name)) new_set = LazySet() return new_set # Not all versions of Python declare the same magic methods. # Filter out properties that don't exist in this version of Python # from the list. LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
5,404
Python
30.242774
75
0.473908
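The lazy containers above defer their one-time fill until the first method call, then delete the per-class method overrides so later calls go straight to the plain list/set implementations. A small sketch demonstrating that behaviour:

from pytz.lazy import LazyList

def source():
    print('filling now')          # runs only on first access
    yield from (1, 2, 3)

lst = LazyList(source())          # nothing printed yet
print(len(lst))                   # prints 'filling now', then 3
print(lst)                        # plain list behaviour from here on: [1, 2, 3]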
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz/reference.py
''' Reference tzinfo implementations from the Python docs. Used for testing against as they are only correct for the years 1987 to 2006. Do not use these for real code. ''' from datetime import tzinfo, timedelta, datetime from pytz import HOUR, ZERO, UTC __all__ = [ 'FixedOffset', 'LocalTimezone', 'USTimeZone', 'Eastern', 'Central', 'Mountain', 'Pacific', 'UTC' ] # A class building tzinfo objects for fixed-offset time zones. # Note that FixedOffset(0, "UTC") is a different way to build a # UTC tzinfo object. class FixedOffset(tzinfo): """Fixed offset in minutes east from UTC.""" def __init__(self, offset, name): self.__offset = timedelta(minutes=offset) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return ZERO import time as _time STDOFFSET = timedelta(seconds=-_time.timezone) if _time.daylight: DSTOFFSET = timedelta(seconds=-_time.altzone) else: DSTOFFSET = STDOFFSET DSTDIFF = DSTOFFSET - STDOFFSET # A class capturing the platform's idea of local time. class LocalTimezone(tzinfo): def utcoffset(self, dt): if self._isdst(dt): return DSTOFFSET else: return STDOFFSET def dst(self, dt): if self._isdst(dt): return DSTDIFF else: return ZERO def tzname(self, dt): return _time.tzname[self._isdst(dt)] def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1) stamp = _time.mktime(tt) tt = _time.localtime(stamp) return tt.tm_isdst > 0 Local = LocalTimezone() def first_sunday_on_or_after(dt): days_to_go = 6 - dt.weekday() if days_to_go: dt += timedelta(days_to_go) return dt # In the US, DST starts at 2am (standard time) on the first Sunday in April. DSTSTART = datetime(1, 4, 1, 2) # and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct. # which is the first Sunday on or after Oct 25. DSTEND = datetime(1, 10, 25, 1) # A complete implementation of current DST rules for major US time zones. class USTimeZone(tzinfo): def __init__(self, hours, reprname, stdname, dstname): self.stdoffset = timedelta(hours=hours) self.reprname = reprname self.stdname = stdname self.dstname = dstname def __repr__(self): return self.reprname def tzname(self, dt): if self.dst(dt): return self.dstname else: return self.stdname def utcoffset(self, dt): return self.stdoffset + self.dst(dt) def dst(self, dt): if dt is None or dt.tzinfo is None: # An exception may be sensible here, in one or both cases. # It depends on how you want to treat them. The default # fromutc() implementation (called by the default astimezone() # implementation) passes a datetime with dt.tzinfo is self. return ZERO assert dt.tzinfo is self # Find first Sunday in April & the last in October. start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year)) end = first_sunday_on_or_after(DSTEND.replace(year=dt.year)) # Can't compare naive to aware objects, so strip the timezone from # dt first. if start <= dt.replace(tzinfo=None) < end: return HOUR else: return ZERO Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") Central = USTimeZone(-6, "Central", "CST", "CDT") Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
3,778
Python
25.801418
76
0.615934
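As the docstring warns, these reference zones hard-code the pre-2007 US DST rules (first Sunday in April through the last Sunday in October), so they are only correct for 1987 to 2006. A sketch within that window:

from datetime import datetime
from pytz.reference import Eastern

summer = datetime(2002, 7, 4, 12, 0, tzinfo=Eastern)
winter = datetime(2002, 1, 4, 12, 0, tzinfo=Eastern)
print(summer.tzname(), summer.dst())   # EDT 1:00:00
print(winter.tzname(), winter.dst())   # EST 0:00:00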
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiodns/__init__.py
import asyncio import functools import pycares from typing import ( Any, List, Optional, ) # TODO: Work out mypy no attribute error and remove ignore from . import error # type: ignore __version__ = '2.0.0' __all__ = ('DNSResolver', 'error') READ = 1 WRITE = 2 query_type_map = {'A' : pycares.QUERY_TYPE_A, 'AAAA' : pycares.QUERY_TYPE_AAAA, 'ANY' : pycares.QUERY_TYPE_ANY, 'CNAME' : pycares.QUERY_TYPE_CNAME, 'MX' : pycares.QUERY_TYPE_MX, 'NAPTR' : pycares.QUERY_TYPE_NAPTR, 'NS' : pycares.QUERY_TYPE_NS, 'PTR' : pycares.QUERY_TYPE_PTR, 'SOA' : pycares.QUERY_TYPE_SOA, 'SRV' : pycares.QUERY_TYPE_SRV, 'TXT' : pycares.QUERY_TYPE_TXT } class DNSResolver: def __init__(self, nameservers=None, loop=None, **kwargs): # type: (Optional[List[str]], Optional[asyncio.AbstractEventLoop], Any) -> None self.loop = loop or asyncio.get_event_loop() assert self.loop is not None kwargs.pop('sock_state_cb', None) self._channel = pycares.Channel(sock_state_cb=self._sock_state_cb, **kwargs) if nameservers: self.nameservers = nameservers self._read_fds = set() # type: Set[int] self._write_fds = set() # type: Set[int] self._timer = None @property def nameservers(self): # type: () -> pycares.Channel return self._channel.servers @nameservers.setter def nameservers(self, value): # type: (List[str]) -> None self._channel.servers = value @staticmethod def _callback(fut, result, errorno): # type: (asyncio.Future, Any, int) -> None if fut.cancelled(): return if errorno is not None: fut.set_exception(error.DNSError(errorno, pycares.errno.strerror(errorno))) else: fut.set_result(result) def query(self, host, qtype): # type: (str, str) -> asyncio.Future try: qtype = query_type_map[qtype] except KeyError: raise ValueError('invalid query type: {}'.format(qtype)) fut = asyncio.Future(loop=self.loop) cb = functools.partial(self._callback, fut) self._channel.query(host, qtype, cb) return fut def gethostbyname(self, host, family): # type: (str, str) -> asyncio.Future fut = asyncio.Future(loop=self.loop) cb = functools.partial(self._callback, fut) self._channel.gethostbyname(host, family, cb) return fut def gethostbyaddr(self, name): # type: (str) -> asyncio.Future fut = asyncio.Future(loop=self.loop) cb = functools.partial(self._callback, fut) self._channel.gethostbyaddr(name, cb) return fut def cancel(self): # type: () -> None self._channel.cancel() def _sock_state_cb(self, fd, readable, writable): # type: (int, bool, bool) -> None if readable or writable: if readable: self.loop.add_reader(fd, self._handle_event, fd, READ) self._read_fds.add(fd) if writable: self.loop.add_writer(fd, self._handle_event, fd, WRITE) self._write_fds.add(fd) if self._timer is None: self._timer = self.loop.call_later(1.0, self._timer_cb) else: # socket is now closed if fd in self._read_fds: self._read_fds.discard(fd) self.loop.remove_reader(fd) if fd in self._write_fds: self._write_fds.discard(fd) self.loop.remove_writer(fd) if not self._read_fds and not self._write_fds and self._timer is not None: self._timer.cancel() self._timer = None def _handle_event(self, fd, event): # type: (int, Any) -> None read_fd = pycares.ARES_SOCKET_BAD write_fd = pycares.ARES_SOCKET_BAD if event == READ: read_fd = fd elif event == WRITE: write_fd = fd self._channel.process_fd(read_fd, write_fd) def _timer_cb(self): # type: () -> None if self._read_fds or self._write_fds: self._channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD) self._timer = self.loop.call_later(1.0, self._timer_cb) else: self._timer = None
4,578
Python
31.020979
87
0.548056
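DNSResolver.query() returns an asyncio.Future that the pycares callback resolves, so it can be awaited directly. A minimal sketch, assuming pycares is installed and network access is available ('example.com' is illustrative); the pre-3.10 get_event_loop() style matches the Pythons this aiodns version targets:

import asyncio
import aiodns

async def main():
    resolver = aiodns.DNSResolver()
    for record in await resolver.query('example.com', 'A'):
        print(record.host)

asyncio.get_event_loop().run_until_complete(main())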
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiodns/error.py
import pycares

for code, name in pycares.errno.errorcode.items():
    globals()[name] = code


class DNSError(Exception):
    pass
134
Python
11.272726
50
0.69403
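error.py copies every pycares errno name into its module globals, so callers can match DNSError codes symbolically. A hedged sketch (assumes a name that does not resolve, so c-ares is expected, though not guaranteed, to report ARES_ENOTFOUND):

import asyncio
import aiodns
from aiodns import error

async def main():
    resolver = aiodns.DNSResolver()
    try:
        await resolver.query('nonexistent.invalid', 'A')
    except error.DNSError as exc:
        code, message = exc.args
        print(code == error.ARES_ENOTFOUND, message)

asyncio.get_event_loop().run_until_complete(main())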
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/to_thread.py
from __future__ import annotations from typing import Callable, TypeVar from warnings import warn from ._core._eventloop import get_asynclib from .abc import CapacityLimiter T_Retval = TypeVar("T_Retval") async def run_sync( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: CapacityLimiter | None = None, ) -> T_Retval: """ Call the given function with the given arguments in a worker thread. If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, the thread will still run its course but its return value (or any raised exception) will be ignored. :param func: a callable :param args: positional arguments for the callable :param cancellable: ``True`` to allow cancellation of the operation :param limiter: capacity limiter to use to limit the total amount of threads running (if omitted, the default limiter is used) :return: an awaitable that yields the return value of the function. """ return await get_asynclib().run_sync_in_worker_thread( func, *args, cancellable=cancellable, limiter=limiter ) async def run_sync_in_worker_thread( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: CapacityLimiter | None = None, ) -> T_Retval: warn( "run_sync_in_worker_thread() has been deprecated, use anyio.to_thread.run_sync() instead", DeprecationWarning, ) return await run_sync(func, *args, cancellable=cancellable, limiter=limiter) def current_default_thread_limiter() -> CapacityLimiter: """ Return the capacity limiter that is used by default to limit the number of concurrent threads. :return: a capacity limiter object """ return get_asynclib().current_default_thread_limiter() def current_default_worker_thread_limiter() -> CapacityLimiter: warn( "current_default_worker_thread_limiter() has been deprecated, " "use anyio.to_thread.current_default_thread_limiter() instead", DeprecationWarning, ) return current_default_thread_limiter()
2,146
Python
30.573529
98
0.698043
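A minimal sketch of to_thread.run_sync(): the blocking call runs in a worker thread while the event loop stays responsive.

import time
import anyio
from anyio import to_thread

async def main():
    await to_thread.run_sync(time.sleep, 0.2)   # blocking call, off the event loop
    print('blocking work finished')

anyio.run(main)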
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/to_process.py
from __future__ import annotations import os import pickle import subprocess import sys from collections import deque from importlib.util import module_from_spec, spec_from_file_location from typing import Callable, TypeVar, cast from ._core._eventloop import current_time, get_asynclib, get_cancelled_exc_class from ._core._exceptions import BrokenWorkerProcess from ._core._subprocesses import open_process from ._core._synchronization import CapacityLimiter from ._core._tasks import CancelScope, fail_after from .abc import ByteReceiveStream, ByteSendStream, Process from .lowlevel import RunVar, checkpoint_if_cancelled from .streams.buffered import BufferedByteReceiveStream WORKER_MAX_IDLE_TIME = 300 # 5 minutes T_Retval = TypeVar("T_Retval") _process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers") _process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar( "_process_pool_idle_workers" ) _default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter") async def run_sync( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: CapacityLimiter | None = None, ) -> T_Retval: """ Call the given function with the given arguments in a worker process. If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, the worker process running it will be abruptly terminated using SIGKILL (or ``terminateProcess()`` on Windows). :param func: a callable :param args: positional arguments for the callable :param cancellable: ``True`` to allow cancellation of the operation while it's running :param limiter: capacity limiter to use to limit the total amount of processes running (if omitted, the default limiter is used) :return: an awaitable that yields the return value of the function. 
""" async def send_raw_command(pickled_cmd: bytes) -> object: try: await stdin.send(pickled_cmd) response = await buffered.receive_until(b"\n", 50) status, length = response.split(b" ") if status not in (b"RETURN", b"EXCEPTION"): raise RuntimeError( f"Worker process returned unexpected response: {response!r}" ) pickled_response = await buffered.receive_exactly(int(length)) except BaseException as exc: workers.discard(process) try: process.kill() with CancelScope(shield=True): await process.aclose() except ProcessLookupError: pass if isinstance(exc, get_cancelled_exc_class()): raise else: raise BrokenWorkerProcess from exc retval = pickle.loads(pickled_response) if status == b"EXCEPTION": assert isinstance(retval, BaseException) raise retval else: return retval # First pickle the request before trying to reserve a worker process await checkpoint_if_cancelled() request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL) # If this is the first run in this event loop thread, set up the necessary variables try: workers = _process_pool_workers.get() idle_workers = _process_pool_idle_workers.get() except LookupError: workers = set() idle_workers = deque() _process_pool_workers.set(workers) _process_pool_idle_workers.set(idle_workers) get_asynclib().setup_process_pool_exit_at_shutdown(workers) async with (limiter or current_default_process_limiter()): # Pop processes from the pool (starting from the most recently used) until we find one that # hasn't exited yet process: Process while idle_workers: process, idle_since = idle_workers.pop() if process.returncode is None: stdin = cast(ByteSendStream, process.stdin) buffered = BufferedByteReceiveStream( cast(ByteReceiveStream, process.stdout) ) # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME seconds or # longer now = current_time() killed_processes: list[Process] = [] while idle_workers: if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME: break process, idle_since = idle_workers.popleft() process.kill() workers.remove(process) killed_processes.append(process) with CancelScope(shield=True): for process in killed_processes: await process.aclose() break workers.remove(process) else: command = [sys.executable, "-u", "-m", __name__] process = await open_process( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) try: stdin = cast(ByteSendStream, process.stdin) buffered = BufferedByteReceiveStream( cast(ByteReceiveStream, process.stdout) ) with fail_after(20): message = await buffered.receive(6) if message != b"READY\n": raise BrokenWorkerProcess( f"Worker process returned unexpected response: {message!r}" ) main_module_path = getattr(sys.modules["__main__"], "__file__", None) pickled = pickle.dumps( ("init", sys.path, main_module_path), protocol=pickle.HIGHEST_PROTOCOL, ) await send_raw_command(pickled) except (BrokenWorkerProcess, get_cancelled_exc_class()): raise except BaseException as exc: process.kill() raise BrokenWorkerProcess( "Error during worker process initialization" ) from exc workers.add(process) with CancelScope(shield=not cancellable): try: return cast(T_Retval, await send_raw_command(request)) finally: if process in workers: idle_workers.append((process, current_time())) def current_default_process_limiter() -> CapacityLimiter: """ Return the capacity limiter that is used by default to limit the number of worker processes. 
:return: a capacity limiter object """ try: return _default_process_limiter.get() except LookupError: limiter = CapacityLimiter(os.cpu_count() or 2) _default_process_limiter.set(limiter) return limiter def process_worker() -> None: # Redirect standard streams to os.devnull so that user code won't interfere with the # parent-worker communication stdin = sys.stdin stdout = sys.stdout sys.stdin = open(os.devnull) sys.stdout = open(os.devnull, "w") stdout.buffer.write(b"READY\n") while True: retval = exception = None try: command, *args = pickle.load(stdin.buffer) except EOFError: return except BaseException as exc: exception = exc else: if command == "run": func, args = args try: retval = func(*args) except BaseException as exc: exception = exc elif command == "init": main_module_path: str | None sys.path, main_module_path = args del sys.modules["__main__"] if main_module_path: # Load the parent's main module but as __mp_main__ instead of __main__ # (like multiprocessing does) to avoid infinite recursion try: spec = spec_from_file_location("__mp_main__", main_module_path) if spec and spec.loader: main = module_from_spec(spec) spec.loader.exec_module(main) sys.modules["__main__"] = main except BaseException as exc: exception = exc try: if exception is not None: status = b"EXCEPTION" pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL) else: status = b"RETURN" pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL) except BaseException as exc: exception = exc status = b"EXCEPTION" pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL) stdout.buffer.write(b"%s %d\n" % (status, len(pickled))) stdout.buffer.write(pickled) # Respect SIGTERM if isinstance(exception, SystemExit): raise exception if __name__ == "__main__": process_worker()
9,242
Python
35.972
99
0.577797
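A sketch of to_process.run_sync() for CPU-bound work. The __main__ guard matters here: as the code above shows, each worker re-imports the parent's main module (as __mp_main__) during initialization.

import anyio
from anyio import to_process

def cpu_bound(n: int) -> int:
    return sum(i * i for i in range(n))

async def main():
    print(await to_process.run_sync(cpu_bound, 10_000))

if __name__ == '__main__':
    anyio.run(main)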
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/__init__.py
from __future__ import annotations __all__ = ( "maybe_async", "maybe_async_cm", "run", "sleep", "sleep_forever", "sleep_until", "current_time", "get_all_backends", "get_cancelled_exc_class", "BrokenResourceError", "BrokenWorkerProcess", "BusyResourceError", "ClosedResourceError", "DelimiterNotFound", "EndOfStream", "ExceptionGroup", "IncompleteRead", "TypedAttributeLookupError", "WouldBlock", "AsyncFile", "Path", "open_file", "wrap_file", "aclose_forcefully", "open_signal_receiver", "connect_tcp", "connect_unix", "create_tcp_listener", "create_unix_listener", "create_udp_socket", "create_connected_udp_socket", "getaddrinfo", "getnameinfo", "wait_socket_readable", "wait_socket_writable", "create_memory_object_stream", "run_process", "open_process", "create_lock", "CapacityLimiter", "CapacityLimiterStatistics", "Condition", "ConditionStatistics", "Event", "EventStatistics", "Lock", "LockStatistics", "Semaphore", "SemaphoreStatistics", "create_condition", "create_event", "create_semaphore", "create_capacity_limiter", "open_cancel_scope", "fail_after", "move_on_after", "current_effective_deadline", "TASK_STATUS_IGNORED", "CancelScope", "create_task_group", "TaskInfo", "get_current_task", "get_running_tasks", "wait_all_tasks_blocked", "run_sync_in_worker_thread", "run_async_from_thread", "run_sync_from_thread", "current_default_worker_thread_limiter", "create_blocking_portal", "start_blocking_portal", "typed_attribute", "TypedAttributeSet", "TypedAttributeProvider", ) from typing import Any from ._core._compat import maybe_async, maybe_async_cm from ._core._eventloop import ( current_time, get_all_backends, get_cancelled_exc_class, run, sleep, sleep_forever, sleep_until, ) from ._core._exceptions import ( BrokenResourceError, BrokenWorkerProcess, BusyResourceError, ClosedResourceError, DelimiterNotFound, EndOfStream, ExceptionGroup, IncompleteRead, TypedAttributeLookupError, WouldBlock, ) from ._core._fileio import AsyncFile, Path, open_file, wrap_file from ._core._resources import aclose_forcefully from ._core._signals import open_signal_receiver from ._core._sockets import ( connect_tcp, connect_unix, create_connected_udp_socket, create_tcp_listener, create_udp_socket, create_unix_listener, getaddrinfo, getnameinfo, wait_socket_readable, wait_socket_writable, ) from ._core._streams import create_memory_object_stream from ._core._subprocesses import open_process, run_process from ._core._synchronization import ( CapacityLimiter, CapacityLimiterStatistics, Condition, ConditionStatistics, Event, EventStatistics, Lock, LockStatistics, Semaphore, SemaphoreStatistics, create_capacity_limiter, create_condition, create_event, create_lock, create_semaphore, ) from ._core._tasks import ( TASK_STATUS_IGNORED, CancelScope, create_task_group, current_effective_deadline, fail_after, move_on_after, open_cancel_scope, ) from ._core._testing import ( TaskInfo, get_current_task, get_running_tasks, wait_all_tasks_blocked, ) from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute # Re-exported here, for backwards compatibility # isort: off from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread from .from_thread import ( create_blocking_portal, run_async_from_thread, run_sync_from_thread, start_blocking_portal, ) # Re-export imports so they look like they live directly in this package key: str value: Any for key, value in list(locals().items()): if getattr(value, "__module__", "").startswith("anyio."): value.__module__ = __name__
4,073
Python
22.964706
88
0.665603
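A short sketch of the core names re-exported above: structured concurrency via a task group, bounded by a cancel-scope deadline.

import anyio

async def worker(delay: float) -> None:
    await anyio.sleep(delay)
    print(f'worker({delay}) done')

async def main() -> None:
    with anyio.move_on_after(1):                # deadline-based cancel scope
        async with anyio.create_task_group() as tg:
            tg.start_soon(worker, 0.1)
            tg.start_soon(worker, 0.2)

anyio.run(main)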
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/pytest_plugin.py
from __future__ import annotations from contextlib import contextmanager from inspect import isasyncgenfunction, iscoroutinefunction from typing import Any, Dict, Generator, Tuple, cast import pytest import sniffio from ._core._eventloop import get_all_backends, get_asynclib from .abc import TestRunner _current_runner: TestRunner | None = None def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]: if isinstance(backend, str): return backend, {} elif isinstance(backend, tuple) and len(backend) == 2: if isinstance(backend[0], str) and isinstance(backend[1], dict): return cast(Tuple[str, Dict[str, Any]], backend) raise TypeError("anyio_backend must be either a string or tuple of (string, dict)") @contextmanager def get_runner( backend_name: str, backend_options: dict[str, Any] ) -> Generator[TestRunner, object, None]: global _current_runner if _current_runner: yield _current_runner return asynclib = get_asynclib(backend_name) token = None if sniffio.current_async_library_cvar.get(None) is None: # Since we're in control of the event loop, we can cache the name of the async library token = sniffio.current_async_library_cvar.set(backend_name) try: backend_options = backend_options or {} with asynclib.TestRunner(**backend_options) as runner: _current_runner = runner yield runner finally: _current_runner = None if token: sniffio.current_async_library_cvar.reset(token) def pytest_configure(config: Any) -> None: config.addinivalue_line( "markers", "anyio: mark the (coroutine function) test to be run " "asynchronously via anyio.", ) def pytest_fixture_setup(fixturedef: Any, request: Any) -> None: def wrapper(*args, anyio_backend, **kwargs): # type: ignore[no-untyped-def] backend_name, backend_options = extract_backend_and_options(anyio_backend) if has_backend_arg: kwargs["anyio_backend"] = anyio_backend with get_runner(backend_name, backend_options) as runner: if isasyncgenfunction(func): yield from runner.run_asyncgen_fixture(func, kwargs) else: yield runner.run_fixture(func, kwargs) # Only apply this to coroutine functions and async generator functions in requests that involve # the anyio_backend fixture func = fixturedef.func if isasyncgenfunction(func) or iscoroutinefunction(func): if "anyio_backend" in request.fixturenames: has_backend_arg = "anyio_backend" in fixturedef.argnames fixturedef.func = wrapper if not has_backend_arg: fixturedef.argnames += ("anyio_backend",) @pytest.hookimpl(tryfirst=True) def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None: if collector.istestfunction(obj, name): inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj if iscoroutinefunction(inner_func): marker = collector.get_closest_marker("anyio") own_markers = getattr(obj, "pytestmark", ()) if marker or any(marker.name == "anyio" for marker in own_markers): pytest.mark.usefixtures("anyio_backend")(obj) @pytest.hookimpl(tryfirst=True) def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None: def run_with_hypothesis(**kwargs: Any) -> None: with get_runner(backend_name, backend_options) as runner: runner.run_test(original_func, kwargs) backend = pyfuncitem.funcargs.get("anyio_backend") if backend: backend_name, backend_options = extract_backend_and_options(backend) if hasattr(pyfuncitem.obj, "hypothesis"): # Wrap the inner test function unless it's already wrapped original_func = pyfuncitem.obj.hypothesis.inner_test if original_func.__qualname__ != run_with_hypothesis.__qualname__: if iscoroutinefunction(original_func): 
pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis return None if iscoroutinefunction(pyfuncitem.obj): funcargs = pyfuncitem.funcargs testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} with get_runner(backend_name, backend_options) as runner: runner.run_test(pyfuncitem.obj, testargs) return True return None @pytest.fixture(params=get_all_backends()) def anyio_backend(request: Any) -> Any: return request.param @pytest.fixture def anyio_backend_name(anyio_backend: Any) -> str: if isinstance(anyio_backend, str): return anyio_backend else: return anyio_backend[0] @pytest.fixture def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]: if isinstance(anyio_backend, str): return {} else: return anyio_backend[1]
5,022
Python
34.125874
99
0.660096
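A sketch of a test module driven by this plugin: the anyio marker routes the coroutine test through a TestRunner, and overriding the parametrized anyio_backend fixture pins the run to one backend.

import pytest
import anyio

@pytest.fixture
def anyio_backend():
    return 'asyncio'        # override the all-backends parametrization

@pytest.mark.anyio
async def test_sleep_yields():
    await anyio.sleep(0)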
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/lowlevel.py
from __future__ import annotations import enum import sys from dataclasses import dataclass from typing import Any, Generic, TypeVar, overload from weakref import WeakKeyDictionary from ._core._eventloop import get_asynclib if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal T = TypeVar("T") D = TypeVar("D") async def checkpoint() -> None: """ Check for cancellation and allow the scheduler to switch to another task. Equivalent to (but more efficient than):: await checkpoint_if_cancelled() await cancel_shielded_checkpoint() .. versionadded:: 3.0 """ await get_asynclib().checkpoint() async def checkpoint_if_cancelled() -> None: """ Enter a checkpoint if the enclosing cancel scope has been cancelled. This does not allow the scheduler to switch to a different task. .. versionadded:: 3.0 """ await get_asynclib().checkpoint_if_cancelled() async def cancel_shielded_checkpoint() -> None: """ Allow the scheduler to switch to another task but without checking for cancellation. Equivalent to (but potentially more efficient than):: with CancelScope(shield=True): await checkpoint() .. versionadded:: 3.0 """ await get_asynclib().cancel_shielded_checkpoint() def current_token() -> object: """Return a backend specific token object that can be used to get back to the event loop.""" return get_asynclib().current_token() _run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary() _token_wrappers: dict[Any, _TokenWrapper] = {} @dataclass(frozen=True) class _TokenWrapper: __slots__ = "_token", "__weakref__" _token: object class _NoValueSet(enum.Enum): NO_VALUE_SET = enum.auto() class RunvarToken(Generic[T]): __slots__ = "_var", "_value", "_redeemed" def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]): self._var = var self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value self._redeemed = False class RunVar(Generic[T]): """ Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop. """ __slots__ = "_name", "_default" NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET _token_wrappers: set[_TokenWrapper] = set() def __init__( self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET, ): self._name = name self._default = default @property def _current_vars(self) -> dict[str, T]: token = current_token() while True: try: return _run_vars[token] except TypeError: # Happens when token isn't weak referable (TrioToken). # This workaround does mean that some memory will leak on Trio until the problem # is fixed on their end. token = _TokenWrapper(token) self._token_wrappers.add(token) except KeyError: run_vars = _run_vars[token] = {} return run_vars @overload def get(self, default: D) -> T | D: ... @overload def get(self) -> T: ... 
def get( self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET ) -> T | D: try: return self._current_vars[self._name] except KeyError: if default is not RunVar.NO_VALUE_SET: return default elif self._default is not RunVar.NO_VALUE_SET: return self._default raise LookupError( f'Run variable "{self._name}" has no value and no default set' ) def set(self, value: T) -> RunvarToken[T]: current_vars = self._current_vars token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET)) current_vars[self._name] = value return token def reset(self, token: RunvarToken[T]) -> None: if token._var is not self: raise ValueError("This token does not belong to this RunVar") if token._redeemed: raise ValueError("This token has already been used") if token._value is _NoValueSet.NO_VALUE_SET: try: del self._current_vars[self._name] except KeyError: pass else: self._current_vars[self._name] = token._value token._redeemed = True def __repr__(self) -> str: return f"<RunVar name={self._name!r}>"
4,647
Python
25.56
96
0.599527
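RunVar scopes a value to the running event loop rather than to a context or a module global. A small sketch of set/get/reset:

import anyio
from anyio.lowlevel import RunVar

hits = RunVar('hits', default=0)

async def main() -> None:
    token = hits.set(hits.get() + 1)
    print(hits.get())       # 1
    hits.reset(token)
    print(hits.get())       # back to the default: 0

anyio.run(main)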
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/from_thread.py
from __future__ import annotations import threading from asyncio import iscoroutine from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait from contextlib import AbstractContextManager, contextmanager from types import TracebackType from typing import ( Any, AsyncContextManager, Awaitable, Callable, ContextManager, Generator, Generic, Iterable, TypeVar, cast, overload, ) from warnings import warn from ._core import _eventloop from ._core._eventloop import get_asynclib, get_cancelled_exc_class, threadlocals from ._core._synchronization import Event from ._core._tasks import CancelScope, create_task_group from .abc._tasks import TaskStatus T_Retval = TypeVar("T_Retval") T_co = TypeVar("T_co") def run(func: Callable[..., Awaitable[T_Retval]], *args: object) -> T_Retval: """ Call a coroutine function from a worker thread. :param func: a coroutine function :param args: positional arguments for the callable :return: the return value of the coroutine function """ try: asynclib = threadlocals.current_async_module except AttributeError: raise RuntimeError("This function can only be run from an AnyIO worker thread") return asynclib.run_async_from_thread(func, *args) def run_async_from_thread( func: Callable[..., Awaitable[T_Retval]], *args: object ) -> T_Retval: warn( "run_async_from_thread() has been deprecated, use anyio.from_thread.run() instead", DeprecationWarning, ) return run(func, *args) def run_sync(func: Callable[..., T_Retval], *args: object) -> T_Retval: """ Call a function in the event loop thread from a worker thread. :param func: a callable :param args: positional arguments for the callable :return: the return value of the callable """ try: asynclib = threadlocals.current_async_module except AttributeError: raise RuntimeError("This function can only be run from an AnyIO worker thread") return asynclib.run_sync_from_thread(func, *args) def run_sync_from_thread(func: Callable[..., T_Retval], *args: object) -> T_Retval: warn( "run_sync_from_thread() has been deprecated, use anyio.from_thread.run_sync() instead", DeprecationWarning, ) return run_sync(func, *args) class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager): _enter_future: Future _exit_future: Future _exit_event: Event _exit_exc_info: tuple[ type[BaseException] | None, BaseException | None, TracebackType | None ] = (None, None, None) def __init__(self, async_cm: AsyncContextManager[T_co], portal: BlockingPortal): self._async_cm = async_cm self._portal = portal async def run_async_cm(self) -> bool | None: try: self._exit_event = Event() value = await self._async_cm.__aenter__() except BaseException as exc: self._enter_future.set_exception(exc) raise else: self._enter_future.set_result(value) try: # Wait for the sync context manager to exit. # This next statement can raise `get_cancelled_exc_class()` if # something went wrong in a task group in this async context # manager. await self._exit_event.wait() finally: # In case of cancellation, it could be that we end up here before # `_BlockingAsyncContextManager.__exit__` is called, and an # `_exit_exc_info` has been set. 
result = await self._async_cm.__aexit__(*self._exit_exc_info) return result def __enter__(self) -> T_co: self._enter_future = Future() self._exit_future = self._portal.start_task_soon(self.run_async_cm) cm = self._enter_future.result() return cast(T_co, cm) def __exit__( self, __exc_type: type[BaseException] | None, __exc_value: BaseException | None, __traceback: TracebackType | None, ) -> bool | None: self._exit_exc_info = __exc_type, __exc_value, __traceback self._portal.call(self._exit_event.set) return self._exit_future.result() class _BlockingPortalTaskStatus(TaskStatus): def __init__(self, future: Future): self._future = future def started(self, value: object = None) -> None: self._future.set_result(value) class BlockingPortal: """An object that lets external threads run code in an asynchronous event loop.""" def __new__(cls) -> BlockingPortal: return get_asynclib().BlockingPortal() def __init__(self) -> None: self._event_loop_thread_id: int | None = threading.get_ident() self._stop_event = Event() self._task_group = create_task_group() self._cancelled_exc_class = get_cancelled_exc_class() async def __aenter__(self) -> BlockingPortal: await self._task_group.__aenter__() return self async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: await self.stop() return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) def _check_running(self) -> None: if self._event_loop_thread_id is None: raise RuntimeError("This portal is not running") if self._event_loop_thread_id == threading.get_ident(): raise RuntimeError( "This method cannot be called from the event loop thread" ) async def sleep_until_stopped(self) -> None: """Sleep until :meth:`stop` is called.""" await self._stop_event.wait() async def stop(self, cancel_remaining: bool = False) -> None: """ Signal the portal to shut down. This marks the portal as no longer accepting new calls and exits from :meth:`sleep_until_stopped`. :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False`` to let them finish before returning """ self._event_loop_thread_id = None self._stop_event.set() if cancel_remaining: self._task_group.cancel_scope.cancel() async def _call_func( self, func: Callable, args: tuple, kwargs: dict[str, Any], future: Future ) -> None: def callback(f: Future) -> None: if f.cancelled() and self._event_loop_thread_id not in ( None, threading.get_ident(), ): self.call(scope.cancel) try: retval = func(*args, **kwargs) if iscoroutine(retval): with CancelScope() as scope: if future.cancelled(): scope.cancel() else: future.add_done_callback(callback) retval = await retval except self._cancelled_exc_class: future.cancel() except BaseException as exc: if not future.cancelled(): future.set_exception(exc) # Let base exceptions fall through if not isinstance(exc, Exception): raise else: if not future.cancelled(): future.set_result(retval) finally: scope = None # type: ignore[assignment] def _spawn_task_from_thread( self, func: Callable, args: tuple, kwargs: dict[str, Any], name: object, future: Future, ) -> None: """ Spawn a new task using the given callable. Implementors must ensure that the future is resolved when the task finishes. 
:param func: a callable :param args: positional arguments to be passed to the callable :param kwargs: keyword arguments to be passed to the callable :param name: name of the task (will be coerced to a string if not ``None``) :param future: a future that will resolve to the return value of the callable, or the exception raised during its execution """ raise NotImplementedError @overload def call(self, func: Callable[..., Awaitable[T_Retval]], *args: object) -> T_Retval: ... @overload def call(self, func: Callable[..., T_Retval], *args: object) -> T_Retval: ... def call( self, func: Callable[..., Awaitable[T_Retval] | T_Retval], *args: object ) -> T_Retval: """ Call the given function in the event loop thread. If the callable returns a coroutine object, it is awaited on. :param func: any callable :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread """ return cast(T_Retval, self.start_task_soon(func, *args).result()) @overload def spawn_task( self, func: Callable[..., Awaitable[T_Retval]], *args: object, name: object = None, ) -> Future[T_Retval]: ... @overload def spawn_task( self, func: Callable[..., T_Retval], *args: object, name: object = None ) -> Future[T_Retval]: ... def spawn_task( self, func: Callable[..., Awaitable[T_Retval] | T_Retval], *args: object, name: object = None, ) -> Future[T_Retval]: """ Start a task in the portal's task group. :param func: the target coroutine function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a future that resolves with the return value of the callable if the task completes successfully, or with the exception raised in the task :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread .. versionadded:: 2.1 .. deprecated:: 3.0 Use :meth:`start_task_soon` instead. If your code needs AnyIO 2 compatibility, you can keep using this until AnyIO 4. """ warn( "spawn_task() is deprecated -- use start_task_soon() instead", DeprecationWarning, ) return self.start_task_soon(func, *args, name=name) # type: ignore[arg-type] @overload def start_task_soon( self, func: Callable[..., Awaitable[T_Retval]], *args: object, name: object = None, ) -> Future[T_Retval]: ... @overload def start_task_soon( self, func: Callable[..., T_Retval], *args: object, name: object = None ) -> Future[T_Retval]: ... def start_task_soon( self, func: Callable[..., Awaitable[T_Retval] | T_Retval], *args: object, name: object = None, ) -> Future[T_Retval]: """ Start a task in the portal's task group. The task will be run inside a cancel scope which can be cancelled by cancelling the returned future. :param func: the target function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a future that resolves with the return value of the callable if the task completes successfully, or with the exception raised in the task :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread :rtype: concurrent.futures.Future[T_Retval] .. 
versionadded:: 3.0 """ self._check_running() f: Future = Future() self._spawn_task_from_thread(func, args, {}, name, f) return f def start_task( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None ) -> tuple[Future[Any], Any]: """ Start a task in the portal's task group and wait until it signals for readiness. This method works the same way as :meth:`.abc.TaskGroup.start`. :param func: the target function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a tuple of (future, task_status_value) where the ``task_status_value`` is the value passed to ``task_status.started()`` from within the target function :rtype: tuple[concurrent.futures.Future[Any], Any] .. versionadded:: 3.0 """ def task_done(future: Future) -> None: if not task_status_future.done(): if future.cancelled(): task_status_future.cancel() elif future.exception(): task_status_future.set_exception(future.exception()) else: exc = RuntimeError( "Task exited without calling task_status.started()" ) task_status_future.set_exception(exc) self._check_running() task_status_future: Future = Future() task_status = _BlockingPortalTaskStatus(task_status_future) f: Future = Future() f.add_done_callback(task_done) self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f) return f, task_status_future.result() def wrap_async_context_manager( self, cm: AsyncContextManager[T_co] ) -> ContextManager[T_co]: """ Wrap an async context manager as a synchronous context manager via this portal. Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping in the middle until the synchronous context manager exits. :param cm: an asynchronous context manager :return: a synchronous context manager .. versionadded:: 2.1 """ return _BlockingAsyncContextManager(cm, self) def create_blocking_portal() -> BlockingPortal: """ Create a portal for running functions in the event loop thread from external threads. Use this function in asynchronous code when you need to allow external threads access to the event loop where your asynchronous code is currently running. .. deprecated:: 3.0 Use :class:`.BlockingPortal` directly. """ warn( "create_blocking_portal() has been deprecated -- use anyio.from_thread.BlockingPortal() " "directly", DeprecationWarning, ) return BlockingPortal() @contextmanager def start_blocking_portal( backend: str = "asyncio", backend_options: dict[str, Any] | None = None ) -> Generator[BlockingPortal, Any, None]: """ Start a new event loop in a new thread and run a blocking portal in its main task. The parameters are the same as for :func:`~anyio.run`. :param backend: name of the backend :param backend_options: backend options :return: a context manager that yields a blocking portal .. versionchanged:: 3.0 Usage as a context manager is now required. 
""" async def run_portal() -> None: async with BlockingPortal() as portal_: if future.set_running_or_notify_cancel(): future.set_result(portal_) await portal_.sleep_until_stopped() future: Future[BlockingPortal] = Future() with ThreadPoolExecutor(1) as executor: run_future = executor.submit( _eventloop.run, run_portal, # type: ignore[arg-type] backend=backend, backend_options=backend_options, ) try: wait( cast(Iterable[Future], [run_future, future]), return_when=FIRST_COMPLETED, ) except BaseException: future.cancel() run_future.cancel() raise if future.done(): portal = future.result() cancel_remaining_tasks = False try: yield portal except BaseException: cancel_remaining_tasks = True raise finally: try: portal.call(portal.stop, cancel_remaining_tasks) except RuntimeError: pass run_future.result()
16,563
Python
32.061876
99
0.595242
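A sketch of the blocking-portal pattern above: start_blocking_portal() runs an event loop in a helper thread so plain synchronous code can call coroutine functions.

import anyio
from anyio.from_thread import start_blocking_portal

async def async_add(a: int, b: int) -> int:
    await anyio.sleep(0)
    return a + b

with start_blocking_portal() as portal:
    print(portal.call(async_add, 2, 3))   # 5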
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_subprocesses.py
from __future__ import annotations from abc import abstractmethod from signal import Signals from ._resources import AsyncResource from ._streams import ByteReceiveStream, ByteSendStream class Process(AsyncResource): """An asynchronous version of :class:`subprocess.Popen`.""" @abstractmethod async def wait(self) -> int: """ Wait until the process exits. :return: the exit code of the process """ @abstractmethod def terminate(self) -> None: """ Terminates the process, gracefully if possible. On Windows, this calls ``TerminateProcess()``. On POSIX systems, this sends ``SIGTERM`` to the process. .. seealso:: :meth:`subprocess.Popen.terminate` """ @abstractmethod def kill(self) -> None: """ Kills the process. On Windows, this calls ``TerminateProcess()``. On POSIX systems, this sends ``SIGKILL`` to the process. .. seealso:: :meth:`subprocess.Popen.kill` """ @abstractmethod def send_signal(self, signal: Signals) -> None: """ Send a signal to the subprocess. .. seealso:: :meth:`subprocess.Popen.send_signal` :param signal: the signal number (e.g. :data:`signal.SIGHUP`) """ @property @abstractmethod def pid(self) -> int: """The process ID of the process.""" @property @abstractmethod def returncode(self) -> int | None: """ The return code of the process. If the process has not yet terminated, this will be ``None``. """ @property @abstractmethod def stdin(self) -> ByteSendStream | None: """The stream for the standard input of the process.""" @property @abstractmethod def stdout(self) -> ByteReceiveStream | None: """The stream for the standard output of the process.""" @property @abstractmethod def stderr(self) -> ByteReceiveStream | None: """The stream for the standard error output of the process."""
2,067
Python
24.85
91
0.612482
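A sketch of driving a `Process` through anyio's public `open_process()` helper (the abstract class above is not instantiated directly); assumes a POSIX system with `cat` on the PATH:

import anyio

async def main() -> None:
    async with await anyio.open_process(["cat"]) as process:
        await process.stdin.send(b"hello\n")   # stdin is a ByteSendStream
        print(await process.stdout.receive())  # b"hello\n" echoed back
        process.terminate()                    # SIGTERM on POSIX
        print(await process.wait())            # exit code (negative signal number here)

anyio.run(main)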
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_testing.py
from __future__ import annotations import types from abc import ABCMeta, abstractmethod from collections.abc import AsyncGenerator, Iterable from typing import Any, Callable, Coroutine, TypeVar _T = TypeVar("_T") class TestRunner(metaclass=ABCMeta): """ Encapsulates a running event loop. Every call made through this object will use the same event loop. """ def __enter__(self) -> TestRunner: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: types.TracebackType | None, ) -> bool | None: self.close() return None @abstractmethod def close(self) -> None: """Close the event loop.""" @abstractmethod def run_asyncgen_fixture( self, fixture_func: Callable[..., AsyncGenerator[_T, Any]], kwargs: dict[str, Any], ) -> Iterable[_T]: """ Run an async generator fixture. :param fixture_func: the fixture function :param kwargs: keyword arguments to call the fixture function with :return: an iterator yielding the value yielded from the async generator """ @abstractmethod def run_fixture( self, fixture_func: Callable[..., Coroutine[Any, Any, _T]], kwargs: dict[str, Any], ) -> _T: """ Run an async fixture. :param fixture_func: the fixture function :param kwargs: keyword arguments to call the fixture function with :return: the return value of the fixture function """ @abstractmethod def run_test( self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] ) -> None: """ Run an async test function. :param test_func: the test function :param kwargs: keyword arguments to call the test function with """
1,924
Python
26.112676
98
0.606549
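`TestRunner` itself is abstract; as a rough, hypothetical illustration of the contract (not the actual backend implementation), a minimal asyncio-backed runner could look like this:

import asyncio
from typing import Any, AsyncGenerator, Callable, Coroutine, Iterable, TypeVar

from anyio.abc import TestRunner

_T = TypeVar("_T")

class SimpleAsyncioRunner(TestRunner):
    def __init__(self) -> None:
        self._loop = asyncio.new_event_loop()

    def close(self) -> None:
        self._loop.close()

    def run_fixture(self, fixture_func: Callable[..., Coroutine[Any, Any, _T]], kwargs: dict) -> _T:
        return self._loop.run_until_complete(fixture_func(**kwargs))

    def run_test(self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict) -> None:
        self._loop.run_until_complete(test_func(**kwargs))

    def run_asyncgen_fixture(self, fixture_func: Callable[..., AsyncGenerator[_T, Any]], kwargs: dict) -> Iterable[_T]:
        gen = fixture_func(**kwargs)
        yield self._loop.run_until_complete(gen.asend(None))  # fixture value
        self._loop.run_until_complete(gen.aclose())           # teardown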
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/__init__.py
from __future__ import annotations __all__ = ( "AsyncResource", "IPAddressType", "IPSockAddrType", "SocketAttribute", "SocketStream", "SocketListener", "UDPSocket", "UNIXSocketStream", "UDPPacketType", "ConnectedUDPSocket", "UnreliableObjectReceiveStream", "UnreliableObjectSendStream", "UnreliableObjectStream", "ObjectReceiveStream", "ObjectSendStream", "ObjectStream", "ByteReceiveStream", "ByteSendStream", "ByteStream", "AnyUnreliableByteReceiveStream", "AnyUnreliableByteSendStream", "AnyUnreliableByteStream", "AnyByteReceiveStream", "AnyByteSendStream", "AnyByteStream", "Listener", "Process", "Event", "Condition", "Lock", "Semaphore", "CapacityLimiter", "CancelScope", "TaskGroup", "TaskStatus", "TestRunner", "BlockingPortal", ) from typing import Any from ._resources import AsyncResource from ._sockets import ( ConnectedUDPSocket, IPAddressType, IPSockAddrType, SocketAttribute, SocketListener, SocketStream, UDPPacketType, UDPSocket, UNIXSocketStream, ) from ._streams import ( AnyByteReceiveStream, AnyByteSendStream, AnyByteStream, AnyUnreliableByteReceiveStream, AnyUnreliableByteSendStream, AnyUnreliableByteStream, ByteReceiveStream, ByteSendStream, ByteStream, Listener, ObjectReceiveStream, ObjectSendStream, ObjectStream, UnreliableObjectReceiveStream, UnreliableObjectSendStream, UnreliableObjectStream, ) from ._subprocesses import Process from ._tasks import TaskGroup, TaskStatus from ._testing import TestRunner # Re-exported here, for backwards compatibility # isort: off from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore from .._core._tasks import CancelScope from ..from_thread import BlockingPortal # Re-export imports so they look like they live directly in this package key: str value: Any for key, value in list(locals().items()): if getattr(value, "__module__", "").startswith("anyio.abc."): value.__module__ = __name__
2,159
Python
22.736263
87
0.699398
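The loop at the end rewrites `__module__` only for names whose module starts with `anyio.abc.`, so interfaces defined in the private submodules present themselves as living in `anyio.abc` (re-exports from `anyio._core` keep their original module):

from anyio.abc import TaskGroup

# Defined in anyio.abc._tasks, reported as anyio.abc after the rewrite
print(TaskGroup.__module__)  # "anyio.abc"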
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_resources.py
from __future__ import annotations from abc import ABCMeta, abstractmethod from types import TracebackType from typing import TypeVar T = TypeVar("T") class AsyncResource(metaclass=ABCMeta): """ Abstract base class for all closeable asynchronous resources. Works as an asynchronous context manager which returns the instance itself on enter, and calls :meth:`aclose` on exit. """ async def __aenter__(self: T) -> T: return self async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.aclose() @abstractmethod async def aclose(self) -> None: """Close the resource."""
763
Python
22.874999
98
0.647444
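A minimal concrete resource, sketched against the base class above; `FakeConnection` is hypothetical:

import anyio
from anyio.abc import AsyncResource

class FakeConnection(AsyncResource):
    async def aclose(self) -> None:
        print("connection closed")

async def main() -> None:
    # __aenter__ returns the instance; __aexit__ awaits aclose()
    async with FakeConnection() as conn:
        print("using", conn)

anyio.run(main)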
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_tasks.py
from __future__ import annotations import sys from abc import ABCMeta, abstractmethod from types import TracebackType from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar, overload from warnings import warn if sys.version_info >= (3, 8): from typing import Protocol else: from typing_extensions import Protocol if TYPE_CHECKING: from anyio._core._tasks import CancelScope T_Retval = TypeVar("T_Retval") T_contra = TypeVar("T_contra", contravariant=True) class TaskStatus(Protocol[T_contra]): @overload def started(self: TaskStatus[None]) -> None: ... @overload def started(self, value: T_contra) -> None: ... def started(self, value: T_contra | None = None) -> None: """ Signal that the task has started. :param value: object passed back to the starter of the task """ class TaskGroup(metaclass=ABCMeta): """ Groups several asynchronous tasks together. :ivar cancel_scope: the cancel scope inherited by all child tasks :vartype cancel_scope: CancelScope """ cancel_scope: CancelScope async def spawn( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None, ) -> None: """ Start a new task in this task group. :param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging .. deprecated:: 3.0 Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you can keep using this until AnyIO 4. """ warn( 'spawn() is deprecated -- use start_soon() (without the "await") instead', DeprecationWarning, ) self.start_soon(func, *args, name=name) @abstractmethod def start_soon( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None, ) -> None: """ Start a new task in this task group. :param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging .. versionadded:: 3.0 """ @abstractmethod async def start( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None, ) -> Any: """ Start a new task and wait until it signals for readiness. :param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging :return: the value passed to ``task_status.started()`` :raises RuntimeError: if the task finishes without calling ``task_status.started()`` .. versionadded:: 3.0 """ @abstractmethod async def __aenter__(self) -> TaskGroup: """Enter the task group context and allow starting new tasks.""" @abstractmethod async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: """Exit the task group context waiting for all tasks to finish."""
3,413
Python
27.45
92
0.615001
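A sketch of the `start()` readiness handshake described above, using anyio's public task group factory; `serve_stub` is hypothetical:

import anyio
from anyio.abc import TaskStatus

async def serve_stub(*, task_status: TaskStatus) -> None:
    task_status.started("listening")  # value handed back to the starter
    await anyio.sleep_forever()

async def main() -> None:
    async with anyio.create_task_group() as tg:
        value = await tg.start(serve_stub)
        print(value)              # "listening"
        tg.cancel_scope.cancel()  # stop the sleeping task

anyio.run(main)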
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_streams.py
from __future__ import annotations from abc import abstractmethod from typing import Any, Callable, Generic, TypeVar, Union from .._core._exceptions import EndOfStream from .._core._typedattr import TypedAttributeProvider from ._resources import AsyncResource from ._tasks import TaskGroup T_Item = TypeVar("T_Item") T_co = TypeVar("T_co", covariant=True) T_contra = TypeVar("T_contra", contravariant=True) class UnreliableObjectReceiveStream( Generic[T_co], AsyncResource, TypedAttributeProvider ): """ An interface for receiving objects. This interface makes no guarantees that the received messages arrive in the order in which they were sent, or that no messages are missed. Asynchronously iterating over objects of this type will yield objects matching the given type parameter. """ def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]: return self async def __anext__(self) -> T_co: try: return await self.receive() except EndOfStream: raise StopAsyncIteration @abstractmethod async def receive(self) -> T_co: """ Receive the next item. :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly closed :raises ~anyio.EndOfStream: if this stream has been closed from the other end :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable due to external causes """ class UnreliableObjectSendStream( Generic[T_contra], AsyncResource, TypedAttributeProvider ): """ An interface for sending objects. This interface makes no guarantees that the messages sent will reach the recipient(s) in the same order in which they were sent, or at all. """ @abstractmethod async def send(self, item: T_contra) -> None: """ Send an item to the peer(s). :param item: the item to send :raises ~anyio.ClosedResourceError: if the send stream has been explicitly closed :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable due to external causes """ class UnreliableObjectStream( UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item] ): """ A bidirectional message stream which does not guarantee the order or reliability of message delivery. """ class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]): """ A receive message stream which guarantees that messages are received in the same order in which they were sent, and that no messages are missed. """ class ObjectSendStream(UnreliableObjectSendStream[T_contra]): """ A send message stream which guarantees that messages are delivered in the same order in which they were sent, without missing any messages in the middle. """ class ObjectStream( ObjectReceiveStream[T_Item], ObjectSendStream[T_Item], UnreliableObjectStream[T_Item], ): """ A bidirectional message stream which guarantees the order and reliability of message delivery. """ @abstractmethod async def send_eof(self) -> None: """ Send an end-of-file indication to the peer. You should not try to send any further data to this stream after calling this method. This method is idempotent (does nothing on successive calls). """ class ByteReceiveStream(AsyncResource, TypedAttributeProvider): """ An interface for receiving bytes from a single peer. Iterating this byte stream will yield a byte string of arbitrary length, but no more than 65536 bytes. """ def __aiter__(self) -> ByteReceiveStream: return self async def __anext__(self) -> bytes: try: return await self.receive() except EndOfStream: raise StopAsyncIteration @abstractmethod async def receive(self, max_bytes: int = 65536) -> bytes: """ Receive at most ``max_bytes`` bytes from the peer. .. 
note:: Implementors of this interface should not return an empty :class:`bytes` object, and users should ignore them. :param max_bytes: maximum number of bytes to receive :return: the received bytes :raises ~anyio.EndOfStream: if this stream has been closed from the other end """ class ByteSendStream(AsyncResource, TypedAttributeProvider): """An interface for sending bytes to a single peer.""" @abstractmethod async def send(self, item: bytes) -> None: """ Send the given bytes to the peer. :param item: the bytes to send """ class ByteStream(ByteReceiveStream, ByteSendStream): """A bidirectional byte stream.""" @abstractmethod async def send_eof(self) -> None: """ Send an end-of-file indication to the peer. You should not try to send any further data to this stream after calling this method. This method is idempotent (does nothing on successive calls). """ #: Type alias for all unreliable bytes-oriented receive streams. AnyUnreliableByteReceiveStream = Union[ UnreliableObjectReceiveStream[bytes], ByteReceiveStream ] #: Type alias for all unreliable bytes-oriented send streams. AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream] #: Type alias for all unreliable bytes-oriented streams. AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream] #: Type alias for all bytes-oriented receive streams. AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream] #: Type alias for all bytes-oriented send streams. AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream] #: Type alias for all bytes-oriented streams. AnyByteStream = Union[ObjectStream[bytes], ByteStream] class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider): """An interface for objects that let you accept incoming connections.""" @abstractmethod async def serve( self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None, ) -> None: """ Accept incoming connections as they come in and start tasks to handle them. :param handler: a callable that will be used to handle each accepted connection :param task_group: the task group that will be used to start tasks for handling each accepted connection (if omitted, an ad-hoc task group will be created) """
6,584
Python
31.279412
99
0.691981
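A sketch tying the `Listener` and byte-stream interfaces together with anyio's TCP helpers (an echo server; `local_port=0` asks the OS for a free port):

import anyio
from anyio.abc import SocketStream

async def echo(stream: SocketStream) -> None:
    # Iterating a ByteReceiveStream yields chunks until EndOfStream
    async for chunk in stream:
        await stream.send(chunk)

async def main() -> None:
    listener = await anyio.create_tcp_listener(local_port=0)
    await listener.serve(echo)  # serves until cancelled

anyio.run(main)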
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/abc/_sockets.py
from __future__ import annotations import socket from abc import abstractmethod from contextlib import AsyncExitStack from io import IOBase from ipaddress import IPv4Address, IPv6Address from socket import AddressFamily from typing import ( Any, Callable, Collection, Mapping, Tuple, TypeVar, Union, ) from .._core._tasks import create_task_group from .._core._typedattr import ( TypedAttributeProvider, TypedAttributeSet, typed_attribute, ) from ._streams import ByteStream, Listener, UnreliableObjectStream from ._tasks import TaskGroup IPAddressType = Union[str, IPv4Address, IPv6Address] IPSockAddrType = Tuple[str, int] SockAddrType = Union[IPSockAddrType, str] UDPPacketType = Tuple[bytes, IPSockAddrType] T_Retval = TypeVar("T_Retval") class SocketAttribute(TypedAttributeSet): #: the address family of the underlying socket family: AddressFamily = typed_attribute() #: the local socket address of the underlying socket local_address: SockAddrType = typed_attribute() #: for IP addresses, the local port the underlying socket is bound to local_port: int = typed_attribute() #: the underlying stdlib socket object raw_socket: socket.socket = typed_attribute() #: the remote address the underlying socket is connected to remote_address: SockAddrType = typed_attribute() #: for IP addresses, the remote port the underlying socket is connected to remote_port: int = typed_attribute() class _SocketProvider(TypedAttributeProvider): @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: from .._core._sockets import convert_ipv6_sockaddr as convert attributes: dict[Any, Callable[[], Any]] = { SocketAttribute.family: lambda: self._raw_socket.family, SocketAttribute.local_address: lambda: convert( self._raw_socket.getsockname() ), SocketAttribute.raw_socket: lambda: self._raw_socket, } try: peername: tuple[str, int] | None = convert(self._raw_socket.getpeername()) except OSError: peername = None # Provide the remote address for connected sockets if peername is not None: attributes[SocketAttribute.remote_address] = lambda: peername # Provide local and remote ports for IP based sockets if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6): attributes[ SocketAttribute.local_port ] = lambda: self._raw_socket.getsockname()[1] if peername is not None: remote_port = peername[1] attributes[SocketAttribute.remote_port] = lambda: remote_port return attributes @property @abstractmethod def _raw_socket(self) -> socket.socket: pass class SocketStream(ByteStream, _SocketProvider): """ Transports bytes over a socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ class UNIXSocketStream(SocketStream): @abstractmethod async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: """ Send file descriptors along with a message to the peer. :param message: a non-empty bytestring :param fds: a collection of files (either numeric file descriptors or open file or socket objects) """ @abstractmethod async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: """ Receive file descriptors along with a message from the peer. :param msglen: length of the message to expect from the peer :param maxfds: maximum number of file descriptors to expect from the peer :return: a tuple of (message, file descriptors) """ class SocketListener(Listener[SocketStream], _SocketProvider): """ Listens to incoming socket connections. Supports all relevant extra attributes from :class:`~SocketAttribute`. 
""" @abstractmethod async def accept(self) -> SocketStream: """Accept an incoming connection.""" async def serve( self, handler: Callable[[SocketStream], Any], task_group: TaskGroup | None = None, ) -> None: async with AsyncExitStack() as exit_stack: if task_group is None: task_group = await exit_stack.enter_async_context(create_task_group()) while True: stream = await self.accept() task_group.start_soon(handler, stream) class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider): """ Represents an unconnected UDP socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ async def sendto(self, data: bytes, host: str, port: int) -> None: """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).""" return await self.send((data, (host, port))) class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider): """ Represents an connected UDP socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """
5,243
Python
31.571428
97
0.666603
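Typed attributes from `SocketAttribute` are queried through `extra()` on any socket stream; a small sketch assuming outbound connectivity to example.com:

import anyio
from anyio.abc import SocketAttribute

async def main() -> None:
    async with await anyio.connect_tcp("example.com", 80) as stream:
        print(stream.extra(SocketAttribute.remote_address))
        print(stream.extra(SocketAttribute.local_port))

anyio.run(main)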
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/text.py
from __future__ import annotations import codecs from dataclasses import InitVar, dataclass, field from typing import Any, Callable, Mapping from ..abc import ( AnyByteReceiveStream, AnyByteSendStream, AnyByteStream, ObjectReceiveStream, ObjectSendStream, ObjectStream, ) @dataclass(eq=False) class TextReceiveStream(ObjectReceiveStream[str]): """ Stream wrapper that decodes bytes to strings using the given encoding. Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any completely received unicode characters as soon as they come in. :param transport_stream: any bytes-based receive stream :param encoding: character encoding to use for decoding bytes to strings (defaults to ``utf-8``) :param errors: handling scheme for decoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteReceiveStream encoding: InitVar[str] = "utf-8" errors: InitVar[str] = "strict" _decoder: codecs.IncrementalDecoder = field(init=False) def __post_init__(self, encoding: str, errors: str) -> None: decoder_class = codecs.getincrementaldecoder(encoding) self._decoder = decoder_class(errors=errors) async def receive(self) -> str: while True: chunk = await self.transport_stream.receive() decoded = self._decoder.decode(chunk) if decoded: return decoded async def aclose(self) -> None: await self.transport_stream.aclose() self._decoder.reset() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.transport_stream.extra_attributes @dataclass(eq=False) class TextSendStream(ObjectSendStream[str]): """ Sends strings to the wrapped stream as bytes using the given encoding. :param AnyByteSendStream transport_stream: any bytes-based send stream :param str encoding: character encoding to use for encoding strings to bytes (defaults to ``utf-8``) :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteSendStream encoding: InitVar[str] = "utf-8" errors: str = "strict" _encoder: Callable[..., tuple[bytes, int]] = field(init=False) def __post_init__(self, encoding: str) -> None: self._encoder = codecs.getencoder(encoding) async def send(self, item: str) -> None: encoded = self._encoder(item, self.errors)[0] await self.transport_stream.send(encoded) async def aclose(self) -> None: await self.transport_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.transport_stream.extra_attributes @dataclass(eq=False) class TextStream(ObjectStream[str]): """ A bidirectional stream that decodes bytes to strings on receive and encodes strings to bytes on send. Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. :param AnyByteStream transport_stream: any bytes-based stream :param str encoding: character encoding to use for encoding/decoding strings to/from bytes (defaults to ``utf-8``) :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. 
_codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteStream encoding: InitVar[str] = "utf-8" errors: InitVar[str] = "strict" _receive_stream: TextReceiveStream = field(init=False) _send_stream: TextSendStream = field(init=False) def __post_init__(self, encoding: str, errors: str) -> None: self._receive_stream = TextReceiveStream( self.transport_stream, encoding=encoding, errors=errors ) self._send_stream = TextSendStream( self.transport_stream, encoding=encoding, errors=errors ) async def receive(self) -> str: return await self._receive_stream.receive() async def send(self, item: str) -> None: await self._send_stream.send(item) async def send_eof(self) -> None: await self.transport_stream.send_eof() async def aclose(self) -> None: await self._send_stream.aclose() await self._receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { **self._send_stream.extra_attributes, **self._receive_stream.extra_attributes, }
5,043
Python
34.027778
99
0.678961
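The incremental decoding is easiest to see over a memory stream carrying bytes: the decoder buffers an incomplete UTF-8 sequence until the rest arrives:

import anyio
from anyio.streams.text import TextReceiveStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(10)
    text = TextReceiveStream(receive)
    data = "å".encode()        # two bytes in UTF-8
    await send.send(data[:1])  # incomplete character decodes to ""
    await send.send(data[1:])
    print(await text.receive())  # "å", returned once complete

anyio.run(main)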
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/stapled.py
from __future__ import annotations from dataclasses import dataclass from typing import Any, Callable, Generic, Mapping, Sequence, TypeVar from ..abc import ( ByteReceiveStream, ByteSendStream, ByteStream, Listener, ObjectReceiveStream, ObjectSendStream, ObjectStream, TaskGroup, ) T_Item = TypeVar("T_Item") T_Stream = TypeVar("T_Stream") @dataclass(eq=False) class StapledByteStream(ByteStream): """ Combines two byte streams into a single, bidirectional byte stream. Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. :param ByteSendStream send_stream: the sending byte stream :param ByteReceiveStream receive_stream: the receiving byte stream """ send_stream: ByteSendStream receive_stream: ByteReceiveStream async def receive(self, max_bytes: int = 65536) -> bytes: return await self.receive_stream.receive(max_bytes) async def send(self, item: bytes) -> None: await self.send_stream.send(item) async def send_eof(self) -> None: await self.send_stream.aclose() async def aclose(self) -> None: await self.send_stream.aclose() await self.receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { **self.send_stream.extra_attributes, **self.receive_stream.extra_attributes, } @dataclass(eq=False) class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]): """ Combines two object streams into a single, bidirectional object stream. Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. :param ObjectSendStream send_stream: the sending object stream :param ObjectReceiveStream receive_stream: the receiving object stream """ send_stream: ObjectSendStream[T_Item] receive_stream: ObjectReceiveStream[T_Item] async def receive(self) -> T_Item: return await self.receive_stream.receive() async def send(self, item: T_Item) -> None: await self.send_stream.send(item) async def send_eof(self) -> None: await self.send_stream.aclose() async def aclose(self) -> None: await self.send_stream.aclose() await self.receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { **self.send_stream.extra_attributes, **self.receive_stream.extra_attributes, } @dataclass(eq=False) class MultiListener(Generic[T_Stream], Listener[T_Stream]): """ Combines multiple listeners into one, serving connections from all of them at once. Any MultiListeners in the given collection of listeners will have their listeners moved into this one. Extra attributes are provided from each listener, with each successive listener overriding any conflicting attributes from the previous one. :param listeners: listeners to serve :type listeners: Sequence[Listener[T_Stream]] """ listeners: Sequence[Listener[T_Stream]] def __post_init__(self) -> None: listeners: list[Listener[T_Stream]] = [] for listener in self.listeners: if isinstance(listener, MultiListener): listeners.extend(listener.listeners) del listener.listeners[:] # type: ignore[attr-defined] else: listeners.append(listener) self.listeners = listeners async def serve( self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None ) -> None: from .. 
import create_task_group async with create_task_group() as tg: for listener in self.listeners: tg.start_soon(listener.serve, handler, task_group) async def aclose(self) -> None: for listener in self.listeners: await listener.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: attributes: dict = {} for listener in self.listeners: attributes.update(listener.extra_attributes) return attributes
4,275
Python
29.326241
98
0.665731
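Stapling a memory send stream to its own receive end makes a simple loopback object stream, enough to exercise the combined interface:

import anyio
from anyio.streams.stapled import StapledObjectStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(1)
    loopback = StapledObjectStream(send, receive)
    await loopback.send("ping")
    print(await loopback.receive())  # "ping"
    await loopback.aclose()

anyio.run(main)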
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/buffered.py
from __future__ import annotations from dataclasses import dataclass, field from typing import Any, Callable, Mapping from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead from ..abc import AnyByteReceiveStream, ByteReceiveStream @dataclass(eq=False) class BufferedByteReceiveStream(ByteReceiveStream): """ Wraps any bytes-based receive stream and uses a buffer to provide sophisticated receiving capabilities in the form of a byte stream. """ receive_stream: AnyByteReceiveStream _buffer: bytearray = field(init=False, default_factory=bytearray) _closed: bool = field(init=False, default=False) async def aclose(self) -> None: await self.receive_stream.aclose() self._closed = True @property def buffer(self) -> bytes: """The bytes currently in the buffer.""" return bytes(self._buffer) @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.receive_stream.extra_attributes async def receive(self, max_bytes: int = 65536) -> bytes: if self._closed: raise ClosedResourceError if self._buffer: chunk = bytes(self._buffer[:max_bytes]) del self._buffer[:max_bytes] return chunk elif isinstance(self.receive_stream, ByteReceiveStream): return await self.receive_stream.receive(max_bytes) else: # With a bytes-oriented object stream, we need to handle any surplus bytes we get from # the receive() call chunk = await self.receive_stream.receive() if len(chunk) > max_bytes: # Save the surplus bytes in the buffer self._buffer.extend(chunk[max_bytes:]) return chunk[:max_bytes] else: return chunk async def receive_exactly(self, nbytes: int) -> bytes: """ Read exactly the given amount of bytes from the stream. :param nbytes: the number of bytes to read :return: the bytes read :raises ~anyio.IncompleteRead: if the stream was closed before the requested amount of bytes could be read from the stream """ while True: remaining = nbytes - len(self._buffer) if remaining <= 0: retval = self._buffer[:nbytes] del self._buffer[:nbytes] return bytes(retval) try: if isinstance(self.receive_stream, ByteReceiveStream): chunk = await self.receive_stream.receive(remaining) else: chunk = await self.receive_stream.receive() except EndOfStream as exc: raise IncompleteRead from exc self._buffer.extend(chunk) async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes: """ Read from the stream until the delimiter is found or max_bytes have been read. :param delimiter: the marker to look for in the stream :param max_bytes: maximum number of bytes that will be read before raising :exc:`~anyio.DelimiterNotFound` :return: the bytes read (not including the delimiter) :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter was found :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the bytes read up to the maximum allowed """ delimiter_size = len(delimiter) offset = 0 while True: # Check if the delimiter can be found in the current buffer index = self._buffer.find(delimiter, offset) if index >= 0: found = self._buffer[:index] del self._buffer[: index + len(delimiter) :] return bytes(found) # Check if the buffer is already at or over the limit if len(self._buffer) >= max_bytes: raise DelimiterNotFound(max_bytes) # Read more data into the buffer from the socket try: data = await self.receive_stream.receive() except EndOfStream as exc: raise IncompleteRead from exc # Move the offset forward and add the new data to the buffer offset = max(len(self._buffer) - delimiter_size + 1, 0) self._buffer.extend(data)
4,473
Python
36.596638
98
0.607199
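A sketch of delimiter-based reading with the buffered wrapper, again over a memory stream of bytes:

import anyio
from anyio.streams.buffered import BufferedByteReceiveStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(2)
    buffered = BufferedByteReceiveStream(receive)
    await send.send(b"HTTP/1.1 200 OK\r\nleftover")
    line = await buffered.receive_until(b"\r\n", max_bytes=1024)
    print(line)             # b"HTTP/1.1 200 OK"
    print(buffered.buffer)  # b"leftover" stays queued for the next read

anyio.run(main)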
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/file.py
from __future__ import annotations from io import SEEK_SET, UnsupportedOperation from os import PathLike from pathlib import Path from typing import Any, BinaryIO, Callable, Mapping, cast from .. import ( BrokenResourceError, ClosedResourceError, EndOfStream, TypedAttributeSet, to_thread, typed_attribute, ) from ..abc import ByteReceiveStream, ByteSendStream class FileStreamAttribute(TypedAttributeSet): #: the open file descriptor file: BinaryIO = typed_attribute() #: the path of the file on the file system, if available (file must be a real file) path: Path = typed_attribute() #: the file number, if available (file must be a real file or a TTY) fileno: int = typed_attribute() class _BaseFileStream: def __init__(self, file: BinaryIO): self._file = file async def aclose(self) -> None: await to_thread.run_sync(self._file.close) @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: attributes: dict[Any, Callable[[], Any]] = { FileStreamAttribute.file: lambda: self._file, } if hasattr(self._file, "name"): attributes[FileStreamAttribute.path] = lambda: Path(self._file.name) try: self._file.fileno() except UnsupportedOperation: pass else: attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno() return attributes class FileReadStream(_BaseFileStream, ByteReceiveStream): """ A byte stream that reads from a file in the file system. :param file: a file that has been opened for reading in binary mode .. versionadded:: 3.0 """ @classmethod async def from_path(cls, path: str | PathLike[str]) -> FileReadStream: """ Create a file read stream by opening the given file. :param path: path of the file to read from """ file = await to_thread.run_sync(Path(path).open, "rb") return cls(cast(BinaryIO, file)) async def receive(self, max_bytes: int = 65536) -> bytes: try: data = await to_thread.run_sync(self._file.read, max_bytes) except ValueError: raise ClosedResourceError from None except OSError as exc: raise BrokenResourceError from exc if data: return data else: raise EndOfStream async def seek(self, position: int, whence: int = SEEK_SET) -> int: """ Seek the file to the given position. .. seealso:: :meth:`io.IOBase.seek` .. note:: Not all file descriptors are seekable. :param position: position to seek the file to :param whence: controls how ``position`` is interpreted :return: the new absolute position :raises OSError: if the file is not seekable """ return await to_thread.run_sync(self._file.seek, position, whence) async def tell(self) -> int: """ Return the current stream position. .. note:: Not all file descriptors are seekable. :return: the current absolute position :raises OSError: if the file is not seekable """ return await to_thread.run_sync(self._file.tell) class FileWriteStream(_BaseFileStream, ByteSendStream): """ A byte stream that writes to a file in the file system. :param file: a file that has been opened for writing in binary mode .. versionadded:: 3.0 """ @classmethod async def from_path( cls, path: str | PathLike[str], append: bool = False ) -> FileWriteStream: """ Create a file write stream by opening the given file for writing. 
:param path: path of the file to write to :param append: if ``True``, open the file for appending; if ``False``, any existing file at the given path will be truncated """ mode = "ab" if append else "wb" file = await to_thread.run_sync(Path(path).open, mode) return cls(cast(BinaryIO, file)) async def send(self, item: bytes) -> None: try: await to_thread.run_sync(self._file.write, item) except ValueError: raise ClosedResourceError from None except OSError as exc: raise BrokenResourceError from exc
4,356
Python
28.439189
96
0.624656
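Round-tripping a file through the two stream classes; the scratch path is illustrative:

import anyio
from anyio.streams.file import FileReadStream, FileWriteStream

async def main() -> None:
    path = "/tmp/anyio-demo.txt"  # hypothetical scratch location
    async with await FileWriteStream.from_path(path) as out:
        await out.send(b"hello file streams\n")
    async with await FileReadStream.from_path(path) as inp:
        print(await inp.receive())

anyio.run(main)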
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/tls.py
from __future__ import annotations import logging import re import ssl from dataclasses import dataclass from functools import wraps from typing import Any, Callable, Mapping, Tuple, TypeVar from .. import ( BrokenResourceError, EndOfStream, aclose_forcefully, get_cancelled_exc_class, ) from .._core._typedattr import TypedAttributeSet, typed_attribute from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup T_Retval = TypeVar("T_Retval") _PCTRTT = Tuple[Tuple[str, str], ...] _PCTRTTT = Tuple[_PCTRTT, ...] class TLSAttribute(TypedAttributeSet): """Contains Transport Layer Security related attributes.""" #: the selected ALPN protocol alpn_protocol: str | None = typed_attribute() #: the channel binding for type ``tls-unique`` channel_binding_tls_unique: bytes = typed_attribute() #: the selected cipher cipher: tuple[str, str, int] = typed_attribute() #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert` #: for more information) peer_certificate: dict[str, str | _PCTRTTT | _PCTRTT] | None = typed_attribute() #: the peer certificate in binary form peer_certificate_binary: bytes | None = typed_attribute() #: ``True`` if this is the server side of the connection server_side: bool = typed_attribute() #: ciphers shared by the client during the TLS handshake (``None`` if this is the #: client side) shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute() #: the :class:`~ssl.SSLObject` used for encryption ssl_object: ssl.SSLObject = typed_attribute() #: ``True`` if this stream does (and expects) a closing TLS handshake when the #: stream is being closed standard_compatible: bool = typed_attribute() #: the TLS protocol version (e.g. ``TLSv1.2``) tls_version: str = typed_attribute() @dataclass(eq=False) class TLSStream(ByteStream): """ A stream wrapper that encrypts all sent data and decrypts received data. This class has no public initializer; use :meth:`wrap` instead. All extra attributes from :class:`~TLSAttribute` are supported. :var AnyByteStream transport_stream: the wrapped stream """ transport_stream: AnyByteStream standard_compatible: bool _ssl_object: ssl.SSLObject _read_bio: ssl.MemoryBIO _write_bio: ssl.MemoryBIO @classmethod async def wrap( cls, transport_stream: AnyByteStream, *, server_side: bool | None = None, hostname: str | None = None, ssl_context: ssl.SSLContext | None = None, standard_compatible: bool = True, ) -> TLSStream: """ Wrap an existing stream with Transport Layer Security. This performs a TLS handshake with the peer. :param transport_stream: a bytes-transporting stream to wrap :param server_side: ``True`` if this is the server side of the connection, ``False`` if this is the client side (if omitted, will be set to ``False`` if ``hostname`` has been provided, ``True`` otherwise). Used only to create a default context when an explicit context has not been provided. 
:param hostname: host name of the peer (if host name checking is desired) :param ssl_context: the SSLContext object to use (if not provided, a secure default will be created) :param standard_compatible: if ``False``, skip the closing handshake when closing the connection, and don't raise an exception if the peer does the same :raises ~ssl.SSLError: if the TLS handshake fails """ if server_side is None: server_side = not hostname if not ssl_context: purpose = ( ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH ) ssl_context = ssl.create_default_context(purpose) # Re-enable detection of unexpected EOFs if it was disabled by Python if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"): ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF bio_in = ssl.MemoryBIO() bio_out = ssl.MemoryBIO() ssl_object = ssl_context.wrap_bio( bio_in, bio_out, server_side=server_side, server_hostname=hostname ) wrapper = cls( transport_stream=transport_stream, standard_compatible=standard_compatible, _ssl_object=ssl_object, _read_bio=bio_in, _write_bio=bio_out, ) await wrapper._call_sslobject_method(ssl_object.do_handshake) return wrapper async def _call_sslobject_method( self, func: Callable[..., T_Retval], *args: object ) -> T_Retval: while True: try: result = func(*args) except ssl.SSLWantReadError: try: # Flush any pending writes first if self._write_bio.pending: await self.transport_stream.send(self._write_bio.read()) data = await self.transport_stream.receive() except EndOfStream: self._read_bio.write_eof() except OSError as exc: self._read_bio.write_eof() self._write_bio.write_eof() raise BrokenResourceError from exc else: self._read_bio.write(data) except ssl.SSLWantWriteError: await self.transport_stream.send(self._write_bio.read()) except ssl.SSLSyscallError as exc: self._read_bio.write_eof() self._write_bio.write_eof() raise BrokenResourceError from exc except ssl.SSLError as exc: self._read_bio.write_eof() self._write_bio.write_eof() if ( isinstance(exc, ssl.SSLEOFError) or "UNEXPECTED_EOF_WHILE_READING" in exc.strerror ): if self.standard_compatible: raise BrokenResourceError from exc else: raise EndOfStream from None raise else: # Flush any pending writes first if self._write_bio.pending: await self.transport_stream.send(self._write_bio.read()) return result async def unwrap(self) -> tuple[AnyByteStream, bytes]: """ Does the TLS closing handshake. 
:return: a tuple of (wrapped byte stream, bytes left in the read buffer) """ await self._call_sslobject_method(self._ssl_object.unwrap) self._read_bio.write_eof() self._write_bio.write_eof() return self.transport_stream, self._read_bio.read() async def aclose(self) -> None: if self.standard_compatible: try: await self.unwrap() except BaseException: await aclose_forcefully(self.transport_stream) raise await self.transport_stream.aclose() async def receive(self, max_bytes: int = 65536) -> bytes: data = await self._call_sslobject_method(self._ssl_object.read, max_bytes) if not data: raise EndOfStream return data async def send(self, item: bytes) -> None: await self._call_sslobject_method(self._ssl_object.write, item) async def send_eof(self) -> None: tls_version = self.extra(TLSAttribute.tls_version) match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version) if match: major, minor = int(match.group(1)), int(match.group(2) or 0) if (major, minor) < (1, 3): raise NotImplementedError( f"send_eof() requires at least TLSv1.3; current " f"session uses {tls_version}" ) raise NotImplementedError( "send_eof() has not yet been implemented for TLS streams" ) @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { **self.transport_stream.extra_attributes, TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol, TLSAttribute.channel_binding_tls_unique: self._ssl_object.get_channel_binding, TLSAttribute.cipher: self._ssl_object.cipher, TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False), TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert( True ), TLSAttribute.server_side: lambda: self._ssl_object.server_side, TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers() if self._ssl_object.server_side else None, TLSAttribute.standard_compatible: lambda: self.standard_compatible, TLSAttribute.ssl_object: lambda: self._ssl_object, TLSAttribute.tls_version: self._ssl_object.version, } @dataclass(eq=False) class TLSListener(Listener[TLSStream]): """ A convenience listener that wraps another listener and auto-negotiates a TLS session on every accepted connection. If the TLS handshake times out or raises an exception, :meth:`handle_handshake_error` is called to do whatever post-mortem processing is deemed necessary. Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute. :param Listener listener: the listener to wrap :param ssl_context: the SSL context object :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap` :param handshake_timeout: time limit for the TLS handshake (passed to :func:`~anyio.fail_after`) """ listener: Listener[Any] ssl_context: ssl.SSLContext standard_compatible: bool = True handshake_timeout: float = 30 @staticmethod async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None: """ Handle an exception raised during the TLS handshake. This method does 3 things: #. Forcefully closes the original stream #. Logs the exception (unless it was a cancellation exception) using the ``anyio.streams.tls`` logger #. 
Reraises the exception if it was a base exception or a cancellation exception :param exc: the exception :param stream: the original stream """ await aclose_forcefully(stream) # Log all except cancellation exceptions if not isinstance(exc, get_cancelled_exc_class()): logging.getLogger(__name__).exception("Error during TLS handshake") # Only reraise base exceptions and cancellation exceptions if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()): raise async def serve( self, handler: Callable[[TLSStream], Any], task_group: TaskGroup | None = None, ) -> None: @wraps(handler) async def handler_wrapper(stream: AnyByteStream) -> None: from .. import fail_after try: with fail_after(self.handshake_timeout): wrapped_stream = await TLSStream.wrap( stream, ssl_context=self.ssl_context, standard_compatible=self.standard_compatible, ) except BaseException as exc: await self.handle_handshake_error(exc, stream) else: await handler(wrapped_stream) await self.listener.serve(handler_wrapper, task_group) async def aclose(self) -> None: await self.listener.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { TLSAttribute.standard_compatible: lambda: self.standard_compatible, }
12,099
Python
36.694704
97
0.609059
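Wrapping an existing TCP stream by hand, as `TLSStream.wrap` describes (assumes outbound TLS connectivity to example.com):

import anyio
from anyio.streams.tls import TLSAttribute, TLSStream

async def main() -> None:
    tcp = await anyio.connect_tcp("example.com", 443)
    tls = await TLSStream.wrap(tcp, hostname="example.com")
    print(tls.extra(TLSAttribute.tls_version))    # e.g. "TLSv1.3"
    print(tls.extra(TLSAttribute.alpn_protocol))  # None unless negotiated
    await tls.aclose()

anyio.run(main)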
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/streams/memory.py
from __future__ import annotations from collections import OrderedDict, deque from dataclasses import dataclass, field from types import TracebackType from typing import Generic, NamedTuple, TypeVar from .. import ( BrokenResourceError, ClosedResourceError, EndOfStream, WouldBlock, get_cancelled_exc_class, ) from .._core._compat import DeprecatedAwaitable from ..abc import Event, ObjectReceiveStream, ObjectSendStream from ..lowlevel import checkpoint T_Item = TypeVar("T_Item") T_co = TypeVar("T_co", covariant=True) T_contra = TypeVar("T_contra", contravariant=True) class MemoryObjectStreamStatistics(NamedTuple): current_buffer_used: int #: number of items stored in the buffer #: maximum number of items that can be stored on this stream (or :data:`math.inf`) max_buffer_size: float open_send_streams: int #: number of unclosed clones of the send stream open_receive_streams: int #: number of unclosed clones of the receive stream tasks_waiting_send: int #: number of tasks blocked on :meth:`MemoryObjectSendStream.send` #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive` tasks_waiting_receive: int @dataclass(eq=False) class MemoryObjectStreamState(Generic[T_Item]): max_buffer_size: float = field() buffer: deque[T_Item] = field(init=False, default_factory=deque) open_send_channels: int = field(init=False, default=0) open_receive_channels: int = field(init=False, default=0) waiting_receivers: OrderedDict[Event, list[T_Item]] = field( init=False, default_factory=OrderedDict ) waiting_senders: OrderedDict[Event, T_Item] = field( init=False, default_factory=OrderedDict ) def statistics(self) -> MemoryObjectStreamStatistics: return MemoryObjectStreamStatistics( len(self.buffer), self.max_buffer_size, self.open_send_channels, self.open_receive_channels, len(self.waiting_senders), len(self.waiting_receivers), ) @dataclass(eq=False) class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]): _state: MemoryObjectStreamState[T_co] _closed: bool = field(init=False, default=False) def __post_init__(self) -> None: self._state.open_receive_channels += 1 def receive_nowait(self) -> T_co: """ Receive the next item if it can be done without waiting. :return: the received item :raises ~anyio.ClosedResourceError: if this receive stream has been closed :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been closed from the sending end :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks waiting to send """ if self._closed: raise ClosedResourceError if self._state.waiting_senders: # Get the item from the next sender send_event, item = self._state.waiting_senders.popitem(last=False) self._state.buffer.append(item) send_event.set() if self._state.buffer: return self._state.buffer.popleft() elif not self._state.open_send_channels: raise EndOfStream raise WouldBlock async def receive(self) -> T_co: await checkpoint() try: return self.receive_nowait() except WouldBlock: # Add ourselves in the queue receive_event = Event() container: list[T_co] = [] self._state.waiting_receivers[receive_event] = container try: await receive_event.wait() except get_cancelled_exc_class(): # Ignore the immediate cancellation if we already received an item, so as not to # lose it if not container: raise finally: self._state.waiting_receivers.pop(receive_event, None) if container: return container[0] else: raise EndOfStream def clone(self) -> MemoryObjectReceiveStream[T_co]: """ Create a clone of this receive stream. Each clone can be closed separately. 
Only when all clones have been closed will the receiving end of the memory stream be considered closed by the sending ends. :return: the cloned stream """ if self._closed: raise ClosedResourceError return MemoryObjectReceiveStream(_state=self._state) def close(self) -> None: """ Close the stream. This works the exact same way as :meth:`aclose`, but is provided as a special case for the benefit of synchronous callbacks. """ if not self._closed: self._closed = True self._state.open_receive_channels -= 1 if self._state.open_receive_channels == 0: send_events = list(self._state.waiting_senders.keys()) for event in send_events: event.set() async def aclose(self) -> None: self.close() def statistics(self) -> MemoryObjectStreamStatistics: """ Return statistics about the current state of this stream. .. versionadded:: 3.0 """ return self._state.statistics() def __enter__(self) -> MemoryObjectReceiveStream[T_co]: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.close() @dataclass(eq=False) class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]): _state: MemoryObjectStreamState[T_contra] _closed: bool = field(init=False, default=False) def __post_init__(self) -> None: self._state.open_send_channels += 1 def send_nowait(self, item: T_contra) -> DeprecatedAwaitable: """ Send an item immediately if it can be done without waiting. :param item: the item to send :raises ~anyio.ClosedResourceError: if this send stream has been closed :raises ~anyio.BrokenResourceError: if the stream has been closed from the receiving end :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting to receive """ if self._closed: raise ClosedResourceError if not self._state.open_receive_channels: raise BrokenResourceError if self._state.waiting_receivers: receive_event, container = self._state.waiting_receivers.popitem(last=False) container.append(item) receive_event.set() elif len(self._state.buffer) < self._state.max_buffer_size: self._state.buffer.append(item) else: raise WouldBlock return DeprecatedAwaitable(self.send_nowait) async def send(self, item: T_contra) -> None: await checkpoint() try: self.send_nowait(item) except WouldBlock: # Wait until there's someone on the receiving end send_event = Event() self._state.waiting_senders[send_event] = item try: await send_event.wait() except BaseException: self._state.waiting_senders.pop(send_event, None) # type: ignore[arg-type] raise if self._state.waiting_senders.pop(send_event, None): # type: ignore[arg-type] raise BrokenResourceError def clone(self) -> MemoryObjectSendStream[T_contra]: """ Create a clone of this send stream. Each clone can be closed separately. Only when all clones have been closed will the sending end of the memory stream be considered closed by the receiving ends. :return: the cloned stream """ if self._closed: raise ClosedResourceError return MemoryObjectSendStream(_state=self._state) def close(self) -> None: """ Close the stream. This works the exact same way as :meth:`aclose`, but is provided as a special case for the benefit of synchronous callbacks. 
""" if not self._closed: self._closed = True self._state.open_send_channels -= 1 if self._state.open_send_channels == 0: receive_events = list(self._state.waiting_receivers.keys()) self._state.waiting_receivers.clear() for event in receive_events: event.set() async def aclose(self) -> None: self.close() def statistics(self) -> MemoryObjectStreamStatistics: """ Return statistics about the current state of this stream. .. versionadded:: 3.0 """ return self._state.statistics() def __enter__(self) -> MemoryObjectSendStream[T_contra]: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.close()
9,274
Python
32.125
98
0.614514
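The close-then-iterate pattern the docstrings describe, sketched with one producer and one consumer over a zero-capacity (rendezvous) stream:

import anyio
from anyio.streams.memory import MemoryObjectSendStream

async def producer(send: MemoryObjectSendStream) -> None:
    async with send:  # closing signals EndOfStream to the receiver
        for i in range(3):
            await send.send(i)

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(0)
    async with anyio.create_task_group() as tg:
        tg.start_soon(producer, send)
        async with receive:
            async for item in receive:  # ends when all send clones close
                print(item)

anyio.run(main)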
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_eventloop.py
from __future__ import annotations import math import sys import threading from contextlib import contextmanager from importlib import import_module from typing import ( Any, Awaitable, Callable, Generator, TypeVar, ) import sniffio # This must be updated when new backends are introduced from ._compat import DeprecatedAwaitableFloat BACKENDS = "asyncio", "trio" T_Retval = TypeVar("T_Retval") threadlocals = threading.local() def run( func: Callable[..., Awaitable[T_Retval]], *args: object, backend: str = "asyncio", backend_options: dict[str, Any] | None = None, ) -> T_Retval: """ Run the given coroutine function in an asynchronous event loop. The current thread must not be already running an event loop. :param func: a coroutine function :param args: positional arguments to ``func`` :param backend: name of the asynchronous event loop implementation – currently either ``asyncio`` or ``trio`` :param backend_options: keyword arguments to call the backend ``run()`` implementation with (documented :ref:`here <backend options>`) :return: the return value of the coroutine function :raises RuntimeError: if an asynchronous event loop is already running in this thread :raises LookupError: if the named backend is not found """ try: asynclib_name = sniffio.current_async_library() except sniffio.AsyncLibraryNotFoundError: pass else: raise RuntimeError(f"Already running {asynclib_name} in this thread") try: asynclib = import_module(f"..._backends._{backend}", package=__name__) except ImportError as exc: raise LookupError(f"No such backend: {backend}") from exc token = None if sniffio.current_async_library_cvar.get(None) is None: # Since we're in control of the event loop, we can cache the name of the async library token = sniffio.current_async_library_cvar.set(backend) try: backend_options = backend_options or {} return asynclib.run(func, *args, **backend_options) finally: if token: sniffio.current_async_library_cvar.reset(token) async def sleep(delay: float) -> None: """ Pause the current task for the specified duration. :param delay: the duration, in seconds """ return await get_asynclib().sleep(delay) async def sleep_forever() -> None: """ Pause the current task until it's cancelled. This is a shortcut for ``sleep(math.inf)``. .. versionadded:: 3.1 """ await sleep(math.inf) async def sleep_until(deadline: float) -> None: """ Pause the current task until the given time. :param deadline: the absolute time to wake up at (according to the internal monotonic clock of the event loop) .. versionadded:: 3.1 """ now = current_time() await sleep(max(deadline - now, 0)) def current_time() -> DeprecatedAwaitableFloat: """ Return the current value of the event loop's internal clock. 
:return: the clock value (seconds) """ return DeprecatedAwaitableFloat(get_asynclib().current_time(), current_time) def get_all_backends() -> tuple[str, ...]: """Return a tuple of the names of all built-in backends.""" return BACKENDS def get_cancelled_exc_class() -> type[BaseException]: """Return the current async library's cancellation exception class.""" return get_asynclib().CancelledError # # Private API # @contextmanager def claim_worker_thread(backend: str) -> Generator[Any, None, None]: module = sys.modules["anyio._backends._" + backend] threadlocals.current_async_module = module try: yield finally: del threadlocals.current_async_module def get_asynclib(asynclib_name: str | None = None) -> Any: if asynclib_name is None: asynclib_name = sniffio.current_async_library() modulename = "anyio._backends._" + asynclib_name try: return sys.modules[modulename] except KeyError: return import_module(modulename)
4,081
Python
25.506493
98
0.672629
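Selecting a backend through `run()` as the docstring above describes (`use_uvloop` is one of the asyncio backend's documented options):

import anyio

async def main() -> str:
    await anyio.sleep(0.01)
    return ", ".join(anyio.get_all_backends())

print(anyio.run(main, backend="asyncio", backend_options={"use_uvloop": False}))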
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_subprocesses.py
from __future__ import annotations

from io import BytesIO
from os import PathLike
from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
from typing import (
    IO,
    Any,
    AsyncIterable,
    Mapping,
    Sequence,
    cast,
)

from ..abc import Process
from ._eventloop import get_asynclib
from ._tasks import create_task_group


async def run_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    input: bytes | None = None,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    check: bool = True,
    cwd: str | bytes | PathLike[str] | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> CompletedProcess[bytes]:
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings containing
        the executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL`
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or
        :data:`subprocess.STDOUT`
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process
        terminates with a return code other than 0
    :param cwd: If not ``None``, change the working directory to this before running the command
    :param env: if not ``None``, this mapping replaces the inherited environment variables from
        the parent process
    :param start_new_session: if ``true`` the setsid() system call will be made in the child
        process prior to the execution of the subprocess. (POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with
        a nonzero return code

    """

    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
        buffer = BytesIO()
        async for chunk in stream:
            buffer.write(chunk)

        stream_contents[index] = buffer.getvalue()

    async with await open_process(
        command,
        stdin=PIPE if input else DEVNULL,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        start_new_session=start_new_session,
    ) as process:
        stream_contents: list[bytes | None] = [None, None]
        try:
            async with create_task_group() as tg:
                if process.stdout:
                    tg.start_soon(drain_stream, process.stdout, 0)
                if process.stderr:
                    tg.start_soon(drain_stream, process.stderr, 1)
                if process.stdin and input:
                    await process.stdin.send(input)
                    await process.stdin.aclose()

                await process.wait()
        except BaseException:
            process.kill()
            raise

    output, errors = stream_contents
    if check and process.returncode != 0:
        raise CalledProcessError(cast(int, process.returncode), command, output, errors)

    return CompletedProcess(command, cast(int, process.returncode), output, errors)


async def open_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    stdin: int | IO[Any] | None = PIPE,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    cwd: str | bytes | PathLike[str] | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> Process:
    """
    Start an external command in a subprocess.

    .. seealso:: :class:`subprocess.Popen`

    :param command: either a string to pass to the shell, or an iterable of strings containing
        the executable name or path and its arguments
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
        file-like object, or ``None``
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param cwd: If not ``None``, the working directory is changed before executing
    :param env: If env is not ``None``, it must be a mapping that defines the environment
        variables for the new process
    :param start_new_session: if ``true`` the setsid() system call will be made in the child
        process prior to the execution of the subprocess. (POSIX only)
    :return: an asynchronous process object

    """
    shell = isinstance(command, str)
    return await get_asynclib().open_process(
        command,
        shell=shell,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        start_new_session=start_new_session,
    )
4,977
Python
35.602941
99
0.639743
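A minimal usage sketch for run_process, assuming a POSIX system where the echo binary exists (run_process is re-exported from the top-level anyio package):

import anyio
from anyio import run_process

async def main() -> None:
    result = await run_process(["echo", "hello"])  # captures stdout/stderr by default
    print(result.stdout)  # b'hello\n'

anyio.run(main)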
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_testing.py
from __future__ import annotations

from typing import Any, Awaitable, Generator

from ._compat import DeprecatedAwaitableList, _warn_deprecation
from ._eventloop import get_asynclib


class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = "_name", "id", "parent_id", "name", "coro"

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        func = get_current_task
        self._name = f"{func.__module__}.{func.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        if isinstance(other, TaskInfo):
            return self.id == other.id

        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"

    def __await__(self) -> Generator[None, None, TaskInfo]:
        _warn_deprecation(self)
        if False:
            yield

        return self

    def _unwrap(self) -> TaskInfo:
        return self


def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    return get_asynclib().get_current_task()


def get_running_tasks() -> DeprecatedAwaitableList[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    tasks = get_asynclib().get_running_tasks()
    return DeprecatedAwaitableList(tasks, func=get_running_tasks)


async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    await get_asynclib().wait_all_tasks_blocked()
2,217
Python
25.722891
79
0.612539
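A minimal usage sketch for the task introspection helpers, assuming the top-level anyio package re-exports them:

import anyio
from anyio import get_current_task, get_running_tasks

async def main() -> None:
    task = get_current_task()
    print(task.id, task.name)          # unique id and description of this task
    print(len(get_running_tasks()))    # number of tasks in the current event loop

anyio.run(main)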
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_compat.py
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from contextlib import AbstractContextManager
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncContextManager,
    Callable,
    ContextManager,
    Generator,
    Generic,
    Iterable,
    List,
    TypeVar,
    Union,
    overload,
)
from warnings import warn

if TYPE_CHECKING:
    from ._testing import TaskInfo
else:
    TaskInfo = object

T = TypeVar("T")
AnyDeprecatedAwaitable = Union[
    "DeprecatedAwaitable",
    "DeprecatedAwaitableFloat",
    "DeprecatedAwaitableList[T]",
    TaskInfo,
]


@overload
async def maybe_async(__obj: TaskInfo) -> TaskInfo:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitableFloat) -> float:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitableList[T]) -> list[T]:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitable) -> None:
    ...


async def maybe_async(
    __obj: AnyDeprecatedAwaitable[T],
) -> TaskInfo | float | list[T] | None:
    """
    Await on the given object if necessary.

    This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions
    and methods were converted from coroutine functions into regular functions.

    Do **not** try to use this for any other purpose!

    :return: the result of awaiting on the object if coroutine, or the object itself otherwise

    .. versionadded:: 2.2

    """
    return __obj._unwrap()


class _ContextManagerWrapper:
    def __init__(self, cm: ContextManager[T]):
        self._cm = cm

    async def __aenter__(self) -> T:
        return self._cm.__enter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return self._cm.__exit__(exc_type, exc_val, exc_tb)


def maybe_async_cm(
    cm: ContextManager[T] | AsyncContextManager[T],
) -> AsyncContextManager[T]:
    """
    Wrap a regular context manager as an async one if necessary.

    This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions
    and methods were changed to return regular context managers instead of async ones.

    :param cm: a regular or async context manager
    :return: an async context manager

    .. versionadded:: 2.2

    """
    if not isinstance(cm, AbstractContextManager):
        raise TypeError("Given object is not an context manager")

    return _ContextManagerWrapper(cm)


def _warn_deprecation(
    awaitable: AnyDeprecatedAwaitable[Any], stacklevel: int = 1
) -> None:
    warn(
        f'Awaiting on {awaitable._name}() is deprecated. Use "await '
        f"anyio.maybe_async({awaitable._name}(...)) if you have to support both AnyIO 2.x "
        f'and 3.x, or just remove the "await" if you are completely migrating to AnyIO 3+.',
        DeprecationWarning,
        stacklevel=stacklevel + 1,
    )


class DeprecatedAwaitable:
    def __init__(self, func: Callable[..., DeprecatedAwaitable]):
        self._name = f"{func.__module__}.{func.__qualname__}"

    def __await__(self) -> Generator[None, None, None]:
        _warn_deprecation(self)
        if False:
            yield

    def __reduce__(self) -> tuple[type[None], tuple[()]]:
        return type(None), ()

    def _unwrap(self) -> None:
        return None


class DeprecatedAwaitableFloat(float):
    def __new__(
        cls, x: float, func: Callable[..., DeprecatedAwaitableFloat]
    ) -> DeprecatedAwaitableFloat:
        return super().__new__(cls, x)

    def __init__(self, x: float, func: Callable[..., DeprecatedAwaitableFloat]):
        self._name = f"{func.__module__}.{func.__qualname__}"

    def __await__(self) -> Generator[None, None, float]:
        _warn_deprecation(self)
        if False:
            yield

        return float(self)

    def __reduce__(self) -> tuple[type[float], tuple[float]]:
        return float, (float(self),)

    def _unwrap(self) -> float:
        return float(self)


class DeprecatedAwaitableList(List[T]):
    def __init__(
        self,
        iterable: Iterable[T] = (),
        *,
        func: Callable[..., DeprecatedAwaitableList[T]],
    ):
        super().__init__(iterable)
        self._name = f"{func.__module__}.{func.__qualname__}"

    def __await__(self) -> Generator[None, None, list[T]]:
        _warn_deprecation(self)
        if False:
            yield

        return list(self)

    def __reduce__(self) -> tuple[type[list[T]], tuple[list[T]]]:
        return list, (list(self),)

    def _unwrap(self) -> list[T]:
        return list(self)


class DeprecatedAsyncContextManager(Generic[T], metaclass=ABCMeta):
    @abstractmethod
    def __enter__(self) -> T:
        pass

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        pass

    async def __aenter__(self) -> T:
        warn(
            f"Using {self.__class__.__name__} as an async context manager has been deprecated. "
            f'Use "async with anyio.maybe_async_cm(yourcontextmanager) as foo:" if you have to '
            f'support both AnyIO 2.x and 3.x, or just remove the "async" from "async with" if '
            f"you are completely migrating to AnyIO 3+.",
            DeprecationWarning,
        )
        return self.__enter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return self.__exit__(exc_type, exc_val, exc_tb)
5,726
Python
25.270642
98
0.613517
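A minimal sketch of the 2.x/3.x bridge described above, assuming anyio re-exports maybe_async; under AnyIO 3.x this simply unwraps the deprecated awaitable, while under 2.x the same call site would genuinely await:

import anyio
from anyio import maybe_async

async def main() -> None:
    # anyio.current_time() returns a DeprecatedAwaitableFloat in 3.x
    now = await maybe_async(anyio.current_time())
    print(now)

anyio.run(main)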
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_exceptions.py
from __future__ import annotations

from traceback import format_exception


class BrokenResourceError(Exception):
    """
    Raised when trying to use a resource that has been rendered unusable due to external causes
    (e.g. a send stream whose peer has disconnected).
    """


class BrokenWorkerProcess(Exception):
    """
    Raised by :func:`run_sync_in_process` if the worker process terminates abruptly or otherwise
    misbehaves.
    """


class BusyResourceError(Exception):
    """Raised when two tasks are trying to read from or write to the same resource concurrently."""

    def __init__(self, action: str):
        super().__init__(f"Another task is already {action} this resource")


class ClosedResourceError(Exception):
    """Raised when trying to use a resource that has been closed."""


class DelimiterNotFound(Exception):
    """
    Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    maximum number of bytes has been read without the delimiter being found.
    """

    def __init__(self, max_bytes: int) -> None:
        super().__init__(
            f"The delimiter was not found among the first {max_bytes} bytes"
        )


class EndOfStream(Exception):
    """Raised when trying to read from a stream that has been closed from the other end."""


class ExceptionGroup(BaseException):
    """
    Raised when multiple exceptions have been raised in a task group.

    :var ~typing.Sequence[BaseException] exceptions: the sequence of exceptions raised together
    """

    SEPARATOR = "----------------------------\n"

    exceptions: list[BaseException]

    def __str__(self) -> str:
        tracebacks = [
            "".join(format_exception(type(exc), exc, exc.__traceback__))
            for exc in self.exceptions
        ]
        return (
            f"{len(self.exceptions)} exceptions were raised in the task group:\n"
            f"{self.SEPARATOR}{self.SEPARATOR.join(tracebacks)}"
        )

    def __repr__(self) -> str:
        exception_reprs = ", ".join(repr(exc) for exc in self.exceptions)
        return f"<{self.__class__.__name__}: {exception_reprs}>"


class IncompleteRead(Exception):
    """
    Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    connection is closed before the requested amount of bytes has been read.
    """

    def __init__(self) -> None:
        super().__init__(
            "The stream was closed before the read operation could be completed"
        )


class TypedAttributeLookupError(LookupError):
    """
    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute is not
    found and no default value has been given.
    """


class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions if ``X()`` would block."""
2,916
Python
29.705263
99
0.659808
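A minimal sketch showing one of these exceptions in practice, assuming the memory object stream API from the top-level anyio package; with a zero-size buffer and no waiting receiver, send_nowait() raises WouldBlock:

import anyio
from anyio import WouldBlock, create_memory_object_stream

async def main() -> None:
    send, receive = create_memory_object_stream(0)
    try:
        send.send_nowait("item")
    except WouldBlock:
        print("send would have blocked")

anyio.run(main)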
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_resources.py
from __future__ import annotations

from ..abc import AsyncResource
from ._tasks import CancelScope


async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()
435
Python
21.947367
63
0.701149
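A minimal usage sketch for aclose_forcefully, assuming the top-level anyio re-export; the resource is closed inside an already-cancelled scope, so its aclose() cannot block:

import anyio
from anyio import aclose_forcefully, create_memory_object_stream

async def main() -> None:
    send, receive = create_memory_object_stream()
    await aclose_forcefully(receive)  # close without waiting on anything

anyio.run(main)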
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_tasks.py
from __future__ import annotations

import math
from types import TracebackType
from warnings import warn

from ..abc._tasks import TaskGroup, TaskStatus
from ._compat import (
    DeprecatedAsyncContextManager,
    DeprecatedAwaitable,
    DeprecatedAwaitableFloat,
)
from ._eventloop import get_asynclib


class _IgnoredTaskStatus(TaskStatus[object]):
    def started(self, value: object = None) -> None:
        pass


TASK_STATUS_IGNORED = _IgnoredTaskStatus()


class CancelScope(DeprecatedAsyncContextManager["CancelScope"]):
    """
    Wraps a unit of work that can be made separately cancellable.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        return get_asynclib().CancelScope(shield=shield, deadline=deadline)

    def cancel(self) -> DeprecatedAwaitable:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError


def open_cancel_scope(*, shield: bool = False) -> CancelScope:
    """
    Open a cancel scope.

    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    .. deprecated:: 3.0
       Use :class:`~CancelScope` directly.

    """
    warn(
        "open_cancel_scope() is deprecated -- use CancelScope() directly",
        DeprecationWarning,
    )
    return get_asynclib().CancelScope(shield=shield)


class FailAfterContextManager(DeprecatedAsyncContextManager[CancelScope]):
    def __init__(self, cancel_scope: CancelScope):
        self._cancel_scope = cancel_scope

    def __enter__(self) -> CancelScope:
        return self._cancel_scope.__enter__()

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        retval = self._cancel_scope.__exit__(exc_type, exc_val, exc_tb)
        if self._cancel_scope.cancel_called:
            raise TimeoutError

        return retval


def fail_after(delay: float | None, shield: bool = False) -> FailAfterContextManager:
    """
    Create a context manager which raises a :class:`TimeoutError` if does not finish in time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or ``None`` to
        disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    deadline = (
        (get_asynclib().current_time() + delay) if delay is not None else math.inf
    )
    cancel_scope = get_asynclib().CancelScope(deadline=deadline, shield=shield)
    return FailAfterContextManager(cancel_scope)


def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block, or ``None``
        to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    deadline = (
        (get_asynclib().current_time() + delay) if delay is not None else math.inf
    )
    return get_asynclib().CancelScope(deadline=deadline, shield=shield)


def current_effective_deadline() -> DeprecatedAwaitableFloat:
    """
    Return the nearest deadline among all the cancel scopes effective for the current task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if there is
        no deadline in effect, or ``float('-inf')`` if the current scope has been cancelled)
    :rtype: float

    """
    return DeprecatedAwaitableFloat(
        get_asynclib().current_effective_deadline(), current_effective_deadline
    )


def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    return get_asynclib().TaskGroup()
5,316
Python
28.37569
97
0.656132
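A minimal usage sketch contrasting fail_after (raises TimeoutError) with move_on_after (silently cancels the block), assuming the top-level anyio re-exports:

import anyio

async def main() -> None:
    try:
        with anyio.fail_after(0.1):
            await anyio.sleep(1)
    except TimeoutError:
        print("timed out")

    with anyio.move_on_after(0.1) as scope:
        await anyio.sleep(1)
    print("cancelled:", scope.cancel_called)

anyio.run(main)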
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_signals.py
from __future__ import annotations

from typing import AsyncIterator

from ._compat import DeprecatedAsyncContextManager
from ._eventloop import get_asynclib


def open_signal_receiver(
    *signals: int,
) -> DeprecatedAsyncContextManager[AsyncIterator[int]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields signal
        numbers

    .. warning:: Windows does not support signals natively so it is best to avoid relying on
        this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for the given
        signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    return get_asynclib().open_signal_receiver(*signals)
863
Python
30.999999
97
0.730012
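A minimal usage sketch for open_signal_receiver, assuming a POSIX platform (per the warning above, this is unreliable on Windows):

import signal
import anyio
from anyio import open_signal_receiver

async def main() -> None:
    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:   # yields signal numbers as they arrive
            print("received", signum)
            break

anyio.run(main)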
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_streams.py
from __future__ import annotations

import math
from typing import Any, TypeVar, overload

from ..streams.memory import (
    MemoryObjectReceiveStream,
    MemoryObjectSendStream,
    MemoryObjectStreamState,
)

T_Item = TypeVar("T_Item")


@overload
def create_memory_object_stream(
    max_buffer_size: float = ...,
) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
    ...


@overload
def create_memory_object_stream(
    max_buffer_size: float = ..., item_type: type[T_Item] = ...
) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
    ...


def create_memory_object_stream(
    max_buffer_size: float = 0, item_type: type[T_Item] | None = None
) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
    """
    Create a memory object stream.

    :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking
    :param item_type: type of item, for marking the streams with the right generic type for
        static typing (not used at run time)
    :return: a tuple of (send stream, receive stream)

    """
    if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
        raise ValueError("max_buffer_size must be either an integer or math.inf")
    if max_buffer_size < 0:
        raise ValueError("max_buffer_size cannot be negative")

    state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size)
    return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)
1,518
Python
30.645833
95
0.711462
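A minimal usage sketch for create_memory_object_stream; with max_buffer_size=1 the send below completes without a waiting receiver:

import anyio
from anyio import create_memory_object_stream

async def main() -> None:
    send, receive = create_memory_object_stream(max_buffer_size=1, item_type=str)
    await send.send("hello")
    print(await receive.receive())  # "hello"

anyio.run(main)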
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_synchronization.py
from __future__ import annotations

from collections import deque
from dataclasses import dataclass
from types import TracebackType
from warnings import warn

from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._compat import DeprecatedAwaitable
from ._eventloop import get_asynclib
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task


@dataclass(frozen=True)
class EventStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
    """

    tasks_waiting: int


@dataclass(frozen=True)
class CapacityLimiterStatistics:
    """
    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
    :ivar float total_tokens: total number of available tokens
    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
        limiter
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
    """

    borrowed_tokens: int
    total_tokens: float
    borrowers: tuple[object, ...]
    tasks_waiting: int


@dataclass(frozen=True)
class LockStatistics:
    """
    :ivar bool locked: flag indicating if this lock is locked or not
    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
        held by any task)
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
    """

    locked: bool
    owner: TaskInfo | None
    tasks_waiting: int


@dataclass(frozen=True)
class ConditionStatistics:
    """
    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
    """

    tasks_waiting: int
    lock_statistics: LockStatistics


@dataclass(frozen=True)
class SemaphoreStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
    """

    tasks_waiting: int


class Event:
    def __new__(cls) -> Event:
        return get_asynclib().Event()

    def set(self) -> DeprecatedAwaitable:
        """Set the flag, notifying all listeners."""
        raise NotImplementedError

    def is_set(self) -> bool:
        """Return ``True`` if the flag is set, ``False`` if not."""
        raise NotImplementedError

    async def wait(self) -> None:
        """
        Wait until the flag has been set.

        If the flag has already been set when this method is called, it returns immediately.

        """
        raise NotImplementedError

    def statistics(self) -> EventStatistics:
        """Return statistics about the current state of this event."""
        raise NotImplementedError


class Lock:
    _owner_task: TaskInfo | None = None

    def __init__(self) -> None:
        self._waiters: deque[tuple[TaskInfo, Event]] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        await checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except WouldBlock:
            task = get_current_task()
            event = Event()
            token = task, event
            self._waiters.append(token)
            try:
                await event.wait()
            except BaseException:
                if not event.is_set():
                    self._waiters.remove(token)
                elif self._owner_task == task:
                    self.release()

                raise

            assert self._owner_task == task
        else:
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        task = get_current_task()
        if self._owner_task == task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        if self._owner_task is not None:
            raise WouldBlock

        self._owner_task = task

    def release(self) -> DeprecatedAwaitable:
        """Release the lock."""
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding this lock")

        if self._waiters:
            self._owner_task, event = self._waiters.popleft()
            event.set()
        else:
            del self._owner_task

        return DeprecatedAwaitable(self.release)

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        return self._owner_task is not None

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0

        """
        return LockStatistics(self.locked(), self._owner_task, len(self._waiters))


class Condition:
    _owner_task: TaskInfo | None = None

    def __init__(self, lock: Lock | None = None):
        self._lock = lock or Lock()
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    def _check_acquired(self) -> None:
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding the underlying lock")

    async def acquire(self) -> None:
        """Acquire the underlying lock."""
        await self._lock.acquire()
        self._owner_task = get_current_task()

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()
        self._owner_task = get_current_task()

    def release(self) -> DeprecatedAwaitable:
        """Release the underlying lock."""
        self._lock.release()
        return DeprecatedAwaitable(self.release)

    def locked(self) -> bool:
        """Return True if the lock is set."""
        return self._lock.locked()

    def notify(self, n: int = 1) -> None:
        """Notify exactly n listeners."""
        self._check_acquired()
        for _ in range(n):
            try:
                event = self._waiters.popleft()
            except IndexError:
                break

            event.set()

    def notify_all(self) -> None:
        """Notify all the listeners."""
        self._check_acquired()
        for event in self._waiters:
            event.set()

        self._waiters.clear()

    async def wait(self) -> None:
        """Wait for a notification."""
        await checkpoint()
        event = Event()
        self._waiters.append(event)
        self.release()
        try:
            await event.wait()
        except BaseException:
            if not event.is_set():
                self._waiters.remove(event)

            raise
        finally:
            with CancelScope(shield=True):
                await self.acquire()

    def statistics(self) -> ConditionStatistics:
        """
        Return statistics about the current state of this condition.

        .. versionadded:: 3.0

        """
        return ConditionStatistics(len(self._waiters), self._lock.statistics())


class Semaphore:
    def __init__(self, initial_value: int, *, max_value: int | None = None):
        if not isinstance(initial_value, int):
            raise TypeError("initial_value must be an integer")
        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")
        if max_value is not None:
            if not isinstance(max_value, int):
                raise TypeError("max_value must be an integer or None")
            if max_value < initial_value:
                raise ValueError(
                    "max_value must be equal to or higher than initial_value"
                )

        self._value = initial_value
        self._max_value = max_value
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> Semaphore:
        await self.acquire()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Decrement the semaphore value, blocking if necessary."""
        await checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except WouldBlock:
            event = Event()
            self._waiters.append(event)
            try:
                await event.wait()
            except BaseException:
                if not event.is_set():
                    self._waiters.remove(event)
                else:
                    self.release()

                raise
        else:
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        if self._value == 0:
            raise WouldBlock

        self._value -= 1

    def release(self) -> DeprecatedAwaitable:
        """Increment the semaphore value."""
        if self._max_value is not None and self._value == self._max_value:
            raise ValueError("semaphore released too many times")

        if self._waiters:
            self._waiters.popleft().set()
        else:
            self._value += 1

        return DeprecatedAwaitable(self.release)

    @property
    def value(self) -> int:
        """The current value of the semaphore."""
        return self._value

    @property
    def max_value(self) -> int | None:
        """The maximum value of the semaphore."""
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        """
        Return statistics about the current state of this semaphore.

        .. versionadded:: 3.0

        """
        return SemaphoreStatistics(len(self._waiters))


class CapacityLimiter:
    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        return get_asynclib().CapacityLimiter(total_tokens)

    async def __aenter__(self) -> None:
        raise NotImplementedError

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError

    @property
    def total_tokens(self) -> float:
        """
        The total number of tokens available for borrowing.

        This is a read-write property. If the total number of tokens is increased, the
        proportionate number of tasks waiting on this limiter will be granted their tokens.

        .. versionchanged:: 3.0
            The property is now writable.

        """
        raise NotImplementedError

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        raise NotImplementedError

    async def set_total_tokens(self, value: float) -> None:
        warn(
            "CapacityLimiter.set_total_tokens has been deprecated. Set the value of the"
            '"total_tokens" attribute directly.',
            DeprecationWarning,
        )
        self.total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        """The number of tokens that have currently been borrowed."""
        raise NotImplementedError

    @property
    def available_tokens(self) -> float:
        """The number of tokens currently available to be borrowed"""
        raise NotImplementedError

    def acquire_nowait(self) -> DeprecatedAwaitable:
        """
        Acquire a token for the current task without waiting for one to become available.

        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
        """
        Acquire a token without waiting for one to become available.

        :param borrower: the entity borrowing a token
        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    async def acquire(self) -> None:
        """
        Acquire a token for the current task, waiting if necessary for one to become available.

        """
        raise NotImplementedError

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """
        Acquire a token, waiting if necessary for one to become available.

        :param borrower: the entity borrowing a token

        """
        raise NotImplementedError

    def release(self) -> None:
        """
        Release the token held by the current task.

        :raises RuntimeError: if the current task has not borrowed a token from this limiter.

        """
        raise NotImplementedError

    def release_on_behalf_of(self, borrower: object) -> None:
        """
        Release the token held by the given borrower.

        :raises RuntimeError: if the borrower has not borrowed a token from this limiter.

        """
        raise NotImplementedError

    def statistics(self) -> CapacityLimiterStatistics:
        """
        Return statistics about the current state of this limiter.

        .. versionadded:: 3.0

        """
        raise NotImplementedError


def create_lock() -> Lock:
    """
    Create an asynchronous lock.

    :return: a lock object

    .. deprecated:: 3.0
       Use :class:`~Lock` directly.

    """
    warn("create_lock() is deprecated -- use Lock() directly", DeprecationWarning)
    return Lock()


def create_condition(lock: Lock | None = None) -> Condition:
    """
    Create an asynchronous condition.

    :param lock: the lock to base the condition object on
    :return: a condition object

    .. deprecated:: 3.0
       Use :class:`~Condition` directly.

    """
    warn(
        "create_condition() is deprecated -- use Condition() directly",
        DeprecationWarning,
    )
    return Condition(lock=lock)


def create_event() -> Event:
    """
    Create an asynchronous event object.

    :return: an event object

    .. deprecated:: 3.0
       Use :class:`~Event` directly.

    """
    warn("create_event() is deprecated -- use Event() directly", DeprecationWarning)
    return get_asynclib().Event()


def create_semaphore(value: int, *, max_value: int | None = None) -> Semaphore:
    """
    Create an asynchronous semaphore.

    :param value: the semaphore's initial value
    :param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if
        the semaphore's value would exceed this number
    :return: a semaphore object

    .. deprecated:: 3.0
       Use :class:`~Semaphore` directly.

    """
    warn(
        "create_semaphore() is deprecated -- use Semaphore() directly",
        DeprecationWarning,
    )
    return Semaphore(value, max_value=max_value)


def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
    """
    Create a capacity limiter.

    :param total_tokens: the total number of tokens available for borrowing (can be an integer
        or :data:`math.inf`)
    :return: a capacity limiter object

    .. deprecated:: 3.0
       Use :class:`~CapacityLimiter` directly.

    """
    warn(
        "create_capacity_limiter() is deprecated -- use CapacityLimiter() directly",
        DeprecationWarning,
    )
    return get_asynclib().CapacityLimiter(total_tokens)


class ResourceGuard:
    __slots__ = "action", "_guarded"

    def __init__(self, action: str):
        self.action = action
        self._guarded = False

    def __enter__(self) -> None:
        if self._guarded:
            raise BusyResourceError(self.action)

        self._guarded = True

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        self._guarded = False
        return None
16,747
Python
27.053601
99
0.597122
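A minimal usage sketch for the synchronization primitives above, assuming the top-level anyio re-exports; both Lock and CapacityLimiter are async context managers:

import anyio

async def main() -> None:
    lock = anyio.Lock()
    async with lock:
        print("holding the lock, locked =", lock.locked())

    limiter = anyio.CapacityLimiter(2)
    async with limiter:
        print("borrowed tokens:", limiter.borrowed_tokens)

anyio.run(main)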
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_typedattr.py
from __future__ import annotations

import sys
from typing import Any, Callable, Mapping, TypeVar, overload

from ._exceptions import TypedAttributeLookupError

if sys.version_info >= (3, 8):
    from typing import final
else:
    from typing_extensions import final

T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
undefined = object()


def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    return object()


class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
        for attrname in dir(cls):
            if not attrname.startswith("_") and attrname not in annotations:
                raise TypeError(
                    f"Attribute {attrname!r} is missing its type annotation"
                )

        super().__init_subclass__()


class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of the extra attributes to callables that return the corresponding values.

        If the provider wraps another provider, the attributes from that wrapper should also be
        included in the returned mapping (but the wrapper may override the callables from the
        wrapped instance).

        """
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr:
        ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default:
        ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for
        :param default: the value that should be returned if no value is found for the attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was
            given

        """
        try:
            return self.extra_attributes[attribute]()
        except KeyError:
            if default is undefined:
                raise TypedAttributeLookupError("Attribute not found") from None
            else:
                return default
2,551
Python
29.380952
96
0.638965
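A minimal sketch of defining a typed attribute set and looking it up on a provider; the MyAttributes class and the provider variable are hypothetical names for illustration only:

from anyio import TypedAttributeSet, typed_attribute

class MyAttributes(TypedAttributeSet):
    color: str = typed_attribute()  # every public attribute must be annotated

# Given some TypedAttributeProvider instance named `provider` (hypothetical):
# value = provider.extra(MyAttributes.color, "unknown")  # default avoids the lookup error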
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_fileio.py
from __future__ import annotations

import os
import pathlib
import sys
from dataclasses import dataclass
from functools import partial
from os import PathLike
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    AnyStr,
    AsyncIterator,
    Callable,
    Generic,
    Iterable,
    Iterator,
    Sequence,
    cast,
    overload,
)

from .. import to_thread
from ..abc import AsyncResource

if sys.version_info >= (3, 8):
    from typing import Final
else:
    from typing_extensions import Final

if TYPE_CHECKING:
    from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
else:
    ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object


class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the
    following blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the underlying
    file at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int:
        ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int:
        ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None:
        ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None:
        ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]:
    ...


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]:
    ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)


def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Wrap an existing file as an asynchronous file.

    :param file: an existing file-like object
    :return: an asynchronous file object

    """
    return AsyncFile(file)


@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
    iterator: Iterator[PathLike[str]]

    async def __anext__(self) -> Path:
        nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True)
        if nextval is None:
            raise StopAsyncIteration from None

        return Path(cast("PathLike[str]", nextval))


class Path:
    """
    An asynchronous version of :class:`pathlib.Path`.

    This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but
    it is compatible with the :class:`os.PathLike` interface.

    It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the
    deprecated :meth:`~pathlib.Path.link_to` method.

    Any methods that do disk I/O need to be awaited on. These methods are:

    * :meth:`~pathlib.Path.absolute`
    * :meth:`~pathlib.Path.chmod`
    * :meth:`~pathlib.Path.cwd`
    * :meth:`~pathlib.Path.exists`
    * :meth:`~pathlib.Path.expanduser`
    * :meth:`~pathlib.Path.group`
    * :meth:`~pathlib.Path.hardlink_to`
    * :meth:`~pathlib.Path.home`
    * :meth:`~pathlib.Path.is_block_device`
    * :meth:`~pathlib.Path.is_char_device`
    * :meth:`~pathlib.Path.is_dir`
    * :meth:`~pathlib.Path.is_fifo`
    * :meth:`~pathlib.Path.is_file`
    * :meth:`~pathlib.Path.is_mount`
    * :meth:`~pathlib.Path.lchmod`
    * :meth:`~pathlib.Path.lstat`
    * :meth:`~pathlib.Path.mkdir`
    * :meth:`~pathlib.Path.open`
    * :meth:`~pathlib.Path.owner`
    * :meth:`~pathlib.Path.read_bytes`
    * :meth:`~pathlib.Path.read_text`
    * :meth:`~pathlib.Path.readlink`
    * :meth:`~pathlib.Path.rename`
    * :meth:`~pathlib.Path.replace`
    * :meth:`~pathlib.Path.rmdir`
    * :meth:`~pathlib.Path.samefile`
    * :meth:`~pathlib.Path.stat`
    * :meth:`~pathlib.Path.touch`
    * :meth:`~pathlib.Path.unlink`
    * :meth:`~pathlib.Path.write_bytes`
    * :meth:`~pathlib.Path.write_text`

    Additionally, the following methods return an async iterator yielding :class:`~.Path`
    objects:

    * :meth:`~pathlib.Path.glob`
    * :meth:`~pathlib.Path.iterdir`
    * :meth:`~pathlib.Path.rglob`
    """

    __slots__ = "_path", "__weakref__"

    __weakref__: Any

    def __init__(self, *args: str | PathLike[str]) -> None:
        self._path: Final[pathlib.Path] = pathlib.Path(*args)

    def __fspath__(self) -> str:
        return self._path.__fspath__()

    def __str__(self) -> str:
        return self._path.__str__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.as_posix()!r})"

    def __bytes__(self) -> bytes:
        return self._path.__bytes__()

    def __hash__(self) -> int:
        return self._path.__hash__()

    def __eq__(self, other: object) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__eq__(target)

    def __lt__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__lt__(target)

    def __le__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__le__(target)

    def __gt__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__gt__(target)

    def __ge__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__ge__(target)

    def __truediv__(self, other: Any) -> Path:
        return Path(self._path / other)

    def __rtruediv__(self, other: Any) -> Path:
        return Path(other) / self

    @property
    def parts(self) -> tuple[str, ...]:
        return self._path.parts

    @property
    def drive(self) -> str:
        return self._path.drive

    @property
    def root(self) -> str:
        return self._path.root

    @property
    def anchor(self) -> str:
        return self._path.anchor

    @property
    def parents(self) -> Sequence[Path]:
        return tuple(Path(p) for p in self._path.parents)

    @property
    def parent(self) -> Path:
        return Path(self._path.parent)

    @property
    def name(self) -> str:
        return self._path.name

    @property
    def suffix(self) -> str:
        return self._path.suffix

    @property
    def suffixes(self) -> list[str]:
        return self._path.suffixes

    @property
    def stem(self) -> str:
        return self._path.stem

    async def absolute(self) -> Path:
        path = await to_thread.run_sync(self._path.absolute)
        return Path(path)

    def as_posix(self) -> str:
        return self._path.as_posix()

    def as_uri(self) -> str:
        return self._path.as_uri()

    def match(self, path_pattern: str) -> bool:
        return self._path.match(path_pattern)

    def is_relative_to(self, *other: str | PathLike[str]) -> bool:
        try:
            self.relative_to(*other)
            return True
        except ValueError:
            return False

    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
        func = partial(os.chmod, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, mode)

    @classmethod
    async def cwd(cls) -> Path:
        path = await to_thread.run_sync(pathlib.Path.cwd)
        return cls(path)

    async def exists(self) -> bool:
        return await to_thread.run_sync(self._path.exists, cancellable=True)

    async def expanduser(self) -> Path:
        return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True))

    def glob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.glob(pattern)
        return _PathIterator(gen)

    async def group(self) -> str:
        return await to_thread.run_sync(self._path.group, cancellable=True)

    async def hardlink_to(self, target: str | pathlib.Path | Path) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(os.link, target, self)

    @classmethod
    async def home(cls) -> Path:
        home_path = await to_thread.run_sync(pathlib.Path.home)
        return cls(home_path)

    def is_absolute(self) -> bool:
        return self._path.is_absolute()

    async def is_block_device(self) -> bool:
        return await to_thread.run_sync(self._path.is_block_device, cancellable=True)

    async def is_char_device(self) -> bool:
        return await to_thread.run_sync(self._path.is_char_device, cancellable=True)

    async def is_dir(self) -> bool:
        return await to_thread.run_sync(self._path.is_dir, cancellable=True)

    async def is_fifo(self) -> bool:
        return await to_thread.run_sync(self._path.is_fifo, cancellable=True)

    async def is_file(self) -> bool:
        return await to_thread.run_sync(self._path.is_file, cancellable=True)

    async def is_mount(self) -> bool:
        return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True)

    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, cancellable=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, cancellable=True)

    def iterdir(self) -> AsyncIterator[Path]:
        gen = self._path.iterdir()
        return _PathIterator(gen)

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, cancellable=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]:
        ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[str]:
        ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, cancellable=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    def relative_to(self, *other: str | PathLike[str]) -> Path:
        return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(cast(str, target))

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, cancellable=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(
        self, other_path: str | bytes | int | pathlib.Path | Path
    ) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, cancellable=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, cancellable=True)

    async def symlink_to(
        self,
        target: str | pathlib.Path | Path,
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before Python 3.10
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)


PathLike.register(Path)
18,026
Python
28.846026
98
0.605681
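A minimal usage sketch combining anyio.Path and open_file, assuming the top-level anyio re-exports; the example.txt filename is illustrative:

import anyio
from anyio import Path, open_file

async def main() -> None:
    path = Path("example.txt")
    await path.write_text("hello\n")            # disk I/O runs in a worker thread
    async with await open_file(path) as f:      # AsyncFile supports async iteration
        async for line in f:
            print(line, end="")

anyio.run(main)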
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_sockets.py
from __future__ import annotations

import socket
import ssl
import sys
from ipaddress import IPv6Address, ip_address
from os import PathLike, chmod
from pathlib import Path
from socket import AddressFamily, SocketKind
from typing import Awaitable, List, Tuple, cast, overload

from .. import to_thread
from ..abc import (
    ConnectedUDPSocket,
    IPAddressType,
    IPSockAddrType,
    SocketListener,
    SocketStream,
    UDPSocket,
    UNIXSocketStream,
)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSStream
from ._eventloop import get_asynclib
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal

IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515

GetAddrInfoReturnType = List[
    Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]]
]
AnyIPAddressFamily = Literal[
    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]


# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream:
    ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream:
    ...


async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555).
    If ``remote_host`` is a host name that resolves to multiple IP addresses, each one is tried
    until one connection attempt succeeds. If the first attempt does not connected within 250
    milliseconds, a second attempt is started using the next address in the list, and so on.
    On IPv6 enabled systems, an IPv6 address (if available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before
        closing the stream and requires that the server does this as well. Otherwise,
        :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to the value
        of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            event.set()

    asynclib = get_asynclib()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available) and the
        # second one is an IPv4 addresses. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs: list[tuple[socket.AddressFamily, str]] = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))
    else:
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]

    oserrors: list[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = Event()
            tg.start_soon(try_connect, addr, event)
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(oserrors)
        raise OSError("All connection attempts failed") from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream


async def connect_unix(path: str | PathLike[str]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    path = str(Path(path))
    return await get_asynclib().connect_unix(path)


async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on all IPv4
        and IPv6 interfaces. To listen on all interfaces on a specific address family, use
        ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or
        65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
        (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_asynclib()
    backlog = min(backlog, 65536)
    local_host = str(local_host) if local_host is not None else None
    gai_res = await getaddrinfo(
        local_host,  # type: ignore[arg-type]
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.TCPSocketListener(raw_socket)
            listeners.append(listener)
    except BaseException:
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)


async def create_unix_listener(
    path: str | PathLike[str],
    *,
    mode: int | None = None,
    backlog: int = 65536,
) -> SocketListener:
    """
    Create a UNIX socket listener.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or
        65536)
    :return: a listener object

    .. versionchanged:: 3.0
        If a socket already exists on the file system in the given path, it will be removed
        first.

    """
    path_str = str(path)
    path = Path(path)
    if path.is_socket():
        path.unlink()

    backlog = min(backlog, 65536)
    raw_socket = socket.socket(socket.AF_UNIX)
    raw_socket.setblocking(False)
    try:
        await to_thread.run_sync(raw_socket.bind, path_str, cancellable=True)
        if mode is not None:
            await to_thread.run_sync(chmod, path_str, mode, cancellable=True)

        raw_socket.listen(backlog)
        return get_asynclib().UNIXSocketListener(raw_socket)
    except BaseException:
        raw_socket.close()
        raise


async def create_udp_socket(
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> UDPSocket:
    """
    Create a UDP socket.

    If ``local_port`` has been given, the socket will be bound to this port on the local
    machine, making this socket suitable for providing UDP based services.

    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
        ``local_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
        (not supported on Windows)
    :return: a UDP socket

    """
    if family is AddressFamily.AF_UNSPEC and not local_host:
        raise ValueError('Either "family" or "local_host" must be given')

    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]
    elif family is AddressFamily.AF_INET6:
        local_address = ("::", 0)
    else:
        local_address = ("0.0.0.0", 0)

    return await get_asynclib().create_udp_socket(
        family, local_address, None, reuse_port
    )


async def create_connected_udp_socket(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> ConnectedUDPSocket:
    """
    Create a connected UDP socket.

    Connected UDP sockets can only communicate with the specified remote host/port, and any
    packets sent from other sources are dropped.

    :param remote_host: remote host to set as the default target
    :param remote_port: port on the remote host to set as the default target
    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
        ``local_host`` or ``remote_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
        (not supported on Windows)
    :return: a connected UDP socket

    """
    local_address = None
    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]

    gai_res = await getaddrinfo(
        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
    )
    family = cast(AnyIPAddressFamily, gai_res[0][0])
    remote_address = gai_res[0][-1]

    return await get_asynclib().create_udp_socket(
        family, local_address, remote_address, reuse_port
    )


async def getaddrinfo(
    host: bytearray | bytes | str,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> GetAddrInfoReturnType:
    """
    Look up a numeric IP address given a host name.
Internationalized domain names are translated according to the (non-transitional) IDNA 2008 standard. .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of (host, port), unlike what :func:`socket.getaddrinfo` does. :param host: host name :param port: port number :param family: socket family (``AF_INET``, ...) :param type: socket type (``SOCK_STREAM``, ...) :param proto: protocol number :param flags: flags to pass to upstream ``getaddrinfo()`` :return: list of tuples containing (family, type, proto, canonname, sockaddr) .. seealso:: :func:`socket.getaddrinfo` """ # Handle unicode hostnames if isinstance(host, str): try: encoded_host = host.encode("ascii") except UnicodeEncodeError: import idna encoded_host = idna.encode(host, uts46=True) else: encoded_host = host gai_res = await get_asynclib().getaddrinfo( encoded_host, port, family=family, type=type, proto=proto, flags=flags ) return [ (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr)) for family, type, proto, canonname, sockaddr in gai_res ] def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]: """ Look up the host name of an IP address. :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4) :param flags: flags to pass to upstream ``getnameinfo()`` :return: a tuple of (host name, service name) .. seealso:: :func:`socket.getnameinfo` """ return get_asynclib().getnameinfo(sockaddr, flags) def wait_socket_readable(sock: socket.socket) -> Awaitable[None]: """ Wait until the given socket has data to be read. This does **NOT** work on Windows when using the asyncio backend with a proactor event loop (default on py3.8+). .. warning:: Only use this on raw sockets that have not been wrapped by any higher level constructs like socket streams! :param sock: a socket object :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the socket to become readable :raises ~anyio.BusyResourceError: if another task is already waiting for the socket to become readable """ return get_asynclib().wait_socket_readable(sock) def wait_socket_writable(sock: socket.socket) -> Awaitable[None]: """ Wait until the given socket can be written to. This does **NOT** work on Windows when using the asyncio backend with a proactor event loop (default on py3.8+). .. warning:: Only use this on raw sockets that have not been wrapped by any higher level constructs like socket streams! :param sock: a socket object :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the socket to become writable :raises ~anyio.BusyResourceError: if another task is already waiting for the socket to become writable """ return get_asynclib().wait_socket_writable(sock) # # Private API # def convert_ipv6_sockaddr( sockaddr: tuple[str, int, int, int] | tuple[str, int] ) -> tuple[str, int]: """ Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format. If the scope ID is nonzero, it is added to the address, separated with ``%``. Otherwise the flow id and scope id are simply cut off from the tuple. Any other kinds of socket addresses are returned as-is.
:param sockaddr: the result of :meth:`~socket.socket.getsockname` :return: the converted socket address """ # This is more complicated than it should be because of MyPy if isinstance(sockaddr, tuple) and len(sockaddr) == 4: host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr) if scope_id: # PyPy (as of v7.3.11) leaves the interface name in the result, so # we discard it and only get the scope ID from the end # (https://foss.heptapod.net/pypy/pypy/-/issues/3938) host = host.split("%")[0] # Add scope_id to the address return f"{host}%{scope_id}", port else: return host, port else: return cast(Tuple[str, int], sockaddr)
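# Editor's note: a minimal sketch exercising convert_ipv6_sockaddr() as defined
# above; the sample addresses are hypothetical, and the import reaches into a
# private anyio module purely for illustration.
from anyio._core._sockets import convert_ipv6_sockaddr

# A 4-tuple with a nonzero scope ID: the scope ID is appended after "%"
assert convert_ipv6_sockaddr(("fe80::1", 8080, 0, 3)) == ("fe80::1%3", 8080)
# A 4-tuple with a zero scope ID: flow info and scope ID are cut off
assert convert_ipv6_sockaddr(("::1", 8080, 0, 0)) == ("::1", 8080)
# A 2-tuple (e.g. IPv4) address is returned as-is
assert convert_ipv6_sockaddr(("127.0.0.1", 8080)) == ("127.0.0.1", 8080)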
20,663
Python
32.986842
99
0.641436
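# Editor's note: a minimal usage sketch for the connect_tcp() API implemented in
# _sockets.py above, called through anyio's public namespace; "example.com" is a
# placeholder host chosen for illustration, not taken from the source.
import anyio

async def fetch_banner() -> bytes:
    # With tls=True, connect_tcp() performs the handshake and returns a TLSStream
    async with await anyio.connect_tcp("example.com", 443, tls=True) as stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        return await stream.receive()

# anyio.run(fetch_banner)  # uncomment to actually connect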
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_backends/_trio.py
from __future__ import annotations import array import math import socket from concurrent.futures import Future from contextvars import copy_context from dataclasses import dataclass from functools import partial from io import IOBase from os import PathLike from signal import Signals from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AsyncGenerator, AsyncIterator, Awaitable, Callable, Collection, Coroutine, Generic, Iterable, Mapping, NoReturn, Sequence, TypeVar, cast, ) import sniffio import trio.from_thread from outcome import Error, Outcome, Value from trio.socket import SocketType as TrioSocketType from trio.to_thread import run_sync from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable from .._core._eventloop import claim_worker_thread from .._core._exceptions import ( BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream, ) from .._core._exceptions import ExceptionGroup as BaseExceptionGroup from .._core._sockets import convert_ipv6_sockaddr from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter from .._core._synchronization import Event as BaseEvent from .._core._synchronization import ResourceGuard from .._core._tasks import CancelScope as BaseCancelScope from ..abc import IPSockAddrType, UDPPacketType if TYPE_CHECKING: from trio_typing import TaskStatus try: from trio import lowlevel as trio_lowlevel except ImportError: from trio import hazmat as trio_lowlevel # type: ignore[no-redef] from trio.hazmat import wait_readable, wait_writable else: from trio.lowlevel import wait_readable, wait_writable try: trio_open_process = trio_lowlevel.open_process except AttributeError: # isort: off from trio import ( # type: ignore[attr-defined, no-redef] open_process as trio_open_process, ) T_Retval = TypeVar("T_Retval") T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType) # # Event loop # run = trio.run current_token = trio.lowlevel.current_trio_token RunVar = trio.lowlevel.RunVar # # Miscellaneous # sleep = trio.sleep # # Timeouts and cancellation # class CancelScope(BaseCancelScope): def __new__( cls, original: trio.CancelScope | None = None, **kwargs: object ) -> CancelScope: return object.__new__(cls) def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None: self.__original = original or trio.CancelScope(**kwargs) def __enter__(self) -> CancelScope: self.__original.__enter__() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: # https://github.com/python-trio/trio-typing/pull/79 return self.__original.__exit__( # type: ignore[func-returns-value] exc_type, exc_val, exc_tb ) def cancel(self) -> DeprecatedAwaitable: self.__original.cancel() return DeprecatedAwaitable(self.cancel) @property def deadline(self) -> float: return self.__original.deadline @deadline.setter def deadline(self, value: float) -> None: self.__original.deadline = value @property def cancel_called(self) -> bool: return self.__original.cancel_called @property def shield(self) -> bool: return self.__original.shield @shield.setter def shield(self, value: bool) -> None: self.__original.shield = value CancelledError = trio.Cancelled checkpoint = trio.lowlevel.checkpoint checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint current_effective_deadline = 
trio.current_effective_deadline current_time = trio.current_time # # Task groups # class ExceptionGroup(BaseExceptionGroup, trio.MultiError): pass class TaskGroup(abc.TaskGroup): def __init__(self) -> None: self._active = False self._nursery_manager = trio.open_nursery() self.cancel_scope = None # type: ignore[assignment] async def __aenter__(self) -> TaskGroup: self._active = True self._nursery = await self._nursery_manager.__aenter__() self.cancel_scope = CancelScope(self._nursery.cancel_scope) return self async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: try: return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) except trio.MultiError as exc: raise ExceptionGroup(exc.exceptions) from None finally: self._active = False def start_soon( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None ) -> None: if not self._active: raise RuntimeError( "This task group is not active; no new tasks can be started." ) self._nursery.start_soon(func, *args, name=name) async def start( self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None ) -> object: if not self._active: raise RuntimeError( "This task group is not active; no new tasks can be started." ) return await self._nursery.start(func, *args, name=name) # # Threads # async def run_sync_in_worker_thread( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: trio.CapacityLimiter | None = None, ) -> T_Retval: def wrapper() -> T_Retval: with claim_worker_thread("trio"): return func(*args) # TODO: remove explicit context copying when trio 0.20 is the minimum requirement context = copy_context() context.run(sniffio.current_async_library_cvar.set, None) return await run_sync( context.run, wrapper, cancellable=cancellable, limiter=limiter ) # TODO: remove this workaround when trio 0.20 is the minimum requirement def run_async_from_thread( fn: Callable[..., Awaitable[T_Retval]], *args: Any ) -> T_Retval: async def wrapper() -> T_Retval: retval: T_Retval async def inner() -> None: nonlocal retval __tracebackhide__ = True retval = await fn(*args) async with trio.open_nursery() as n: context.run(n.start_soon, inner) __tracebackhide__ = True return retval # noqa: F821 context = copy_context() context.run(sniffio.current_async_library_cvar.set, "trio") return trio.from_thread.run(wrapper) def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval: # TODO: remove explicit context copying when trio 0.20 is the minimum requirement retval = trio.from_thread.run_sync(copy_context().run, fn, *args) return cast(T_Retval, retval) class BlockingPortal(abc.BlockingPortal): def __new__(cls) -> BlockingPortal: return object.__new__(cls) def __init__(self) -> None: super().__init__() self._token = trio.lowlevel.current_trio_token() def _spawn_task_from_thread( self, func: Callable, args: tuple, kwargs: dict[str, Any], name: object, future: Future, ) -> None: context = copy_context() context.run(sniffio.current_async_library_cvar.set, "trio") trio.from_thread.run_sync( context.run, partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs, future, trio_token=self._token, ) # # Subprocesses # @dataclass(eq=False) class ReceiveStreamWrapper(abc.ByteReceiveStream): _stream: trio.abc.ReceiveStream async def receive(self, max_bytes: int | None = None) -> bytes: try: data = await self._stream.receive_some(max_bytes) except trio.ClosedResourceError as 
exc: raise ClosedResourceError from exc.__cause__ except trio.BrokenResourceError as exc: raise BrokenResourceError from exc.__cause__ if data: return data else: raise EndOfStream async def aclose(self) -> None: await self._stream.aclose() @dataclass(eq=False) class SendStreamWrapper(abc.ByteSendStream): _stream: trio.abc.SendStream async def send(self, item: bytes) -> None: try: await self._stream.send_all(item) except trio.ClosedResourceError as exc: raise ClosedResourceError from exc.__cause__ except trio.BrokenResourceError as exc: raise BrokenResourceError from exc.__cause__ async def aclose(self) -> None: await self._stream.aclose() @dataclass(eq=False) class Process(abc.Process): _process: trio.Process _stdin: abc.ByteSendStream | None _stdout: abc.ByteReceiveStream | None _stderr: abc.ByteReceiveStream | None async def aclose(self) -> None: if self._stdin: await self._stdin.aclose() if self._stdout: await self._stdout.aclose() if self._stderr: await self._stderr.aclose() await self.wait() async def wait(self) -> int: return await self._process.wait() def terminate(self) -> None: self._process.terminate() def kill(self) -> None: self._process.kill() def send_signal(self, signal: Signals) -> None: self._process.send_signal(signal) @property def pid(self) -> int: return self._process.pid @property def returncode(self) -> int | None: return self._process.returncode @property def stdin(self) -> abc.ByteSendStream | None: return self._stdin @property def stdout(self) -> abc.ByteReceiveStream | None: return self._stdout @property def stderr(self) -> abc.ByteReceiveStream | None: return self._stderr async def open_process( command: str | bytes | Sequence[str | bytes], *, shell: bool, stdin: int | IO[Any] | None, stdout: int | IO[Any] | None, stderr: int | IO[Any] | None, cwd: str | bytes | PathLike | None = None, env: Mapping[str, str] | None = None, start_new_session: bool = False, ) -> Process: process = await trio_open_process( # type: ignore[misc] command, # type: ignore[arg-type] stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env, start_new_session=start_new_session, ) stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None return Process(process, stdin_stream, stdout_stream, stderr_stream) class _ProcessPoolShutdownInstrument(trio.abc.Instrument): def after_run(self) -> None: super().after_run() current_default_worker_process_limiter: RunVar = RunVar( "current_default_worker_process_limiter" ) async def _shutdown_process_pool(workers: set[Process]) -> None: process: Process try: await sleep(math.inf) except trio.Cancelled: for process in workers: if process.returncode is None: process.kill() with CancelScope(shield=True): for process in workers: await process.aclose() def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None: trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers) # # Sockets and networking # class _TrioSocketMixin(Generic[T_SockAddr]): def __init__(self, trio_socket: TrioSocketType) -> None: self._trio_socket = trio_socket self._closed = False def _check_closed(self) -> None: if self._closed: raise ClosedResourceError if self._trio_socket.fileno() < 0: raise BrokenResourceError @property def _raw_socket(self) -> socket.socket: return self._trio_socket._sock # type: ignore[attr-defined] async def aclose(self) -> None: if 
self._trio_socket.fileno() >= 0: self._closed = True self._trio_socket.close() def _convert_socket_error(self, exc: BaseException) -> NoReturn: if isinstance(exc, trio.ClosedResourceError): raise ClosedResourceError from exc elif self._trio_socket.fileno() < 0 and self._closed: raise ClosedResourceError from None elif isinstance(exc, OSError): raise BrokenResourceError from exc else: raise exc class SocketStream(_TrioSocketMixin, abc.SocketStream): def __init__(self, trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = ResourceGuard("reading from") self._send_guard = ResourceGuard("writing to") async def receive(self, max_bytes: int = 65536) -> bytes: with self._receive_guard: try: data = await self._trio_socket.recv(max_bytes) except BaseException as exc: self._convert_socket_error(exc) if data: return data else: raise EndOfStream async def send(self, item: bytes) -> None: with self._send_guard: view = memoryview(item) while view: try: bytes_sent = await self._trio_socket.send(view) except BaseException as exc: self._convert_socket_error(exc) view = view[bytes_sent:] async def send_eof(self) -> None: self._trio_socket.shutdown(socket.SHUT_WR) class UNIXSocketStream(SocketStream, abc.UNIXSocketStream): async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: if not isinstance(msglen, int) or msglen < 0: raise ValueError("msglen must be a non-negative integer") if not isinstance(maxfds, int) or maxfds < 1: raise ValueError("maxfds must be a positive integer") fds = array.array("i") await checkpoint() with self._receive_guard: while True: try: message, ancdata, flags, addr = await self._trio_socket.recvmsg( msglen, socket.CMSG_LEN(maxfds * fds.itemsize) ) except BaseException as exc: self._convert_socket_error(exc) else: if not message and not ancdata: raise EndOfStream break for cmsg_level, cmsg_type, cmsg_data in ancdata: if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: raise RuntimeError( f"Received unexpected ancillary data; message = {message!r}, " f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" ) fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) return message, list(fds) async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: if not message: raise ValueError("message must not be empty") if not fds: raise ValueError("fds must not be empty") filenos: list[int] = [] for fd in fds: if isinstance(fd, int): filenos.append(fd) elif isinstance(fd, IOBase): filenos.append(fd.fileno()) fdarray = array.array("i", filenos) await checkpoint() with self._send_guard: while True: try: await self._trio_socket.sendmsg( [message], [ ( socket.SOL_SOCKET, socket.SCM_RIGHTS, # type: ignore[list-item] fdarray, ) ], ) break except BaseException as exc: self._convert_socket_error(exc) class TCPSocketListener(_TrioSocketMixin, abc.SocketListener): def __init__(self, raw_socket: socket.socket): super().__init__(trio.socket.from_stdlib_socket(raw_socket)) self._accept_guard = ResourceGuard("accepting connections from") async def accept(self) -> SocketStream: with self._accept_guard: try: trio_socket, _addr = await self._trio_socket.accept() except BaseException as exc: self._convert_socket_error(exc) trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) return SocketStream(trio_socket) class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener): def __init__(self, raw_socket: socket.socket): super().__init__(trio.socket.from_stdlib_socket(raw_socket)) 
self._accept_guard = ResourceGuard("accepting connections from") async def accept(self) -> UNIXSocketStream: with self._accept_guard: try: trio_socket, _addr = await self._trio_socket.accept() except BaseException as exc: self._convert_socket_error(exc) return UNIXSocketStream(trio_socket) class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket): def __init__(self, trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = ResourceGuard("reading from") self._send_guard = ResourceGuard("writing to") async def receive(self) -> tuple[bytes, IPSockAddrType]: with self._receive_guard: try: data, addr = await self._trio_socket.recvfrom(65536) return data, convert_ipv6_sockaddr(addr) except BaseException as exc: self._convert_socket_error(exc) async def send(self, item: UDPPacketType) -> None: with self._send_guard: try: await self._trio_socket.sendto(*item) except BaseException as exc: self._convert_socket_error(exc) class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket): def __init__(self, trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = ResourceGuard("reading from") self._send_guard = ResourceGuard("writing to") async def receive(self) -> bytes: with self._receive_guard: try: return await self._trio_socket.recv(65536) except BaseException as exc: self._convert_socket_error(exc) async def send(self, item: bytes) -> None: with self._send_guard: try: await self._trio_socket.send(item) except BaseException as exc: self._convert_socket_error(exc) async def connect_tcp( host: str, port: int, local_address: IPSockAddrType | None = None ) -> SocketStream: family = socket.AF_INET6 if ":" in host else socket.AF_INET trio_socket = trio.socket.socket(family) trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if local_address: await trio_socket.bind(local_address) try: await trio_socket.connect((host, port)) except BaseException: trio_socket.close() raise return SocketStream(trio_socket) async def connect_unix(path: str) -> UNIXSocketStream: trio_socket = trio.socket.socket(socket.AF_UNIX) try: await trio_socket.connect(path) except BaseException: trio_socket.close() raise return UNIXSocketStream(trio_socket) async def create_udp_socket( family: socket.AddressFamily, local_address: IPSockAddrType | None, remote_address: IPSockAddrType | None, reuse_port: bool, ) -> UDPSocket | ConnectedUDPSocket: trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM) if reuse_port: trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if local_address: await trio_socket.bind(local_address) if remote_address: await trio_socket.connect(remote_address) return ConnectedUDPSocket(trio_socket) else: return UDPSocket(trio_socket) getaddrinfo = trio.socket.getaddrinfo getnameinfo = trio.socket.getnameinfo async def wait_socket_readable(sock: socket.socket) -> None: try: await wait_readable(sock) except trio.ClosedResourceError as exc: raise ClosedResourceError().with_traceback(exc.__traceback__) from None except trio.BusyResourceError: raise BusyResourceError("reading from") from None async def wait_socket_writable(sock: socket.socket) -> None: try: await wait_writable(sock) except trio.ClosedResourceError as exc: raise ClosedResourceError().with_traceback(exc.__traceback__) from None except trio.BusyResourceError: raise BusyResourceError("writing to") from None # # Synchronization # class Event(BaseEvent): def __new__(cls) -> Event: return object.__new__(cls) def __init__(self) -> 
None: self.__original = trio.Event() def is_set(self) -> bool: return self.__original.is_set() async def wait(self) -> None: return await self.__original.wait() def statistics(self) -> EventStatistics: orig_statistics = self.__original.statistics() return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting) def set(self) -> DeprecatedAwaitable: self.__original.set() return DeprecatedAwaitable(self.set) class CapacityLimiter(BaseCapacityLimiter): def __new__(cls, *args: object, **kwargs: object) -> CapacityLimiter: return object.__new__(cls) def __init__( self, *args: Any, original: trio.CapacityLimiter | None = None ) -> None: self.__original = original or trio.CapacityLimiter(*args) async def __aenter__(self) -> None: return await self.__original.__aenter__() async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.__original.__aexit__(exc_type, exc_val, exc_tb) @property def total_tokens(self) -> float: return self.__original.total_tokens @total_tokens.setter def total_tokens(self, value: float) -> None: self.__original.total_tokens = value @property def borrowed_tokens(self) -> int: return self.__original.borrowed_tokens @property def available_tokens(self) -> float: return self.__original.available_tokens def acquire_nowait(self) -> DeprecatedAwaitable: self.__original.acquire_nowait() return DeprecatedAwaitable(self.acquire_nowait) def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: self.__original.acquire_on_behalf_of_nowait(borrower) return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) async def acquire(self) -> None: await self.__original.acquire() async def acquire_on_behalf_of(self, borrower: object) -> None: await self.__original.acquire_on_behalf_of(borrower) def release(self) -> None: return self.__original.release() def release_on_behalf_of(self, borrower: object) -> None: return self.__original.release_on_behalf_of(borrower) def statistics(self) -> CapacityLimiterStatistics: orig = self.__original.statistics() return CapacityLimiterStatistics( borrowed_tokens=orig.borrowed_tokens, total_tokens=orig.total_tokens, borrowers=orig.borrowers, tasks_waiting=orig.tasks_waiting, ) _capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper") def current_default_thread_limiter() -> CapacityLimiter: try: return _capacity_limiter_wrapper.get() except LookupError: limiter = CapacityLimiter( original=trio.to_thread.current_default_thread_limiter() ) _capacity_limiter_wrapper.set(limiter) return limiter # # Signal handling # class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]): _iterator: AsyncIterator[int] def __init__(self, signals: tuple[Signals, ...]): self._signals = signals def __enter__(self) -> _SignalReceiver: self._cm = trio.open_signal_receiver(*self._signals) self._iterator = self._cm.__enter__() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: return self._cm.__exit__(exc_type, exc_val, exc_tb) def __aiter__(self) -> _SignalReceiver: return self async def __anext__(self) -> Signals: signum = await self._iterator.__anext__() return Signals(signum) def open_signal_receiver(*signals: Signals) -> _SignalReceiver: return _SignalReceiver(signals) # # Testing and debugging # def get_current_task() -> TaskInfo: task = trio_lowlevel.current_task() parent_id = None if task.parent_nursery and task.parent_nursery.parent_task: 
parent_id = id(task.parent_nursery.parent_task) return TaskInfo(id(task), parent_id, task.name, task.coro) def get_running_tasks() -> list[TaskInfo]: root_task = trio_lowlevel.current_root_task() task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)] nurseries = root_task.child_nurseries while nurseries: new_nurseries: list[trio.Nursery] = [] for nursery in nurseries: for task in nursery.child_tasks: task_infos.append( TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro) ) new_nurseries.extend(task.child_nurseries) nurseries = new_nurseries return task_infos def wait_all_tasks_blocked() -> Awaitable[None]: import trio.testing return trio.testing.wait_all_tasks_blocked() class TestRunner(abc.TestRunner): def __init__(self, **options: Any) -> None: from collections import deque from queue import Queue self._call_queue: Queue[Callable[..., object]] = Queue() self._result_queue: deque[Outcome] = deque() self._stop_event: trio.Event | None = None self._nursery: trio.Nursery | None = None self._options = options async def _trio_main(self) -> None: self._stop_event = trio.Event() async with trio.open_nursery() as self._nursery: await self._stop_event.wait() async def _call_func( self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict ) -> None: try: retval = await func(*args, **kwargs) except BaseException as exc: self._result_queue.append(Error(exc)) else: self._result_queue.append(Value(retval)) def _main_task_finished(self, outcome: object) -> None: self._nursery = None def _get_nursery(self) -> trio.Nursery: if self._nursery is None: trio.lowlevel.start_guest_run( self._trio_main, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options, ) while self._nursery is None: self._call_queue.get()() return self._nursery def _call( self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object ) -> T_Retval: self._get_nursery().start_soon(self._call_func, func, args, kwargs) while not self._result_queue: self._call_queue.get()() outcome = self._result_queue.pop() return outcome.unwrap() def close(self) -> None: if self._stop_event: self._stop_event.set() while self._nursery is not None: self._call_queue.get()() def run_asyncgen_fixture( self, fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], kwargs: dict[str, Any], ) -> Iterable[T_Retval]: async def fixture_runner(*, task_status: TaskStatus[T_Retval]) -> None: agen = fixture_func(**kwargs) retval = await agen.asend(None) task_status.started(retval) await teardown_event.wait() try: await agen.asend(None) except StopAsyncIteration: pass else: await agen.aclose() raise RuntimeError("Async generator fixture did not stop") teardown_event = trio.Event() fixture_value = self._call(lambda: self._get_nursery().start(fixture_runner)) yield fixture_value teardown_event.set() def run_fixture( self, fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], kwargs: dict[str, Any], ) -> T_Retval: return self._call(fixture_func, **kwargs) def run_test( self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] ) -> None: self._call(test_func, **kwargs)
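# Editor's note: a short sketch of how the trio backend implemented above gets
# selected; passing backend="trio" to anyio.run() makes anyio dispatch to this
# module (trio must be installed). This mirrors the run/sleep aliases defined above.
import anyio

async def main() -> None:
    await anyio.sleep(0.1)  # resolves to trio.sleep through this backend

anyio.run(main, backend="trio")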
30,035
Python
29.126379
88
0.613518
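# Editor's note: a minimal sketch of the public wrapper over the
# run_sync_in_worker_thread() implementations in these backend modules;
# anyio.to_thread.run_sync() is the same call the UNIX listener code above uses
# for bind()/chmod(). blocking_work() is a hypothetical stand-in.
import anyio
import anyio.to_thread

def blocking_work(x: int) -> int:
    return x * 2  # placeholder for genuinely blocking work

async def main() -> None:
    result = await anyio.to_thread.run_sync(blocking_work, 21)
    assert result == 42

anyio.run(main)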
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_backends/_asyncio.py
from __future__ import annotations import array import asyncio import concurrent.futures import math import socket import sys from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined] from collections import OrderedDict, deque from concurrent.futures import Future from contextvars import Context, copy_context from dataclasses import dataclass from functools import partial, wraps from inspect import ( CORO_RUNNING, CORO_SUSPENDED, GEN_RUNNING, GEN_SUSPENDED, getcoroutinestate, getgeneratorstate, ) from io import IOBase from os import PathLike from queue import Queue from socket import AddressFamily, SocketKind from threading import Thread from types import TracebackType from typing import ( IO, Any, AsyncGenerator, Awaitable, Callable, Collection, Coroutine, Generator, Iterable, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast, ) from weakref import WeakKeyDictionary import sniffio from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable from .._core._eventloop import claim_worker_thread, threadlocals from .._core._exceptions import ( BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream, WouldBlock, ) from .._core._exceptions import ExceptionGroup as BaseExceptionGroup from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter from .._core._synchronization import Event as BaseEvent from .._core._synchronization import ResourceGuard from .._core._tasks import CancelScope as BaseCancelScope from ..abc import IPSockAddrType, UDPPacketType from ..lowlevel import RunVar if sys.version_info >= (3, 8): def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]: return task.get_coro() else: def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]: return task._coro from asyncio import all_tasks, create_task, current_task, get_running_loop from asyncio import run as native_run def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: return [cb for cb, context in task._callbacks] T_Retval = TypeVar("T_Retval") T_contra = TypeVar("T_contra", contravariant=True) # Check whether there is native support for task names in asyncio (3.8+) _native_task_names = hasattr(asyncio.Task, "get_name") _root_task: RunVar[asyncio.Task | None] = RunVar("_root_task") def find_root_task() -> asyncio.Task: root_task = _root_task.get(None) if root_task is not None and not root_task.done(): return root_task # Look for a task that has been started via run_until_complete() for task in all_tasks(): if task._callbacks and not task.done(): for cb in _get_task_callbacks(task): if ( cb is _run_until_complete_cb or getattr(cb, "__module__", None) == "uvloop.loop" ): _root_task.set(task) return task # Look up the topmost task in the AnyIO task tree, if possible task = cast(asyncio.Task, current_task()) state = _task_states.get(task) if state: cancel_scope = state.cancel_scope while cancel_scope and cancel_scope._parent_scope is not None: cancel_scope = cancel_scope._parent_scope if cancel_scope is not None: return cast(asyncio.Task, cancel_scope._host_task) return task def get_callable_name(func: Callable) -> str: module = getattr(func, "__module__", None) qualname = getattr(func, "__qualname__", None) return ".".join([x for x in (module, qualname) if x]) # # Event loop # _run_vars = ( WeakKeyDictionary() ) # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] current_token 
= get_running_loop def _task_started(task: asyncio.Task) -> bool: """Return ``True`` if the task has been started and has not finished.""" coro = cast(Coroutine[Any, Any, Any], get_coro(task)) try: return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED) except AttributeError: try: return getgeneratorstate(cast(Generator, coro)) in ( GEN_RUNNING, GEN_SUSPENDED, ) except AttributeError: # task coro is async_generator_asend https://bugs.python.org/issue37771 raise Exception(f"Cannot determine if task {task} has started or not") def _maybe_set_event_loop_policy( policy: asyncio.AbstractEventLoopPolicy | None, use_uvloop: bool ) -> None: # On CPython, use uvloop when possible if no other policy has been given and if not # explicitly disabled if policy is None and use_uvloop and sys.implementation.name == "cpython": try: import uvloop except ImportError: pass else: # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier) if not hasattr( asyncio.AbstractEventLoop, "shutdown_default_executor" ) or hasattr(uvloop.loop.Loop, "shutdown_default_executor"): policy = uvloop.EventLoopPolicy() if policy is not None: asyncio.set_event_loop_policy(policy) def run( func: Callable[..., Awaitable[T_Retval]], *args: object, debug: bool = False, use_uvloop: bool = False, policy: asyncio.AbstractEventLoopPolicy | None = None, ) -> T_Retval: @wraps(func) async def wrapper() -> T_Retval: task = cast(asyncio.Task, current_task()) task_state = TaskState(None, get_callable_name(func), None) _task_states[task] = task_state if _native_task_names: task.set_name(task_state.name) try: return await func(*args) finally: del _task_states[task] _maybe_set_event_loop_policy(policy, use_uvloop) return native_run(wrapper(), debug=debug) # # Miscellaneous # sleep = asyncio.sleep # # Timeouts and cancellation # CancelledError = asyncio.CancelledError class CancelScope(BaseCancelScope): def __new__( cls, *, deadline: float = math.inf, shield: bool = False ) -> CancelScope: return object.__new__(cls) def __init__(self, deadline: float = math.inf, shield: bool = False): self._deadline = deadline self._shield = shield self._parent_scope: CancelScope | None = None self._cancel_called = False self._active = False self._timeout_handle: asyncio.TimerHandle | None = None self._cancel_handle: asyncio.Handle | None = None self._tasks: set[asyncio.Task] = set() self._host_task: asyncio.Task | None = None self._timeout_expired = False self._cancel_calls: int = 0 def __enter__(self) -> CancelScope: if self._active: raise RuntimeError( "Each CancelScope may only be used for a single 'with' block" ) self._host_task = host_task = cast(asyncio.Task, current_task()) self._tasks.add(host_task) try: task_state = _task_states[host_task] except KeyError: task_name = host_task.get_name() if _native_task_names else None task_state = TaskState(None, task_name, self) _task_states[host_task] = task_state else: self._parent_scope = task_state.cancel_scope task_state.cancel_scope = self self._timeout() self._active = True # Start cancelling the host task if the scope was cancelled before entering if self._cancel_called: self._deliver_cancellation() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: if not self._active: raise RuntimeError("This cancel scope is not active") if current_task() is not self._host_task: raise RuntimeError( "Attempted to exit cancel scope in a different task than it was " "entered in" ) assert self._host_task
is not None host_task_state = _task_states.get(self._host_task) if host_task_state is None or host_task_state.cancel_scope is not self: raise RuntimeError( "Attempted to exit a cancel scope that isn't the current task's " "current cancel scope" ) self._active = False if self._timeout_handle: self._timeout_handle.cancel() self._timeout_handle = None self._tasks.remove(self._host_task) host_task_state.cancel_scope = self._parent_scope # Restart the cancellation effort in the farthest directly cancelled parent scope if this # one was shielded if self._shield: self._deliver_cancellation_to_parent() if exc_val is not None: exceptions = ( exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val] ) if all(isinstance(exc, CancelledError) for exc in exceptions): if self._timeout_expired: return self._uncancel() elif not self._cancel_called: # Task was cancelled natively return None elif not self._parent_cancelled(): # This scope was directly cancelled return self._uncancel() return None def _uncancel(self) -> bool: if sys.version_info < (3, 11) or self._host_task is None: self._cancel_calls = 0 return True # Uncancel all AnyIO cancellations for i in range(self._cancel_calls): self._host_task.uncancel() self._cancel_calls = 0 return not self._host_task.cancelling() def _timeout(self) -> None: if self._deadline != math.inf: loop = get_running_loop() if loop.time() >= self._deadline: self._timeout_expired = True self.cancel() else: self._timeout_handle = loop.call_at(self._deadline, self._timeout) def _deliver_cancellation(self) -> None: """ Deliver cancellation to directly contained tasks and nested cancel scopes. Schedule another run at the end if we still have tasks eligible for cancellation. """ should_retry = False current = current_task() for task in self._tasks: if task._must_cancel: # type: ignore[attr-defined] continue # The task is eligible for cancellation if it has started and is not in a cancel # scope shielded from this one cancel_scope = _task_states[task].cancel_scope while cancel_scope is not self: if cancel_scope is None or cancel_scope._shield: break else: cancel_scope = cancel_scope._parent_scope else: should_retry = True if task is not current and ( task is self._host_task or _task_started(task) ): self._cancel_calls += 1 task.cancel() # Schedule another callback if there are still tasks left if should_retry: self._cancel_handle = get_running_loop().call_soon( self._deliver_cancellation ) else: self._cancel_handle = None def _deliver_cancellation_to_parent(self) -> None: """Start cancellation effort in the farthest directly cancelled parent scope""" scope = self._parent_scope scope_to_cancel: CancelScope | None = None while scope is not None: if scope._cancel_called and scope._cancel_handle is None: scope_to_cancel = scope # No point in looking beyond any shielded scope if scope._shield: break scope = scope._parent_scope if scope_to_cancel is not None: scope_to_cancel._deliver_cancellation() def _parent_cancelled(self) -> bool: # Check whether any parent has been cancelled cancel_scope = self._parent_scope while cancel_scope is not None and not cancel_scope._shield: if cancel_scope._cancel_called: return True else: cancel_scope = cancel_scope._parent_scope return False def cancel(self) -> DeprecatedAwaitable: if not self._cancel_called: if self._timeout_handle: self._timeout_handle.cancel() self._timeout_handle = None self._cancel_called = True if self._host_task is not None: self._deliver_cancellation() return DeprecatedAwaitable(self.cancel) @property def
deadline(self) -> float: return self._deadline @deadline.setter def deadline(self, value: float) -> None: self._deadline = float(value) if self._timeout_handle is not None: self._timeout_handle.cancel() self._timeout_handle = None if self._active and not self._cancel_called: self._timeout() @property def cancel_called(self) -> bool: return self._cancel_called @property def shield(self) -> bool: return self._shield @shield.setter def shield(self, value: bool) -> None: if self._shield != value: self._shield = value if not value: self._deliver_cancellation_to_parent() async def checkpoint() -> None: await sleep(0) async def checkpoint_if_cancelled() -> None: task = current_task() if task is None: return try: cancel_scope = _task_states[task].cancel_scope except KeyError: return while cancel_scope: if cancel_scope.cancel_called: await sleep(0) elif cancel_scope.shield: break else: cancel_scope = cancel_scope._parent_scope async def cancel_shielded_checkpoint() -> None: with CancelScope(shield=True): await sleep(0) def current_effective_deadline() -> float: try: cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index] except KeyError: return math.inf deadline = math.inf while cancel_scope: deadline = min(deadline, cancel_scope.deadline) if cancel_scope._cancel_called: deadline = -math.inf break elif cancel_scope.shield: break else: cancel_scope = cancel_scope._parent_scope return deadline def current_time() -> float: return get_running_loop().time() # # Task states # class TaskState: """ Encapsulates auxiliary task information that cannot be added to the Task instance itself because there are no guarantees about its implementation. """ __slots__ = "parent_id", "name", "cancel_scope" def __init__( self, parent_id: int | None, name: str | None, cancel_scope: CancelScope | None, ): self.parent_id = parent_id self.name = name self.cancel_scope = cancel_scope _task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState] # # Task groups # class ExceptionGroup(BaseExceptionGroup): def __init__(self, exceptions: list[BaseException]): super().__init__() self.exceptions = exceptions class _AsyncioTaskStatus(abc.TaskStatus): def __init__(self, future: asyncio.Future, parent_id: int): self._future = future self._parent_id = parent_id def started(self, value: T_contra | None = None) -> None: try: self._future.set_result(value) except asyncio.InvalidStateError: raise RuntimeError( "called 'started' twice on the same task status" ) from None task = cast(asyncio.Task, current_task()) _task_states[task].parent_id = self._parent_id class TaskGroup(abc.TaskGroup): def __init__(self) -> None: self.cancel_scope: CancelScope = CancelScope() self._active = False self._exceptions: list[BaseException] = [] async def __aenter__(self) -> TaskGroup: self.cancel_scope.__enter__() self._active = True return self async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool | None: ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb) if exc_val is not None: self.cancel_scope.cancel() self._exceptions.append(exc_val) while self.cancel_scope._tasks: try: await asyncio.wait(self.cancel_scope._tasks) except asyncio.CancelledError: self.cancel_scope.cancel() self._active = False if not self.cancel_scope._parent_cancelled(): exceptions = self._filter_cancellation_errors(self._exceptions) else: exceptions = self._exceptions try: if len(exceptions) > 1: if all( isinstance(e, CancelledError) and 
not e.args for e in exceptions ): # Tasks were cancelled natively, without a cancellation message raise CancelledError else: raise ExceptionGroup(exceptions) elif exceptions and exceptions[0] is not exc_val: raise exceptions[0] except BaseException as exc: # Clear the context here, as it can only be done in-flight. # If the context is not cleared, it can result in recursive tracebacks (see #145). exc.__context__ = None raise return ignore_exception @staticmethod def _filter_cancellation_errors( exceptions: Sequence[BaseException], ) -> list[BaseException]: filtered_exceptions: list[BaseException] = [] for exc in exceptions: if isinstance(exc, ExceptionGroup): new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions) if len(new_exceptions) > 1: filtered_exceptions.append(exc) elif len(new_exceptions) == 1: filtered_exceptions.append(new_exceptions[0]) elif new_exceptions: new_exc = ExceptionGroup(new_exceptions) new_exc.__cause__ = exc.__cause__ new_exc.__context__ = exc.__context__ new_exc.__traceback__ = exc.__traceback__ filtered_exceptions.append(new_exc) elif not isinstance(exc, CancelledError) or exc.args: filtered_exceptions.append(exc) return filtered_exceptions async def _run_wrapped_task( self, coro: Coroutine, task_status_future: asyncio.Future | None ) -> None: # This is the code path for Python 3.7 on which asyncio freaks out if a task # raises a BaseException. __traceback_hide__ = __tracebackhide__ = True # noqa: F841 task = cast(asyncio.Task, current_task()) try: await coro except BaseException as exc: if task_status_future is None or task_status_future.done(): self._exceptions.append(exc) self.cancel_scope.cancel() else: task_status_future.set_exception(exc) else: if task_status_future is not None and not task_status_future.done(): task_status_future.set_exception( RuntimeError("Child exited without calling task_status.started()") ) finally: if task in self.cancel_scope._tasks: self.cancel_scope._tasks.remove(task) del _task_states[task] def _spawn( self, func: Callable[..., Awaitable[Any]], args: tuple, name: object, task_status_future: asyncio.Future | None = None, ) -> asyncio.Task: def task_done(_task: asyncio.Task) -> None: # This is the code path for Python 3.8+ assert _task in self.cancel_scope._tasks self.cancel_scope._tasks.remove(_task) del _task_states[_task] try: exc = _task.exception() except CancelledError as e: while isinstance(e.__context__, CancelledError): e = e.__context__ exc = e if exc is not None: if task_status_future is None or task_status_future.done(): self._exceptions.append(exc) self.cancel_scope.cancel() else: task_status_future.set_exception(exc) elif task_status_future is not None and not task_status_future.done(): task_status_future.set_exception( RuntimeError("Child exited without calling task_status.started()") ) if not self._active: raise RuntimeError( "This task group is not active; no new tasks can be started." 
            )

        options: dict[str, Any] = {}
        name = get_callable_name(func) if name is None else str(name)
        if _native_task_names:
            options["name"] = name

        kwargs = {}
        if task_status_future:
            parent_id = id(current_task())
            kwargs["task_status"] = _AsyncioTaskStatus(
                task_status_future, id(self.cancel_scope._host_task)
            )
        else:
            parent_id = id(self.cancel_scope._host_task)

        coro = func(*args, **kwargs)
        if not asyncio.iscoroutine(coro):
            raise TypeError(
                f"Expected an async function, but {func} appears to be synchronous"
            )

        foreign_coro = not hasattr(coro, "cr_frame") and not hasattr(coro, "gi_frame")
        if foreign_coro or sys.version_info < (3, 8):
            coro = self._run_wrapped_task(coro, task_status_future)

        task = create_task(coro, **options)
        if not foreign_coro and sys.version_info >= (3, 8):
            task.add_done_callback(task_done)

        # Make the spawned task inherit the task group's cancel scope
        _task_states[task] = TaskState(
            parent_id=parent_id, name=name, cancel_scope=self.cancel_scope
        )
        self.cancel_scope._tasks.add(task)
        return task

    def start_soon(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> None:
        self._spawn(func, args, name)

    async def start(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> None:
        future: asyncio.Future = asyncio.Future()
        task = self._spawn(func, args, name, future)

        # If the task raises an exception after sending a start value without a switch point
        # between, the task group is cancelled and this method never proceeds to process the
        # completed future. That's why we have to have a shielded cancel scope here.
        with CancelScope(shield=True):
            try:
                return await future
            except CancelledError:
                task.cancel()
                raise


#
# Threads
#

_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]


class WorkerThread(Thread):
    MAX_IDLE_TIME = 10  # seconds

    def __init__(
        self,
        root_task: asyncio.Task,
        workers: set[WorkerThread],
        idle_workers: deque[WorkerThread],
    ):
        super().__init__(name="AnyIO worker thread")
        self.root_task = root_task
        self.workers = workers
        self.idle_workers = idle_workers
        self.loop = root_task._loop
        self.queue: Queue[
            tuple[Context, Callable, tuple, asyncio.Future] | None
        ] = Queue(2)
        self.idle_since = current_time()
        self.stopping = False

    def _report_result(
        self, future: asyncio.Future, result: Any, exc: BaseException | None
    ) -> None:
        self.idle_since = current_time()
        if not self.stopping:
            self.idle_workers.append(self)

        if not future.cancelled():
            if exc is not None:
                if isinstance(exc, StopIteration):
                    new_exc = RuntimeError("coroutine raised StopIteration")
                    new_exc.__cause__ = exc
                    exc = new_exc

                future.set_exception(exc)
            else:
                future.set_result(result)

    def run(self) -> None:
        with claim_worker_thread("asyncio"):
            threadlocals.loop = self.loop
            while True:
                item = self.queue.get()
                if item is None:
                    # Shutdown command received
                    return

                context, func, args, future = item
                if not future.cancelled():
                    result = None
                    exception: BaseException | None = None
                    try:
                        result = context.run(func, *args)
                    except BaseException as exc:
                        exception = exc

                    if not self.loop.is_closed():
                        self.loop.call_soon_threadsafe(
                            self._report_result, future, result, exception
                        )

                self.queue.task_done()

    def stop(self, f: asyncio.Task | None = None) -> None:
        self.stopping = True
        self.queue.put_nowait(None)
        self.workers.discard(self)
        try:
            self.idle_workers.remove(self)
        except ValueError:
            pass


_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
    "_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")


async def run_sync_in_worker_thread(
    func: Callable[..., T_Retval],
    *args: object,
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    await checkpoint()

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        idle_workers = _threadpool_idle_workers.get()
        workers = _threadpool_workers.get()
    except LookupError:
        idle_workers = deque()
        workers = set()
        _threadpool_idle_workers.set(idle_workers)
        _threadpool_workers.set(workers)

    async with (limiter or current_default_thread_limiter()):
        with CancelScope(shield=not cancellable):
            future: asyncio.Future = asyncio.Future()
            root_task = find_root_task()
            if not idle_workers:
                worker = WorkerThread(root_task, workers, idle_workers)
                worker.start()
                workers.add(worker)
                root_task.add_done_callback(worker.stop)
            else:
                worker = idle_workers.pop()

                # Prune any other workers that have been idle for MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                while idle_workers:
                    if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME:
                        break

                    expired_worker = idle_workers.popleft()
                    expired_worker.root_task.remove_done_callback(expired_worker.stop)
                    expired_worker.stop()

            context = copy_context()
            context.run(sniffio.current_async_library_cvar.set, None)
            worker.queue.put_nowait((context, func, args, future))
            return await future


def run_sync_from_thread(
    func: Callable[..., T_Retval],
    *args: object,
    loop: asyncio.AbstractEventLoop | None = None,
) -> T_Retval:
    @wraps(func)
    def wrapper() -> None:
        try:
            f.set_result(func(*args))
        except BaseException as exc:
            f.set_exception(exc)
            if not isinstance(exc, Exception):
                raise

    f: concurrent.futures.Future[T_Retval] = Future()
    loop = loop or threadlocals.loop
    loop.call_soon_threadsafe(wrapper)
    return f.result()


def run_async_from_thread(
    func: Callable[..., Awaitable[T_Retval]], *args: object
) -> T_Retval:
    f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
        func(*args), threadlocals.loop
    )
    return f.result()


class BlockingPortal(abc.BlockingPortal):
    def __new__(cls) -> BlockingPortal:
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        self._loop = get_running_loop()

    def _spawn_task_from_thread(
        self,
        func: Callable,
        args: tuple,
        kwargs: dict[str, Any],
        name: object,
        future: Future,
    ) -> None:
        run_sync_from_thread(
            partial(self._task_group.start_soon, name=name),
            self._call_func,
            func,
            args,
            kwargs,
            future,
            loop=self._loop,
        )


#
# Subprocesses
#


@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
    _stream: asyncio.StreamReader

    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._stream.read(max_bytes)
        if data:
            return data
        else:
            raise EndOfStream

    async def aclose(self) -> None:
        self._stream.feed_eof()


@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
    _stream: asyncio.StreamWriter

    async def send(self, item: bytes) -> None:
        self._stream.write(item)
        await self._stream.drain()

    async def aclose(self) -> None:
        self._stream.close()


@dataclass(eq=False)
class Process(abc.Process):
    _process: asyncio.subprocess.Process
    _stdin: StreamWriterWrapper | None
    _stdout: StreamReaderWrapper | None
    _stderr: StreamReaderWrapper | None

    async def aclose(self) -> None:
        if self._stdin:
            await self._stdin.aclose()
        if self._stdout:
            await self._stdout.aclose()
        if self._stderr:
            await self._stderr.aclose()

        await self.wait()

    async def wait(self) -> int:
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr


async def open_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    shell: bool,
    stdin: int | IO[Any] | None,
    stdout: int | IO[Any] | None,
    stderr: int | IO[Any] | None,
    cwd: str | bytes | PathLike | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> Process:
    await checkpoint()
    if shell:
        process = await asyncio.create_subprocess_shell(
            cast(Union[str, bytes], command),
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )
    else:
        process = await asyncio.create_subprocess_exec(
            *command,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )

    stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
    stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
    stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
    return Process(process, stdin_stream, stdout_stream, stderr_stream)


def _forcibly_shutdown_process_pool_on_exit(
    workers: set[Process], _task: object
) -> None:
    """
    Forcibly shuts down worker processes belonging to this event loop."""
    child_watcher: asyncio.AbstractChildWatcher | None
    try:
        child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
    except NotImplementedError:
        child_watcher = None

    # Close as much as possible (w/o async/await) to avoid warnings
    for process in workers:
        if process.returncode is None:
            continue

        process._stdin._stream._transport.close()  # type: ignore[union-attr]
        process._stdout._stream._transport.close()  # type: ignore[union-attr]
        process._stderr._stream._transport.close()  # type: ignore[union-attr]
        process.kill()
        if child_watcher:
            child_watcher.remove_child_handler(process.pid)


async def _shutdown_process_pool_on_exit(workers: set[Process]) -> None:
    """
    Shuts down worker processes belonging to this event loop.

    NOTE: this only works when the event loop was started using asyncio.run() or
    anyio.run().

    """
    process: Process
    try:
        await sleep(math.inf)
    except asyncio.CancelledError:
        for process in workers:
            if process.returncode is None:
                process.kill()

        for process in workers:
            await process.aclose()


def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
    kwargs: dict[str, Any] = (
        {"name": "AnyIO process pool shutdown task"} if _native_task_names else {}
    )
    create_task(_shutdown_process_pool_on_exit(workers), **kwargs)
    find_root_task().add_done_callback(
        partial(_forcibly_shutdown_process_pool_on_exit, workers)
    )


#
# Sockets and networking
#


class StreamProtocol(asyncio.Protocol):
    read_queue: deque[bytes]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)

    def connection_lost(self, exc: Exception | None) -> None:
        if exc:
            self.exception = BrokenResourceError()
            self.exception.__cause__ = exc

        self.read_event.set()
        self.write_event.set()

    def data_received(self, data: bytes) -> None:
        self.read_queue.append(data)
        self.read_event.set()

    def eof_received(self) -> bool | None:
        self.read_event.set()
        return True

    def pause_writing(self) -> None:
        self.write_event = asyncio.Event()

    def resume_writing(self) -> None:
        self.write_event.set()


class DatagramProtocol(asyncio.DatagramProtocol):
    read_queue: deque[tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()

    def connection_lost(self, exc: Exception | None) -> None:
        self.read_event.set()
        self.write_event.set()

    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()

    def error_received(self, exc: Exception) -> None:
        self.exception = exc

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()


class SocketStream(abc.SocketStream):
    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        with self._receive_guard:
            await checkpoint()

            if (
                not self._protocol.read_event.is_set()
                and not self._transport.is_closing()
            ):
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise self._protocol.exception
                else:
                    raise EndOfStream from None

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            # If the read queue is empty, clear the flag so that the next call will block until
            # data is available
            if not self._protocol.read_queue:
                self._protocol.read_event.clear()

            return chunk

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()

            if self._closed:
                raise ClosedResourceError
            elif self._protocol.exception is not None:
                raise self._protocol.exception

            try:
                self._transport.write(item)
            except RuntimeError as exc:
                if self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            await sleep(0)
            self._transport.abort()


class UNIXSocketStream(abc.SocketStream):
    _receive_future: asyncio.Future | None = None
    _send_future: asyncio.Future | None = None
    _closing = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._receive_future
            loop.remove_reader(self.__raw_socket)

        f = self._receive_future = asyncio.Future()
        self._loop.add_reader(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._send_future
            loop.remove_writer(self.__raw_socket)

        f = self._send_future = asyncio.Future()
        self._loop.add_writer(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    async def send_eof(self) -> None:
        with self._send_guard:
            self._raw_socket.shutdown(socket.SHUT_WR)

    async def receive(self, max_bytes: int = 65536) -> bytes:
        loop = get_running_loop()
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self.__raw_socket.recv(max_bytes)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not data:
                        raise EndOfStream

                    return data

    async def send(self, item: bytes) -> None:
        loop = get_running_loop()
        await checkpoint()
        with self._send_guard:
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = self.__raw_socket.send(view)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    view = view[bytes_sent:]

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        loop = get_running_loop()
        fds = array.array("i")
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    message, ancdata, flags, addr = self.__raw_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not message and not ancdata:
                        raise EndOfStream

                    break

        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                raise RuntimeError(
                    f"Received unexpected ancillary data; message = {message!r}, "
                    f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                )

            fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        loop = get_running_loop()
        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await checkpoint()
        with self._send_guard:
            while True:
                try:
                    # The ignore can be removed after mypy picks up
                    # https://github.com/python/typeshed/pull/5545
                    self.__raw_socket.sendmsg(
                        [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
                    )
                    break
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        if not self._closing:
            self._closing = True
            if self.__raw_socket.fileno() != -1:
                self.__raw_socket.close()

            if self._receive_future:
                self._receive_future.set_result(None)
            if self._send_future:
                self._send_future.set_result(None)


class TCPSocketListener(abc.SocketListener):
    _accept_scope: CancelScope | None = None
    _closed = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        self._accept_guard = ResourceGuard("accepting connections from")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    async def accept(self) -> abc.SocketStream:
        if self._closed:
            raise ClosedResourceError

        with self._accept_guard:
            await checkpoint()
            with CancelScope() as self._accept_scope:
                try:
                    client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
                except asyncio.CancelledError:
                    # Workaround for https://bugs.python.org/issue41317
                    try:
                        self._loop.remove_reader(self._raw_socket)
                    except (ValueError, NotImplementedError):
                        pass

                    if self._closed:
                        raise ClosedResourceError from None

                    raise
                finally:
                    self._accept_scope = None

        client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        transport, protocol = await self._loop.connect_accepted_socket(
            StreamProtocol, client_sock
        )
        return SocketStream(transport, protocol)

    async def aclose(self) -> None:
        if self._closed:
            return

        self._closed = True
        if self._accept_scope:
            # Workaround for https://bugs.python.org/issue41317
            try:
                self._loop.remove_reader(self._raw_socket)
            except (ValueError, NotImplementedError):
                pass

            self._accept_scope.cancel()
            await sleep(0)

        self._raw_socket.close()


class UNIXSocketListener(abc.SocketListener):
    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._accept_guard = ResourceGuard("accepting connections from")
        self._closed = False

    async def accept(self) -> abc.SocketStream:
        await checkpoint()
        with self._accept_guard:
            while True:
                try:
                    client_sock, _ = self.__raw_socket.accept()
                    client_sock.setblocking(False)
                    return UNIXSocketStream(client_sock)
                except BlockingIOError:
                    f: asyncio.Future = asyncio.Future()
                    self._loop.add_reader(self.__raw_socket, f.set_result, None)
                    f.add_done_callback(
                        lambda _: self._loop.remove_reader(self.__raw_socket)
                    )
                    await f
                except OSError as exc:
                    if self._closed:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        self._closed = True
        self.__raw_socket.close()

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket


class UDPSocket(abc.UDPSocket):
    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> tuple[bytes, IPSockAddrType]:
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

    async def send(self, item: UDPPacketType) -> None:
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)


class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> bytes:
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

            return packet[0]

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)


async def connect_tcp(
    host: str, port: int, local_addr: tuple[str, int] | None = None
) -> SocketStream:
    transport, protocol = cast(
        Tuple[asyncio.Transport, StreamProtocol],
        await get_running_loop().create_connection(
            StreamProtocol, host, port, local_addr=local_addr
        ),
    )
    transport.pause_reading()
    return SocketStream(transport, protocol)


async def connect_unix(path: str) -> UNIXSocketStream:
    await checkpoint()
    loop = get_running_loop()
    raw_socket = socket.socket(socket.AF_UNIX)
    raw_socket.setblocking(False)
    while True:
        try:
            raw_socket.connect(path)
        except BlockingIOError:
            f: asyncio.Future = asyncio.Future()
            loop.add_writer(raw_socket, f.set_result, None)
            f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
            await f
        except BaseException:
            raw_socket.close()
            raise
        else:
            return UNIXSocketStream(raw_socket)


async def create_udp_socket(
    family: socket.AddressFamily,
    local_address: IPSockAddrType | None,
    remote_address: IPSockAddrType | None,
    reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
    result = await get_running_loop().create_datagram_endpoint(
        DatagramProtocol,
        local_addr=local_address,
        remote_addr=remote_address,
        family=family,
        reuse_port=reuse_port,
    )
    transport = result[0]
    protocol = result[1]
    if protocol.exception:
        transport.close()
        raise protocol.exception

    if not remote_address:
        return UDPSocket(transport, protocol)
    else:
        return ConnectedUDPSocket(transport, protocol)


async def getaddrinfo(
    host: bytes | str,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> GetAddrInfoReturnType:
    # https://github.com/python/typeshed/pull/4304
    result = await get_running_loop().getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags
    )
    return cast(GetAddrInfoReturnType, result)


async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> tuple[str, str]:
    return await get_running_loop().getnameinfo(sockaddr, flags)


_read_events: RunVar[dict[Any, asyncio.Event]] = RunVar("read_events")
_write_events: RunVar[dict[Any, asyncio.Event]] = RunVar("write_events")


async def wait_socket_readable(sock: socket.socket) -> None:
    await checkpoint()
    try:
        read_events = _read_events.get()
    except LookupError:
        read_events = {}
        _read_events.set(read_events)

    if read_events.get(sock):
        raise BusyResourceError("reading from") from None

    loop = get_running_loop()
    event = read_events[sock] = asyncio.Event()
    loop.add_reader(sock, event.set)
    try:
        await event.wait()
    finally:
        if read_events.pop(sock, None) is not None:
            loop.remove_reader(sock)
            readable = True
        else:
            readable = False

    if not readable:
        raise ClosedResourceError


async def wait_socket_writable(sock: socket.socket) -> None:
    await checkpoint()
    try:
        write_events = _write_events.get()
    except LookupError:
        write_events = {}
        _write_events.set(write_events)

    if write_events.get(sock):
        raise BusyResourceError("writing to") from None

    loop = get_running_loop()
    event = write_events[sock] = asyncio.Event()
    loop.add_writer(sock.fileno(), event.set)
    try:
        await event.wait()
    finally:
        if write_events.pop(sock, None) is not None:
            loop.remove_writer(sock)
            writable = True
        else:
            writable = False

    if not writable:
        raise ClosedResourceError


#
# Synchronization
#


class Event(BaseEvent):
    def __new__(cls) -> Event:
        return object.__new__(cls)

    def __init__(self) -> None:
        self._event = asyncio.Event()

    def set(self) -> DeprecatedAwaitable:
        self._event.set()
        return DeprecatedAwaitable(self.set)

    def is_set(self) -> bool:
        return self._event.is_set()

    async def wait(self) -> None:
        if await self._event.wait():
            await checkpoint()

    def statistics(self) -> EventStatistics:
        return EventStatistics(len(self._event._waiters))  # type: ignore[attr-defined]


class CapacityLimiter(BaseCapacityLimiter):
    _total_tokens: float = 0

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        return object.__new__(cls)

    def __init__(self, total_tokens: float):
        self._borrowers: set[Any] = set()
        self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
        self.total_tokens = total_tokens

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    @property
    def total_tokens(self) -> float:
        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError("total_tokens must be an int or math.inf")
        if value < 1:
            raise ValueError("total_tokens must be >= 1")

        old_value = self._total_tokens
        self._total_tokens = value
        events = []
        for event in self._wait_queue.values():
            if value <= old_value:
                break

            if not event.is_set():
                events.append(event)
                old_value += 1

        for event in events:
            event.set()

    @property
    def borrowed_tokens(self) -> int:
        return len(self._borrowers)

    @property
    def available_tokens(self) -> float:
        return self._total_tokens - len(self._borrowers)

    def acquire_nowait(self) -> DeprecatedAwaitable:
        self.acquire_on_behalf_of_nowait(current_task())
        return DeprecatedAwaitable(self.acquire_nowait)

    def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
        if borrower in self._borrowers:
            raise RuntimeError(
                "this borrower is already holding one of this CapacityLimiter's "
                "tokens"
            )

        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock

        self._borrowers.add(borrower)
        return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)

    async def acquire(self) -> None:
        return await self.acquire_on_behalf_of(current_task())

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await checkpoint_if_cancelled()
        try:
            self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                self._wait_queue.pop(borrower, None)
                raise

            self._borrowers.add(borrower)
        else:
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def release(self) -> None:
        self.release_on_behalf_of(current_task())

    def release_on_behalf_of(self, borrower: object) -> None:
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError(
                "this borrower isn't holding any of this CapacityLimiter's "
                "tokens"
            ) from None

        # Notify the next task in line if this limiter has free capacity now
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()

    def statistics(self) -> CapacityLimiterStatistics:
        return CapacityLimiterStatistics(
            self.borrowed_tokens,
            self.total_tokens,
            tuple(self._borrowers),
            len(self._wait_queue),
        )


_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")


def current_default_thread_limiter() -> CapacityLimiter:
    try:
        return _default_thread_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(40)
        _default_thread_limiter.set(limiter)
        return limiter


#
# Operating system signals
#


class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
    def __init__(self, signals: tuple[int, ...]):
        self._signals = signals
        self._loop = get_running_loop()
        self._signal_queue: deque[int] = deque()
        self._future: asyncio.Future = asyncio.Future()
        self._handled_signals: set[int] = set()

    def _deliver(self, signum: int) -> None:
        self._signal_queue.append(signum)
        if not self._future.done():
            self._future.set_result(None)

    def __enter__(self) -> _SignalReceiver:
        for sig in set(self._signals):
            self._loop.add_signal_handler(sig, self._deliver, sig)
            self._handled_signals.add(sig)

        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        for sig in self._handled_signals:
            self._loop.remove_signal_handler(sig)
        return None

    def __aiter__(self) -> _SignalReceiver:
        return self

    async def __anext__(self) -> int:
        await checkpoint()
        if not self._signal_queue:
            self._future = asyncio.Future()
            await self._future

        return self._signal_queue.popleft()


def open_signal_receiver(*signals: int) -> _SignalReceiver:
    return _SignalReceiver(signals)


#
# Testing and debugging
#


def _create_task_info(task: asyncio.Task) -> TaskInfo:
    task_state = _task_states.get(task)
    if task_state is None:
        name = task.get_name() if _native_task_names else None
        parent_id = None
    else:
        name = task_state.name
        parent_id = task_state.parent_id

    return TaskInfo(id(task), parent_id, name, get_coro(task))


def get_current_task() -> TaskInfo:
    return _create_task_info(current_task())  # type: ignore[arg-type]


def get_running_tasks() -> list[TaskInfo]:
    return [_create_task_info(task) for task in all_tasks() if not task.done()]


async def wait_all_tasks_blocked() -> None:
    await checkpoint()
    this_task = current_task()
    while True:
        for task in all_tasks():
            if task is this_task:
                continue

            if task._fut_waiter is None or task._fut_waiter.done():  # type: ignore[attr-defined]
                await sleep(0.1)
                break
        else:
            return


class TestRunner(abc.TestRunner):
    def __init__(
        self,
        debug: bool = False,
        use_uvloop: bool = False,
        policy: asyncio.AbstractEventLoopPolicy | None = None,
    ):
        self._exceptions: list[BaseException] = []
        _maybe_set_event_loop_policy(policy, use_uvloop)
        self._loop = asyncio.new_event_loop()
        self._loop.set_debug(debug)
        self._loop.set_exception_handler(self._exception_handler)
        asyncio.set_event_loop(self._loop)

    def _cancel_all_tasks(self) -> None:
        to_cancel = all_tasks(self._loop)
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        self._loop.run_until_complete(
            asyncio.gather(*to_cancel, return_exceptions=True)
        )

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                raise cast(BaseException, task.exception())

    def _exception_handler(
        self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
    ) -> None:
        if isinstance(context.get("exception"), Exception):
            self._exceptions.append(context["exception"])
        else:
            loop.default_exception_handler(context)

    def _raise_async_exceptions(self) -> None:
        # Re-raise any exceptions raised in asynchronous callbacks
        if self._exceptions:
            exceptions, self._exceptions = self._exceptions, []
            if len(exceptions) == 1:
                raise exceptions[0]
            elif exceptions:
                raise ExceptionGroup(exceptions)

    def close(self) -> None:
        try:
            self._cancel_all_tasks()
            self._loop.run_until_complete(self._loop.shutdown_asyncgens())
        finally:
            asyncio.set_event_loop(None)
            self._loop.close()

    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[T_Retval]:
        async def fixture_runner() -> None:
            agen = fixture_func(**kwargs)
            try:
                retval = await agen.asend(None)
                self._raise_async_exceptions()
            except BaseException as exc:
                f.set_exception(exc)
                return
            else:
                f.set_result(retval)

            await event.wait()
            try:
                await agen.asend(None)
            except StopAsyncIteration:
                pass
            else:
                await agen.aclose()
                raise RuntimeError("Async generator fixture did not stop")

        f = self._loop.create_future()
        event = asyncio.Event()
        fixture_task = self._loop.create_task(fixture_runner())
        self._loop.run_until_complete(f)
        yield f.result()
        event.set()
        self._loop.run_until_complete(fixture_task)
        self._raise_async_exceptions()

    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
        kwargs: dict[str, Any],
    ) -> T_Retval:
        retval = self._loop.run_until_complete(fixture_func(**kwargs))
        self._raise_async_exceptions()
        return retval

    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        try:
            self._loop.run_until_complete(test_func(**kwargs))
        except Exception as exc:
            self._exceptions.append(exc)

        self._raise_async_exceptions()
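
# Usage sketch (illustrative addition, not part of the upstream module): how the
# worker-thread machinery above is reached through anyio's public API. The
# `load_file` function and its argument are hypothetical stand-ins for any
# blocking call.
#
#     import anyio
#     import anyio.to_thread
#
#     def load_file(path: str) -> bytes:
#         with open(path, "rb") as f:
#             return f.read()
#
#     async def main() -> None:
#         # On the asyncio backend this dispatches to run_sync_in_worker_thread()
#         data = await anyio.to_thread.run_sync(load_file, "README.txt")
#         print(len(data))
#
#     anyio.run(main)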
67,056
Python
30.660529
97
0.577711
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/exceptions.py
from typing import Any, Dict, Optional, Sequence, Type

from pydantic import BaseModel, ValidationError, create_model
from pydantic.error_wrappers import ErrorList
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.exceptions import WebSocketException as WebSocketException  # noqa: F401


class HTTPException(StarletteHTTPException):
    def __init__(
        self,
        status_code: int,
        detail: Any = None,
        headers: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(status_code=status_code, detail=detail, headers=headers)


RequestErrorModel: Type[BaseModel] = create_model("Request")
WebSocketErrorModel: Type[BaseModel] = create_model("WebSocket")


class FastAPIError(RuntimeError):
    """
    A generic, FastAPI-specific error.
    """


class RequestValidationError(ValidationError):
    def __init__(self, errors: Sequence[ErrorList], *, body: Any = None) -> None:
        self.body = body
        super().__init__(errors, RequestErrorModel)


class WebSocketRequestValidationError(ValidationError):
    def __init__(self, errors: Sequence[ErrorList]) -> None:
        super().__init__(errors, WebSocketErrorModel)
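
# Usage sketch (illustrative addition, not part of the upstream module): raising
# HTTPException from a path operation. The `items` dict is a hypothetical store.
#
#     from fastapi import FastAPI, HTTPException
#
#     app = FastAPI()
#     items = {"foo": "The Foo Wrestlers"}
#
#     @app.get("/items/{item_id}")
#     async def read_item(item_id: str):
#         if item_id not in items:
#             raise HTTPException(status_code=404, detail="Item not found")
#         return {"item": items[item_id]}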
1,205
Python
30.736841
87
0.711203
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/datastructures.py
from typing import Any, Callable, Dict, Iterable, Type, TypeVar

from starlette.datastructures import URL as URL  # noqa: F401
from starlette.datastructures import Address as Address  # noqa: F401
from starlette.datastructures import FormData as FormData  # noqa: F401
from starlette.datastructures import Headers as Headers  # noqa: F401
from starlette.datastructures import QueryParams as QueryParams  # noqa: F401
from starlette.datastructures import State as State  # noqa: F401
from starlette.datastructures import UploadFile as StarletteUploadFile


class UploadFile(StarletteUploadFile):
    @classmethod
    def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]:
        yield cls.validate

    @classmethod
    def validate(cls: Type["UploadFile"], v: Any) -> Any:
        if not isinstance(v, StarletteUploadFile):
            raise ValueError(f"Expected UploadFile, received: {type(v)}")
        return v

    @classmethod
    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
        field_schema.update({"type": "string", "format": "binary"})


class DefaultPlaceholder:
    """
    You shouldn't use this class directly.

    It's used internally to recognize when a default value has been overwritten, even
    if the overridden default value was truthy.
    """

    def __init__(self, value: Any):
        self.value = value

    def __bool__(self) -> bool:
        return bool(self.value)

    def __eq__(self, o: object) -> bool:
        return isinstance(o, DefaultPlaceholder) and o.value == self.value


DefaultType = TypeVar("DefaultType")


def Default(value: DefaultType) -> DefaultType:
    """
    You shouldn't use this function directly.

    It's used internally to recognize when a default value has been overwritten, even
    if the overridden default value was truthy.
    """
    return DefaultPlaceholder(value)  # type: ignore
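
# Usage sketch (illustrative addition, not part of the upstream module): how
# Default()/DefaultPlaceholder distinguish "left at default" from "explicitly
# set", even when the default itself is truthy.
#
#     from fastapi.datastructures import Default, DefaultPlaceholder
#
#     default_flag = Default(True)  # wrapped default; still truthy
#     assert bool(default_flag)
#     assert isinstance(default_flag, DefaultPlaceholder)
#     # A user-supplied value is a plain object, so the isinstance check
#     # tells the two apart:
#     assert not isinstance(True, DefaultPlaceholder)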
1,905
Python
32.438596
85
0.700262
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/responses.py
from typing import Any

from starlette.responses import FileResponse as FileResponse  # noqa
from starlette.responses import HTMLResponse as HTMLResponse  # noqa
from starlette.responses import JSONResponse as JSONResponse  # noqa
from starlette.responses import PlainTextResponse as PlainTextResponse  # noqa
from starlette.responses import RedirectResponse as RedirectResponse  # noqa
from starlette.responses import Response as Response  # noqa
from starlette.responses import StreamingResponse as StreamingResponse  # noqa

try:
    import ujson
except ImportError:  # pragma: nocover
    ujson = None  # type: ignore

try:
    import orjson
except ImportError:  # pragma: nocover
    orjson = None  # type: ignore


class UJSONResponse(JSONResponse):
    def render(self, content: Any) -> bytes:
        assert ujson is not None, "ujson must be installed to use UJSONResponse"
        return ujson.dumps(content, ensure_ascii=False).encode("utf-8")


class ORJSONResponse(JSONResponse):
    media_type = "application/json"

    def render(self, content: Any) -> bytes:
        assert orjson is not None, "orjson must be installed to use ORJSONResponse"
        return orjson.dumps(
            content, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
        )
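
# Usage sketch (illustrative addition, not part of the upstream module):
# selecting ORJSONResponse as a route's response class. This assumes the
# optional `orjson` package is installed.
#
#     from fastapi import FastAPI
#     from fastapi.responses import ORJSONResponse
#
#     app = FastAPI()
#
#     @app.get("/numbers", response_class=ORJSONResponse)
#     async def numbers() -> list:
#         return [1, 2, 3]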
1,279
Python
33.594594
83
0.745895
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/background.py
from starlette.background import BackgroundTasks as BackgroundTasks # noqa
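
# Usage sketch (illustrative addition, not part of the upstream module): the
# re-exported BackgroundTasks in a route. `write_log` and "log.txt" are
# hypothetical.
#
#     from fastapi import BackgroundTasks, FastAPI
#
#     app = FastAPI()
#
#     def write_log(message: str) -> None:
#         with open("log.txt", "a") as log:
#             log.write(message)
#
#     @app.post("/send/{email}")
#     async def send(email: str, background_tasks: BackgroundTasks):
#         background_tasks.add_task(write_log, f"notification sent to {email}\n")
#         return {"message": "Notification sent in the background"}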
76
Python
37.499981
75
0.855263