__doc__ = """
SCons compatibility package for old Python versions
This subpackage holds modules that provide backwards-compatible
implementations of various things that we'd like to use in SCons but which
only show up in later versions of Python than the early, old version(s)
we still support.
Other code will not generally reference things in this package through
the SCons.compat namespace. The modules included here add things to
the builtins namespace or the global module list so that the rest
of our code can use the objects and names imported here regardless of
Python version.
Simply enough, things that go in the builtins namespace come from
our _scons_builtins module.
The rest of the things here will be in individual compatibility modules
that are either: 1) suitably modified copies of the future modules that
we want to use; or 2) backwards compatible re-implementations of the
specific portions of a future module's API that we want to use.
GENERAL WARNINGS: Implementations of functions in the SCons.compat
modules are *NOT* guaranteed to be fully compliant with these functions in
later versions of Python. We are only concerned with adding functionality
that we actually use in SCons, so be wary if you lift this code for
other uses. (That said, making these more nearly the same as later,
official versions is still a desirable goal, we just don't need to be
obsessive about it.)
We name the compatibility modules with an initial '_scons_' (for example,
_scons_subprocess.py is our compatibility module for subprocess) so
that we can still try to import the real module name and fall back to
our compatibility module if we get an ImportError. The import_as()
function defined below loads the module under the "real" name (without the
'_scons_' prefix), after which all of the "import {module}" statements in the
rest of our code will find our pre-loaded compatibility module.
"""
__revision__ = "src/engine/SCons/compat/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import sys
import imp # Use the "imp" module to protect imports from fixers.
def import_as(module, name):
"""
Imports the specified module (from our local directory) as the
specified name, returning the loaded module object.
"""
dir = os.path.split(__file__)[0]
return imp.load_module(name, *imp.find_module(module, [dir]))
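# Illustrative sketch (not part of the original source): import_as() loads a
# bundled compatibility module under its standard-library name, so that later
# "import <module>" statements elsewhere in SCons find the pre-loaded module.
# For example, mirroring the subprocess fallback used further down:
#
#     try:
#         import subprocess
#     except ImportError:
#         subprocess = import_as('_scons_subprocess', 'subprocess')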
def rename_module(new, old):
"""
Attempts to import the old module and load it under the new name.
Used for purely cosmetic name changes in Python 3.x.
"""
try:
sys.modules[new] = imp.load_module(old, *imp.find_module(old))
return True
except ImportError:
return False
rename_module('builtins', '__builtin__')
import _scons_builtins
try:
import hashlib
except ImportError:
# Pre-2.5 Python has no hashlib module.
try:
import_as('_scons_hashlib', 'hashlib')
except ImportError:
# If we failed importing our compatibility module, it probably
# means this version of Python has no md5 module. Don't do
# anything and let the higher layer discover this fact, so it
# can fall back to using timestamp.
pass
try:
set
except NameError:
# Pre-2.4 Python has no native set type
import_as('_scons_sets', 'sets')
import builtins, sets
builtins.set = sets.Set
try:
import collections
except ImportError:
# Pre-2.4 Python has no collections module.
import_as('_scons_collections', 'collections')
else:
try:
collections.UserDict
except AttributeError:
exec('from UserDict import UserDict as _UserDict')
collections.UserDict = _UserDict
del _UserDict
try:
collections.UserList
except AttributeError:
exec('from UserList import UserList as _UserList')
collections.UserList = _UserList
del _UserList
try:
collections.UserString
except AttributeError:
exec('from UserString import UserString as _UserString')
collections.UserString = _UserString
del _UserString
try:
import io
except ImportError:
# Pre-2.6 Python has no io module.
import_as('_scons_io', 'io')
try:
os.devnull
except AttributeError:
# Pre-2.4 Python has no os.devnull attribute
_names = sys.builtin_module_names
if 'posix' in _names:
os.devnull = '/dev/null'
elif 'nt' in _names:
os.devnull = 'nul'
os.path.devnull = os.devnull
try:
os.path.lexists
except AttributeError:
# Pre-2.4 Python has no os.path.lexists function
def lexists(path):
return os.path.exists(path) or os.path.islink(path)
os.path.lexists = lexists
# When we're using the '-3' option during regression tests, importing
# cPickle gives a warning no matter how it's done, so always use the
# real pickle module, whether it's fast or not.
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is None:
# Not a regression test with '-3', so try to use faster version.
# In 3.x, 'pickle' automatically loads the fast version if available.
rename_module('pickle', 'cPickle')
# In 3.x, 'profile' automatically loads the fast version if available.
rename_module('profile', 'cProfile')
# Before Python 3.0, the 'queue' module was named 'Queue'.
rename_module('queue', 'Queue')
# Before Python 3.0, the 'winreg' module was named '_winreg'
rename_module('winreg', '_winreg')
try:
import subprocess
except ImportError:
# Pre-2.4 Python has no subprocess module.
import_as('_scons_subprocess', 'subprocess')
try:
sys.intern
except AttributeError:
# Pre-2.6 Python has no sys.intern() function.
import builtins
try:
sys.intern = builtins.intern
except AttributeError:
# Pre-2.x Python has no builtin intern() function.
def intern(x):
return x
sys.intern = intern
del intern
try:
sys.maxsize
except AttributeError:
# Pre-2.6 Python has no sys.maxsize attribute
# Wrapping sys in () is silly, but protects it from 2to3 renames fixer
sys.maxsize = (sys).maxint
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is not None:
# We can't apply the 'callable' fixer until the floor is 2.6, but the
# '-3' option to Python 2.6 and 2.7 generates almost ten thousand
# warnings. This hack allows us to run regression tests with the '-3'
# option by replacing the callable() built-in function with a hack
# that performs the same function but doesn't generate the warning.
# Note that this hack is ONLY intended to be used for regression
# testing, and should NEVER be used for real runs.
from types import ClassType
def callable(obj):
if hasattr(obj, '__call__'): return True
if isinstance(obj, (ClassType, type)): return True
return False
import builtins
builtins.callable = callable
del callable
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# File: /sc0ns-2.2.0-1.zip/sc0ns-2.2.0-1/SCons/compat/__init__.py (source: pypi)
__revision__ = "src/engine/SCons/Scanner/Dir.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
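# Hedged usage sketch (assumed, not from the original source): these factories
# build prototype Scanner objects that SCons attaches to directory Nodes, e.g.:
#
#     disk_scanner = DirScanner()        # scans directories for on-disk files
#     entry_scanner = DirEntryScanner()  # only walks the in-memory entries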
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# File: /sc0ns-2.2.0-1.zip/sc0ns-2.2.0-1/SCons/Scanner/Dir.py (source: pypi)
# $Id: sc14n.py $
# $Date: 2019-12-28 20:53:00 $
# ************************** LICENSE *****************************************
# Copyright (C) 2017-19 David Ireland, DI Management Services Pty Limited.
# <www.di-mgt.com.au> <www.cryptosys.net>
# This code is provided 'as-is' without any express or implied warranty.
# Free license is hereby granted to use this code as part of an application
# provided this license notice is left intact. You are *not* licensed to
# share any of this code in any form of mass distribution, including, but not
# limited to, reposting on other web sites or in any source code repository.
# ****************************************************************************
# Requires `Sc14n` to be installed on your system,
# available from <https://cryptosys.net/sc14n/>.
from ctypes import windll, create_string_buffer, c_char_p, c_int
__version__ = "2.1.1"
# Version 2.1.1 is version 2.1.0 converted from Python 2 to Python 3.
# OUR EXPORTED CLASSES
__all__ = (
'C14n',
'Tran', 'TranMethod', 'DigAlg', 'AdvOptions',
'Gen', 'Err',
'Error',
)
# Our global DLL object
_didll = windll.diSc14n
def _isanint(v):
try: v = int(v)
except (ValueError, TypeError): pass
return isinstance(v, int)
class Error(Exception):
"""Raised when a call to a core library function returns an error,
or some obviously wrong parameter is detected."""
# Google Python Style Guide: "The base exception for a module should be called Error."
def __init__(self, value):
"""Store the error value: an integer error code or a message string."""
self.value = value
def __str__(self):
"""Behave differently if value is an integer or not."""
if _isanint(self.value):
n = int(self.value)
s1 = "ERROR CODE %d: %s" % (n, Err.error_lookup(n))
else:
s1 = "ERROR: %s" % self.value
se = Err.last_error()
return "%s%s" % (s1, ": " + se if se else "")
class Gen:
"""General info about the core library DLL."""
@staticmethod
def version():
"""Return the release version of the core library DLL as an integer value."""
return _didll.SC14N_Gen_Version()
@staticmethod
def compile_time():
"""Return date and time the core library DLL was last compiled."""
nchars = _didll.SC14N_Gen_CompileTime(None, 0)
buf = create_string_buffer(nchars + 1)
nchars = _didll.SC14N_Gen_CompileTime(buf, nchars)
return buf.value.decode()
@staticmethod
def module_name():
"""Return full path name of the current process's core library DLL."""
nchars = _didll.SC14N_Gen_ModuleName(None, 0, 0)
buf = create_string_buffer(nchars + 1)
nchars = _didll.SC14N_Gen_ModuleName(buf, nchars, 0)
return buf.value.decode()
@staticmethod
def core_platform():
"""Return the platform of the core library DLL: ``Win32`` or ``Win64``."""
nchars = _didll.SC14N_Gen_Platform(None, 0)
buf = create_string_buffer(nchars + 1)
nchars = _didll.SC14N_Gen_Platform(buf, nchars)
return buf.value.decode()[:nchars]
@staticmethod
def licence_type():
"""Return licence type: ``D`` = Developer ``T`` = Trial."""
n = _didll.SC14N_Gen_LicenceType()
return chr(n)
class Err():
"""Details of errors returned by the core library."""
@staticmethod
def last_error():
"""Return the last error message set by the toolkit, if any."""
nchars = _didll.SC14N_Err_LastError(None, 0)
buf = create_string_buffer(nchars + 1)
nchars = _didll.SC14N_Err_LastError(buf, nchars)
return buf.value.decode()
@staticmethod
def error_lookup(n):
"""Return a description of error code ``n``."""
nchars = _didll.SC14N_Err_ErrorLookup(None, 0, c_int(n))
buf = create_string_buffer(nchars + 1)
nchars = _didll.SC14N_Err_ErrorLookup(buf, nchars, c_int(n))
return buf.value.decode()
class DigAlg:
"""Message digest algorithms."""
DEFAULT = 0 #: Use default digest algorithm.
SHA1 = 0x0 #: Use SHA-1 digest (default)
SHA256 = 0x2000 #: Use SHA-256 digest
class Tran:
"""Transformation options.
**See also:** remarks for :py:func:`C14n.file2file`.
"""
ENTIRE = 0 #: Transform the entire document.
OMITBYTAG = 0x01 #: Omit (exclude) the element with the given tag name.
SUBSETBYTAG = 0x02 #: Transform the subset with the given tag name.
OMITBYID = 0x11 #: Omit (exclude) the element with the given Id.
SUBSETBYID = 0x12 #: Transform the subset with the given Id.
class TranMethod:
"""Transformation methods."""
INCLUSIVE = 0 #: Inclusive c14n without comments from RFC 3076 (default).
EXCLUSIVE = 0x100 #: Exclusive c14n without comments from RFC 3741.
INCLUSIVE_WITHCOMMENTS = 0x800 #: Inclusive C14N with comments from RFC 3076.
EXCLUSIVE_WITHCOMMENTS = 0x900 #: Exclusive C14N with comments from RFC 3741.
class AdvOptions:
"""Advanced option flags."""
DEFAULT = 0 #: Use default options.
FLATTEN = 0x10000 #: Flatten the XML - remove all ignorable whitespace between tags.
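# Illustrative note (not in the original source): the option classes above are
# plain integer flags, and the C14n methods below combine them by simple
# addition before passing them to the core DLL (the digest methods also add a
# DigAlg value), e.g.:
#
#     opts = Tran.SUBSETBYTAG + TranMethod.EXCLUSIVE + AdvOptions.FLATTEN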
class C14n:
"""Perform C14N transformation of XML document."""
@staticmethod
def file2file(outfile, xmlfile, nameorid="", tranopt=Tran.ENTIRE, tranmethod=TranMethod.INCLUSIVE, exclparams="", advopts=AdvOptions.DEFAULT):
"""Perform C14N transformation of XML document (file-to-file).
Args:
outfile (str): Name of output file to create.
xmlfile (str): Name of input XML file.
nameorid (str): To specify the tag name or Id.
tranopt (Tran): Transformation option.
tranmethod (TranMethod): Transformation method.
exclparams (str): InclusiveNamespaces PrefixList parameter for exclusive c14n.
advopts (AdvOptions): Advanced option flags.
Returns:
bool: True if successful, False otherwise.
Remarks:
Use the ``nameorid`` parameter to specify the element of the XML document to include or exclude.
With options :py:const:`Tran.OMITBYTAG` or :py:const:`Tran.SUBSETBYTAG`, ``nameorid`` specifies the element's tag name.
* By default, the first element with a matching tag name will be chosen.
* To specify the Nth element, write as ``tagname[N]`` where ``N=1,2,3,...``
With options :py:const:`Tran.OMITBYID` or :py:const:`Tran.SUBSETBYID`, ``nameorid`` specifies the element's Id.
* The default Id attribute name is ``Id``, so the argument ``myvalue`` will find the element with attribute ``Id="myvalue"``.
* To use a different attribute name - for example ``ID`` - write in the form ``ID=myvalue`` with no quotes.
Exactly one element will be excluded or included.
Tag names and Id values are case sensitive.
It is an error (`NO_DATA_ERROR`) if no matching element is found.
Examples:
>>> # Example 1. Excludes the first element with the tag name <Signature>
>>> C14n.file2file("c14nfile1.txt", "input.xml", "Signature", Tran.OMITBYTAG)
True
>>> # Example 2. Finds and transforms the first element with the tag name <SignedInfo>
>>> C14n.file2file("c14nfile2.txt", "input.xml", "SignedInfo", Tran.SUBSETBYTAG)
True
>>> # Example 3. Finds and transforms the third element with the tag name <Data>
>>> C14n.file2file("c14nfile3.txt", "input.xml", "Data[3]", Tran.SUBSETBYTAG)
True
>>> # Example 4. Finds and transforms the element with attribute Id="foo"
>>> C14n.file2file("c14nfile4.txt", "input.xml", "foo", Tran.SUBSETBYID)
True
>>> # Example 5. Finds and transforms the element with attribute ID="bar"
>>> C14n.file2file("c14nfile5.txt", "input.xml", "ID=bar", Tran.SUBSETBYID)
True
>>> # Example 6. Excludes element with attribute Id="thesig"
>>> C14n.file2file("c14nfile6.txt", "input.xml", "thesig", Tran.OMITBYID)
True
"""
opts = int(tranopt) + int(tranmethod) + int(advopts)
n = _didll.C14N_File2File(outfile.encode(), xmlfile.encode(), nameorid.encode(), exclparams.encode(), opts)
if (n != 0): raise Error(n)
return (n == 0)
@staticmethod
def file2string(xmlfile, nameorid="", tranopt=Tran.ENTIRE, tranmethod=TranMethod.INCLUSIVE, exclparams="", advopts=AdvOptions.DEFAULT):
"""Perform C14N transformation of XML document (file-to-string).
Args:
xmlfile (str): Name of input XML file.
nameorid (str): To specify the tag name or Id. See remarks for :py:func:`C14n.file2file`.
tranopt (Tran): Transformation option.
tranmethod (TranMethod): Transformation method.
exclparams (str): InclusiveNamespaces PrefixList parameter for exclusive c14n.
advopts (AdvOptions): Advanced option flags.
Returns:
str: UTF-8-encoded string.
"""
opts = int(tranopt) + int(tranmethod) + int(advopts)
nc = _didll.C14N_File2String(None, 0, xmlfile.encode(), nameorid.encode(), exclparams.encode(), opts)
if (nc < 0): raise Error(-nc)
if (nc == 0): return ""
buf = create_string_buffer(nc + 1)
nc = _didll.C14N_File2String(buf, nc, xmlfile.encode(), nameorid.encode(), exclparams.encode(), opts)
return buf.value.decode('utf-8')
@staticmethod
def file2digest(xmlfile, nameorid="", tranopt=Tran.ENTIRE, digalg=0, tranmethod=TranMethod.INCLUSIVE, exclparams="", advopts=AdvOptions.DEFAULT):
"""Compute digest value of C14N transformation of XML document (file-to-digest).
Args:
xmlfile (str): Name of input XML file.
nameorid (str): To specify the tag name or Id. See remarks for :py:func:`C14n.file2file`.
tranopt (Tran): Transformation option.
digalg (DigAlg): Digest algorithm.
tranmethod (TranMethod): Transformation method.
exclparams (str): InclusiveNamespaces PrefixList parameter for exclusive c14n.
advopts (AdvOptions): Advanced option flags.
Returns:
str: Message digest in base64-encoded string.
"""
opts = int(tranopt) + int(tranmethod) + int(digalg) + int(advopts) # Unexpected type warning?
nc = _didll.C14N_File2Digest(None, 0, xmlfile.encode(), nameorid.encode(), exclparams.encode(), opts)
if (nc < 0): raise Error(-nc)
if (nc == 0): return ""
buf = create_string_buffer(nc + 1)
nc = _didll.C14N_File2Digest(buf, nc, xmlfile.encode(), nameorid.encode(), exclparams.encode(), opts)
return buf.value.decode()
@staticmethod
def string2string(xmldata, nameorid="", tranopt=Tran.ENTIRE, tranmethod=TranMethod.INCLUSIVE, exclparams="", advopts=AdvOptions.DEFAULT):
"""Perform C14N transformation of XML document (string-to-string).
Args:
xmldata (str): XML data to be processed.
nameorid (str): To specify the tag name or Id. See remarks for :py:func:`C14n.file2file`.
tranopt (Tran): Transformation option.
tranmethod (TranMethod): Transformation method.
exclparams (str): InclusiveNamespaces PrefixList parameter for exclusive c14n.
advopts (AdvOptions): Advanced option flags.
Returns:
str: UTF-8-encoded string.
"""
opts = int(tranopt) + int(tranmethod) + int(advopts)
d = xmldata.encode()
nc = _didll.C14N_String2String(None, 0, d, len(d), nameorid.encode(), exclparams.encode(), opts)
if (nc < 0): raise Error(-nc)
if (nc == 0): return ""
buf = create_string_buffer(nc + 1)
nc = _didll.C14N_String2String(buf, nc, d, len(d), nameorid.encode(), exclparams.encode(), opts)
return buf.value.decode('utf-8')
@staticmethod
def string2digest(xmldata, nameorid="", tranopt=Tran.ENTIRE, digalg=0, tranmethod=TranMethod.INCLUSIVE, exclparams="", advopts=AdvOptions.DEFAULT):
"""Compute digest value of C14N transformation of XML document (string-to-digest).
Args:
xmldata (str): XML data to be processed.
nameorid (str): To specify the tag name or Id. See remarks for :py:func:`C14n.file2file`.
tranopt (Tran): Transformation option.
digalg (DigAlg): Digest algorithm.
tranmethod (TranMethod): Transformation method.
exclparams (str): InclusiveNamespaces PrefixList parameter for exclusive c14n.
advopts (AdvOptions): Advanced option flags.
Returns:
str: Message digest in base64-encoded string.
"""
opts = int(tranopt) + int(tranmethod) + int(digalg) + int(advopts) # Unexpected type warning?
d = xmldata.encode()
nc = _didll.C14N_String2Digest(None, 0, d, len(d), nameorid.encode(), exclparams.encode(), opts)
if (nc < 0): raise Error(-nc)
if (nc == 0): return ""
buf = create_string_buffer(nc + 1)
nc = _didll.C14N_String2Digest(buf, nc, d, len(d), nameorid.encode(), exclparams.encode(), opts)
return buf.value.decode()
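# Hedged usage sketch (assumes the Sc14n DLL is available; the XML snippet is
# hypothetical):
#
#     xml = "<a><b>1</b></a>"
#     canon = C14n.string2string(xml)                         # canonical XML
#     digest = C14n.string2digest(xml, digalg=DigAlg.SHA256)  # base64 digest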
class _NotUsed:
"""Dummy for parsing."""
pass
# PROTOTYPES (derived from diSc14n.h)
# If wrong argument type is passed, these will raise an `ArgumentError` exception
# ArgumentError: argument 1: <type 'exceptions.TypeError'>: wrong type
_didll.SC14N_Gen_Version.argtypes = []
_didll.SC14N_Gen_CompileTime.argtypes = [c_char_p, c_int]
_didll.SC14N_Gen_ModuleName.argtypes = [c_char_p, c_int, c_int]
_didll.SC14N_Gen_LicenceType.argtypes = []
_didll.SC14N_Gen_Platform.argtypes = [c_char_p, c_int]
_didll.SC14N_Err_LastError.argtypes = [c_char_p, c_int]
_didll.SC14N_Err_ErrorLookup.argtypes = [c_char_p, c_int, c_int]
_didll.C14N_File2File.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int]
_didll.C14N_File2String.argtypes = [c_char_p, c_int, c_char_p, c_char_p, c_char_p, c_int]
_didll.C14N_File2Digest.argtypes = [c_char_p, c_int, c_char_p, c_char_p, c_char_p, c_int]
_didll.C14N_String2String.argtypes = [c_char_p, c_int, c_char_p, c_int, c_char_p, c_char_p, c_int]
_didll.C14N_String2Digest.argtypes = [c_char_p, c_int, c_char_p, c_int, c_char_p, c_char_p, c_int]
# File: /sc14npy-2.1.1.zip/sc14npy-2.1.1/sc14n.py (source: pypi)
from typing import List, Dict, Set, FrozenSet, Tuple, Any, Optional, Union, Sequence, Generator # mypy type checking
from copy import deepcopy
import itertools
from .position import Point2, Size, Rect
from .pixel_map import PixelMap
from .player import Player
class Ramp:
def __init__(self, points: Set[Point2], game_info: "GameInfo"):
self._points: Set[Point2] = points
self.__game_info = game_info
# tested by printing actual building locations vs calculated depot positions
self.x_offset = 0.5 # might be errors with the pixelmap?
self.y_offset = -0.5
@property
def _height_map(self):
return self.__game_info.terrain_height
@property
def _placement_grid(self):
return self.__game_info.placement_grid
@property
def size(self) -> int:
return len(self._points)
def height_at(self, p: Point2) -> int:
return self._height_map[p]
@property
def points(self) -> Set[Point2]:
return self._points.copy()
@property
def upper(self) -> Set[Point2]:
""" Returns the upper points of a ramp. """
max_height = max([self.height_at(p) for p in self._points])
return {
p
for p in self._points
if self.height_at(p) == max_height
}
@property
def upper2_for_ramp_wall(self) -> Set[Point2]:
""" Returns the 2 upper ramp points of the main base ramp required for the supply depot and barracks placement properties used in this file. """
upper2 = sorted(self.upper, key=lambda x: x.distance_to(self.bottom_center), reverse=True)
return set(upper2[:2])
@property
def top_center(self) -> Point2:
pos = Point2((sum([p.x for p in self.upper]) / len(self.upper), \
sum([p.y for p in self.upper]) / len(self.upper)))
return pos
@property
def lower(self) -> Set[Point2]:
min_height = min([self.height_at(p) for p in self._points])
return {
p
for p in self._points
if self.height_at(p) == min_height
}
@property
def bottom_center(self) -> Point2:
pos = Point2((sum([p.x for p in self.lower]) / len(self.lower), \
sum([p.y for p in self.lower]) / len(self.lower)))
return pos
@property
def barracks_in_middle(self) -> Point2:
""" Barracks position in the middle of the 2 depots """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset))
p2 = points.pop().offset((self.x_offset, self.y_offset))
# Offset from top point to barracks center is (2, 1)
intersects = p1.circle_intersection(p2, (2**2 + 1**2)**0.5)
anyLowerPoint = next(iter(self.lower))
return max(intersects, key=lambda p: p.distance_to(anyLowerPoint))
raise Exception('Not implemented. Trying to access a ramp that has the wrong number of upper points.')
@property
def depot_in_middle(self) -> Point2:
""" Depot in the middle of the 3 depots """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset)) # still an error with pixelmap?
p2 = points.pop().offset((self.x_offset, self.y_offset))
# Offset from top point to depot center is (1.5, 0.5)
intersects = p1.circle_intersection(p2, (1.5**2 + 0.5**2)**0.5)
anyLowerPoint = next(iter(self.lower))
return max(intersects, key=lambda p: p.distance_to(anyLowerPoint))
raise Exception('Not implemented. Trying to access a ramp that has the wrong number of upper points.')
@property
def corner_depots(self) -> Set[Point2]:
""" Finds the 2 depot positions on the outside """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset)) # still an error with pixelmap?
p2 = points.pop().offset((self.x_offset, self.y_offset))
center = p1.towards(p2, p1.distance_to(p2) / 2)
depotPosition = self.depot_in_middle
# Offset from middle depot to corner depots is (2, 1)
intersects = center.circle_intersection(depotPosition, (2**2 + 1**2)**0.5)
return intersects
raise Exception('Not implemented. Trying to access a ramp that has the wrong number of upper points.')
@property
def barracks_can_fit_addon(self) -> bool:
""" Test if a barracks can fit an addon at natural ramp """
# https://i.imgur.com/4b2cXHZ.png
if len(self.upper2_for_ramp_wall) == 2:
return self.barracks_in_middle.x + 1 > max(self.corner_depots, key=lambda depot: depot.x).x
raise Exception('Not implemented. Trying to access a ramp that has the wrong number of upper points.')
@property
def barracks_correct_placement(self) -> Point2:
""" Corrected placement so that an addon can fit """
if len(self.upper2_for_ramp_wall) == 2:
if self.barracks_can_fit_addon:
return self.barracks_in_middle
else:
return self.barracks_in_middle.offset((-2, 0))
raise Exception('Not implemented. Trying to access a ramp that has the wrong number of upper points.')
class GameInfo(object):
def __init__(self, proto):
# TODO: this might require an update during the game because placement grid and playable grid are greyed out on minerals, start locations and ramps (debris)
self._proto = proto
self.players: List[Player] = [Player.from_proto(p) for p in proto.player_info]
self.map_size: Size = Size.from_proto(proto.start_raw.map_size)
self.pathing_grid: PixelMap = PixelMap(proto.start_raw.pathing_grid)
self.terrain_height: PixelMap = PixelMap(proto.start_raw.terrain_height)
self.placement_grid: PixelMap = PixelMap(proto.start_raw.placement_grid)
self.playable_area = Rect.from_proto(proto.start_raw.playable_area)
self.map_ramps: List[Ramp] = self._find_ramps()
self.player_races: Dict[int, "Race"] = {p.player_id: p.race_actual or p.race_requested for p in proto.player_info}
self.start_locations: List[Point2] = [Point2.from_proto(sl) for sl in proto.start_raw.start_locations]
self.player_start_location: Optional[Point2] = None # Filled later by BotAI._prepare_first_step
@property
def map_center(self) -> Point2:
return self.playable_area.center
def _find_ramps(self) -> List[Ramp]:
"""Calculate (self.pathing_grid - self.placement_grid) (for sets) and then find ramps by comparing heights."""
rampDict = {
Point2((x, y)): self.pathing_grid[(x, y)] == 0 and self.placement_grid[(x, y)] == 0
for x in range(self.pathing_grid.width)
for y in range(self.pathing_grid.height)
}
rampPoints = {p for p in rampDict if rampDict[p]} # filter only points part of ramp
rampGroups = self._find_groups(rampPoints)
return [Ramp(group, self) for group in rampGroups]
def _find_groups(self, points: Set[Point2], minimum_points_per_group: int=8, max_distance_between_points: int=2) -> List[Set[Point2]]:
""" From a set/list of points, this function will try to group points together """
foundGroups = []
currentGroup = set()
newlyAdded = set()
pointsPool = set(points)
while pointsPool or currentGroup:
if not currentGroup:
randomPoint = pointsPool.pop()
currentGroup.add(randomPoint)
newlyAdded.add(randomPoint)
newlyAddedOld = newlyAdded
newlyAdded = set()
for p1 in newlyAddedOld:
# create copy as we change set size during iteration
for p2 in pointsPool.copy():
if abs(p1.x - p2.x) + abs(p1.y - p2.y) <= max_distance_between_points:
currentGroup.add(p2)
newlyAdded.add(p2)
pointsPool.discard(p2)
# Check if all connected points were found
if not newlyAdded:
# Add to group if number of points reached threshold - discard group if not enough points
if len(currentGroup) >= minimum_points_per_group:
foundGroups.append(currentGroup)
currentGroup = set()
""" Returns groups of points as list
[{p1, p2, p3}, {p4, p5, p6, p7, p8}]
"""
return foundGroups
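# Illustrative sketch (hypothetical values, not from the original source):
# grouping uses Manhattan distance, so nearby points cluster together while
# far-away points end up in separate, possibly discarded, groups:
#
#     points = {Point2((0, 0)), Point2((1, 1)), Point2((50, 50))}
#     groups = game_info._find_groups(points, minimum_points_per_group=2)
#     # -> [{Point2((0, 0)), Point2((1, 1))}]  (the lone far point is dropped)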
# File: /sc2_X-1.0.4-py3-none-any.whl/sc2_X/game_info.py (source: pypi)
import enum
from s2clientprotocol import (
sc2_Xapi_pb2 as sc_pb,
raw_pb2 as raw_pb,
data_pb2 as data_pb,
common_pb2 as common_pb,
error_pb2 as error_pb
)
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
""" For the list of enums, see here
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2_Xapi/sc2_X_gametypes.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2_Xapi/sc2_X_action.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2_Xapi/sc2_X_unit.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2_Xapi/sc2_X_data.h
"""
CreateGameError = enum.Enum("CreateGameError", sc_pb.ResponseCreateGame.Error.items())
PlayerType = enum.Enum("PlayerType", sc_pb.PlayerType.items())
Difficulty = enum.Enum("Difficulty", sc_pb.Difficulty.items())
Status = enum.Enum("Status", sc_pb.Status.items())
Result = enum.Enum("Result", sc_pb.Result.items())
Alert = enum.Enum("Alert", sc_pb.Alert.items())
ChatChannel = enum.Enum("ChatChannel", sc_pb.ActionChat.Channel.items())
Race = enum.Enum("Race", common_pb.Race.items())
DisplayType = enum.Enum("DisplayType", raw_pb.DisplayType.items())
Alliance = enum.Enum("Alliance", raw_pb.Alliance.items())
CloakState = enum.Enum("CloakState", raw_pb.CloakState.items())
Attribute = enum.Enum("Attribute", data_pb.Attribute.items())
TargetType = enum.Enum("TargetType", data_pb.Weapon.TargetType.items())
Target = enum.Enum("Target", data_pb.AbilityData.Target.items())
ActionResult = enum.Enum("ActionResult", error_pb.ActionResult.items())
race_worker: Dict[Race, UnitTypeId] = {
Race.Protoss: UnitTypeId.PROBE,
Race.Terran: UnitTypeId.SCV,
Race.Zerg: UnitTypeId.DRONE
}
race_townhalls: Dict[Race, Set[UnitTypeId]] = {
Race.Protoss: {UnitTypeId.NEXUS},
Race.Terran: {UnitTypeId.COMMANDCENTER, UnitTypeId.ORBITALCOMMAND, UnitTypeId.PLANETARYFORTRESS},
Race.Zerg: {UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE}
}
warpgate_abilities: Dict[AbilityId, AbilityId] = {
AbilityId.GATEWAYTRAIN_ZEALOT: AbilityId.WARPGATETRAIN_ZEALOT,
AbilityId.GATEWAYTRAIN_STALKER: AbilityId.WARPGATETRAIN_STALKER,
AbilityId.GATEWAYTRAIN_HIGHTEMPLAR: AbilityId.WARPGATETRAIN_HIGHTEMPLAR,
AbilityId.GATEWAYTRAIN_DARKTEMPLAR: AbilityId.WARPGATETRAIN_DARKTEMPLAR,
AbilityId.GATEWAYTRAIN_SENTRY: AbilityId.WARPGATETRAIN_SENTRY,
AbilityId.TRAIN_ADEPT: AbilityId.TRAINWARP_ADEPT
}
race_gas: Dict[Race, UnitTypeId] = {
Race.Protoss: UnitTypeId.ASSIMILATOR,
Race.Terran: UnitTypeId.REFINERY,
Race.Zerg: UnitTypeId.EXTRACTOR
}
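# Hedged usage sketch (not in the original source): these dicts map a Race to
# its race-specific unit types, e.g.:
#
#     race_worker[Race.Terran]    # -> UnitTypeId.SCV
#     race_townhalls[Race.Zerg]   # -> {HATCHERY, LAIR, HIVE}
#     race_gas[Race.Protoss]      # -> UnitTypeId.ASSIMILATOR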
# File: /sc2_X-1.0.4-py3-none-any.whl/sc2_X/data.py (source: pypi)
import random
from .unit import Unit
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
from typing import List, Dict, Set, Tuple, Any, Optional, Union, Callable # mypy type checking
class Units(list):
"""A collection for units. Makes it easy to select units by selectors."""
@classmethod
def from_proto(cls, units, game_data):
return cls(
(Unit(u, game_data) for u in units),
game_data
)
def __init__(self, units, game_data):
super().__init__(units)
self.game_data = game_data
def __call__(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def select(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def __or__(self, other: "Units") -> "Units":
tags = {unit.tag for unit in self}
units = self + [unit for unit in other if unit.tag not in tags]
return Units(units, self.game_data)
def __and__(self, other: "Units") -> "Units":
tags = {unit.tag for unit in self}
units = [unit for unit in other if unit.tag in tags]
return Units(units, self.game_data)
def __sub__(self, other: "Units") -> "Units":
tags = {unit.tag for unit in other}
units = [unit for unit in self if unit.tag not in tags]
return Units(units, self.game_data)
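# Illustrative sketch (hypothetical groups, not from the original source):
# the operators above give Units collections set semantics keyed on unit tags:
#
#     both = group_a | group_b     # union, deduplicated by tag
#     common = group_a & group_b   # units present in both groups
#     rest = group_a - group_b     # units in group_a but not in group_b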
@property
def amount(self) -> int:
return len(self)
@property
def empty(self) -> bool:
return self.amount == 0
@property
def exists(self) -> bool:
return not self.empty
def find_by_tag(self, tag) -> Optional[Unit]:
for unit in self:
if unit.tag == tag:
return unit
return None
def by_tag(self, tag):
unit = self.find_by_tag(tag)
if unit is None:
raise KeyError("Unit not found")
return unit
@property
def first(self) -> Unit:
assert self.exists
return self[0]
def take(self, n: int, require_all: bool=True) -> "Units":
assert (not require_all) or len(self) >= n
return self[:n]
@property
def random(self) -> Unit:
assert self.exists
return random.choice(self)
def random_or(self, other: Any) -> Unit:
if self.exists:
return random.choice(self)
else:
return other
def random_group_of(self, n):
assert 0 <= n <= self.amount
if n == 0:
return self.subgroup([])
elif self.amount == n:
return self
else:
return self.subgroup(random.sample(self, n))
def in_attack_range_of(self, unit: Unit, bonus_distance: Union[int, float]=0) -> "Units":
""" Filters units that are in attack range of the unit in parameter """
return self.filter(lambda x: unit.target_in_range(x, bonus_distance=bonus_distance))
def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]:
""" Returns the distance between the closest unit from this group to the target unit """
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.distance_to_closest([u.position for u in self]) # Note: list comprehension creation is 0-5% faster than set comprehension
def furthest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]:
""" Returns the distance between the furthest unit from this group to the target unit """
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.distance_to_furthest([u.position for u in self])
def closest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.closest(self)
def furthest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.furthest(self)
def closer_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> "Units":
if isinstance(position, Unit):
position = position.position
return self.filter(lambda unit: unit.position.distance_to_point2(position.to2) < distance)
def further_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> "Units":
if isinstance(position, Unit):
position = position.position
return self.filter(lambda unit: unit.position.distance_to_point2(position.to2) > distance)
def subgroup(self, units):
return Units(list(units), self.game_data)
def filter(self, pred: Callable) -> "Units":
return self.subgroup(filter(pred, self))
def sorted(self, keyfn: Callable, reverse: bool=False) -> "Units":
return self.subgroup(sorted(self, key=keyfn, reverse=reverse))
def sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool=False) -> "Units":
""" This function should be a bit faster than using units.sorted(keyfn=lambda u: u.distance_to(position)) """
position = position.position
return self.sorted(keyfn=lambda unit: unit.position._distance_squared(position), reverse=reverse)
def tags_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> "Units":
""" Filters all units that have their tags in the 'other' set/list/dict """
# example: self.units(QUEEN).tags_in(self.queen_tags_assigned_to_do_injects)
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.tag in other)
def tags_not_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> "Units":
""" Filters all units that have their tags not in the 'other' set/list/dict """
# example: self.units(QUEEN).tags_not_in(self.queen_tags_assigned_to_do_injects)
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.tag not in other)
def of_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Filters all units that are of a specific type """
# example: self.units.of_type([ZERGLING, ROACH, HYDRALISK, BROODLORD])
if isinstance(other, UnitTypeId):
other = {other}
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id in other)
def exclude_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Filters all units that are not of a specific type """
# example: self.known_enemy_units.exclude_type([OVERLORD])
if isinstance(other, UnitTypeId):
other = {other}
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id not in other)
def same_tech(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Usage:
'self.units.same_tech(UnitTypeId.COMMANDCENTER)' or 'self.units.same_tech(UnitTypeId.ORBITALCOMMAND)'
returns all CommandCenter, CommandCenterFlying, OrbitalCommand, OrbitalCommandFlying, PlanetaryFortress
This also works with a set/list/dict parameter, e.g. 'self.units.same_tech({UnitTypeId.COMMANDCENTER, UnitTypeId.SUPPLYDEPOT})'
Untested: This should return the equivalents for Hatchery, WarpPrism, Observer, Overseer, SupplyDepot and others
"""
if isinstance(other, UnitTypeId):
other = {other}
tech_alias_types = set(other)
for unitType in other:
tech_alias = self.game_data.units[unitType.value].tech_alias
if tech_alias:
for same in tech_alias:
tech_alias_types.add(same)
return self.filter(lambda unit:
unit.type_id in tech_alias_types
or unit._type_data.tech_alias is not None
and any(same in tech_alias_types for same in unit._type_data.tech_alias))
def same_unit(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Usage:
'self.units.same_tech(UnitTypeId.COMMANDCENTER)'
returns CommandCenter and CommandCenterFlying,
'self.units.same_tech(UnitTypeId.ORBITALCOMMAND)'
returns OrbitalCommand and OrbitalCommandFlying
This also works with a set/list/dict parameter, e.g. 'self.units.same_tech({UnitTypeId.COMMANDCENTER, UnitTypeId.SUPPLYDEPOT})'
Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and others
"""
if isinstance(other, UnitTypeId):
other = {other}
unit_alias_types = set(other)
for unitType in other:
unit_alias = self.game_data.units[unitType.value].unit_alias
if unit_alias:
unit_alias_types.add(unit_alias)
return self.filter(lambda unit:
unit.type_id in unit_alias_types
or unit._type_data.unit_alias is not None
and unit._type_data.unit_alias in unit_alias_types)
@property
def center(self) -> Point2:
""" Returns the central point of all units in this list """
assert self.exists
pos = Point2((sum([unit.position.x for unit in self]) / self.amount, \
sum([unit.position.y for unit in self]) / self.amount))
return pos
@property
def selected(self) -> "Units":
return self.filter(lambda unit: unit.is_selected)
@property
def tags(self) -> Set[int]:
return {unit.tag for unit in self}
@property
def ready(self) -> "Units":
return self.filter(lambda unit: unit.is_ready)
@property
def not_ready(self) -> "Units":
return self.filter(lambda unit: not unit.is_ready)
@property
def noqueue(self) -> "Units":
return self.filter(lambda unit: unit.noqueue)
@property
def idle(self) -> "Units":
return self.filter(lambda unit: unit.is_idle)
@property
def owned(self) -> "Units":
return self.filter(lambda unit: unit.is_mine)
@property
def enemy(self) -> "Units":
return self.filter(lambda unit: unit.is_enemy)
@property
def flying(self) -> "Units":
return self.filter(lambda unit: unit.is_flying)
@property
def not_flying(self) -> "Units":
return self.filter(lambda unit: not unit.is_flying)
@property
def structure(self) -> "Units":
return self.filter(lambda unit: unit.is_structure)
@property
def not_structure(self) -> "Units":
return self.filter(lambda unit: not unit.is_structure)
@property
def gathering(self) -> "Units":
return self.filter(lambda unit: unit.is_gathering)
@property
def returning(self) -> "Units":
return self.filter(lambda unit: unit.is_returning)
@property
def collecting(self) -> "Units":
return self.filter(lambda unit: unit.is_collecting)
@property
def mineral_field(self) -> "Units":
return self.filter(lambda unit: unit.is_mineral_field)
@property
def vespene_geyser(self) -> "Units":
return self.filter(lambda unit: unit.is_vespene_geyser)
@property
def prefer_idle(self) -> "Units":
return self.sorted(lambda unit: unit.is_idle, reverse=True)
def prefer_close_to(self, p: Union[Unit, Point2, Point3]) -> "Units":
return self.sorted(lambda unit: unit.distance_to(p))
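# Hedged usage sketch (assumes a BotAI context; names are illustrative, not
# from the original source): the filters and properties above compose by
# chaining:
#
#     idle_marines = self.units(UnitTypeId.MARINE).idle
#     nearest_townhall = self.townhalls.ready.closest_to(self.start_location)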
class UnitSelection(Units):
def __init__(self, parent, unit_type_id=None):
assert unit_type_id is None or isinstance(unit_type_id, (UnitTypeId, set))
if isinstance(unit_type_id, set):
assert all(isinstance(t, UnitTypeId) for t in unit_type_id)
self.unit_type_id = unit_type_id
super().__init__([u for u in parent if self.matches(u)], parent.game_data)
def matches(self, unit):
if self.unit_type_id is None:
# empty selector matches everything
return True
elif isinstance(self.unit_type_id, set):
return unit.type_id in self.unit_type_id
else:
return self.unit_type_id == unit.type_id
# File: /sc2_X-1.0.4-py3-none-any.whl/sc2_X/units.py (source: pypi)
from .data import PlayerType, Race, Difficulty
from .bot_ai import BotAI
class AbstractPlayer(object):
def __init__(self, type, race=None, difficulty=None):
assert isinstance(type, PlayerType)
if type == PlayerType.Computer:
assert isinstance(difficulty, Difficulty)
elif type == PlayerType.Observer:
assert race is None
assert difficulty is None
else:
assert isinstance(race, Race)
assert difficulty is None
self.type = type
if race is not None:
self.race = race
if type == PlayerType.Computer:
self.difficulty = difficulty
class Human(AbstractPlayer):
def __init__(self, race):
super().__init__(PlayerType.Participant, race)
def __str__(self):
return f"Human({self.race})"
class Bot(AbstractPlayer):
def __init__(self, race, ai):
"""
AI can be None if this player object is just used to inform the
server about player types.
"""
assert isinstance(ai, BotAI) or ai is None
super().__init__(PlayerType.Participant, race)
self.ai = ai
def __str__(self):
return f"Bot({self.race}, {self.ai})"
class Computer(AbstractPlayer):
def __init__(self, race, difficulty=Difficulty.Easy):
super().__init__(PlayerType.Computer, race, difficulty)
def __str__(self):
return f"Computer({self.race}, {self.difficulty})"
class Observer(AbstractPlayer):
def __init__(self):
super().__init__(PlayerType.Observer)
def __str__(self):
return f"Observer()"
class Player(AbstractPlayer):
@classmethod
def from_proto(cls, proto):
if PlayerType(proto.type) == PlayerType.Observer:
return cls(proto.player_id, PlayerType(proto.type), None, None, None)
return cls(
proto.player_id,
PlayerType(proto.type),
Race(proto.race_requested),
Difficulty(proto.difficulty) if proto.HasField("difficulty") else None,
Race(proto.race_actual) if proto.HasField("race_actual") else None
)
def __init__(self, player_id, type, requested_race, difficulty=None, actual_race=None):
super().__init__(type, requested_race, difficulty)
self.id: int = player_id
self.actual_race: Race = actual_race
# File: /sc2_X-1.0.4-py3-none-any.whl/sc2_X/player.py (source: pypi)
from functools import lru_cache, reduce
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
from .data import Attribute, Race
from .unit_command import UnitCommand
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
from .constants import ZERGLING
FREE_MORPH_ABILITY_CATEGORIES = [
"Lower", "Raise", # SUPPLYDEPOT
"Land", "Lift", # Flying buildings
]
def split_camel_case(text) -> list:
"""Splits words from CamelCase text."""
return list(reduce(
lambda a, b: (a + [b] if b.isupper() else a[:-1] + [a[-1] + b]),
text,
[]
))
class GameData(object):
def __init__(self, data):
self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities if AbilityData.id_exists(a.ability_id)}
self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
@lru_cache(maxsize=256)
def calculate_ability_cost(self, ability) -> "Cost":
if isinstance(ability, AbilityId):
ability = self.abilities[ability.value]
elif isinstance(ability, UnitCommand):
ability = self.abilities[ability.ability.value]
assert isinstance(ability, AbilityData), f"C: {ability}"
for unit in self.units.values():
if unit.creation_ability is None:
continue
if not AbilityData.id_exists(unit.creation_ability.id.value):
continue
if unit.creation_ability.is_free_morph:
continue
if unit.creation_ability == ability:
if unit.id == ZERGLING:
# HARD CODED: zerglings are generated in pairs
return Cost(
unit.cost.minerals * 2,
unit.cost.vespene * 2,
unit.cost.time
)
# Correction for morphing units, e.g. orbital would return 550/0 instead of actual 150/0
morph_cost = unit.morph_cost
if morph_cost: # can be None
return morph_cost
# Correction for zerg structures without morph: Extractor would return 75 instead of actual 25
return unit.cost_zerg_corrected
for upgrade in self.upgrades.values():
if upgrade.research_ability == ability:
return upgrade.cost
return Cost(0, 0)
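# Hedged usage sketch (the ability id is assumed, not from the original
# source): per the HARD CODED branch above, the Cost for a zergling-producing
# ability comes back doubled because zerglings are generated in pairs:
#
#     cost = game_data.calculate_ability_cost(AbilityId.LARVATRAIN_ZERGLING)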
class AbilityData(object):
@staticmethod
def id_exists(ability_id: int) -> bool:
assert isinstance(ability_id, int), f"Wrong type: {ability_id} is not int"
return ability_id != 0 and ability_id in (a.value for a in AbilityId)
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
assert self.id != 0
def __repr__(self) -> str:
return f"AbilityData(name={self._proto.button_name})"
@property
def id(self) -> AbilityId:
if self._proto.remaps_to_ability_id:
return AbilityId(self._proto.remaps_to_ability_id)
return AbilityId(self._proto.ability_id)
@property
def link_name(self) -> str:
""" For Stimpack this returns 'BarracksTechLabResearch' """
# TODO: this may be wrong as it returns the same as the property below, ".button_name"
return self._proto.button_name
@property
def button_name(self) -> str:
""" For Stimpack this returns 'Stimpack' """
return self._proto.button_name
@property
def friendly_name(self) -> str:
""" For Stimpack this returns 'Research Stimpack' """
return self._proto.friendly_name
@property
def is_free_morph(self) -> bool:
parts = split_camel_case(self._proto.link_name)
for p in parts:
if p in FREE_MORPH_ABILITY_CATEGORIES:
return True
return False
@property
def cost(self) -> "Cost":
return self._game_data.calculate_ability_cost(self.id)
class UnitTypeData(object):
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
def __repr__(self) -> str:
return "UnitTypeData(name={})".format(self.name)
@property
def id(self) -> UnitTypeId:
return UnitTypeId(self._proto.unit_id)
@property
def name(self) -> str:
return self._proto.name
@property
def creation_ability(self) -> AbilityData:
if self._proto.ability_id == 0:
return None
if self._proto.ability_id not in self._game_data.abilities:
return None
return self._game_data.abilities[self._proto.ability_id]
@property
def attributes(self) -> List[Attribute]:
return self._proto.attributes
def has_attribute(self, attr) -> bool:
assert isinstance(attr, Attribute)
return attr in self.attributes
@property
def has_minerals(self) -> bool:
return self._proto.has_minerals
@property
def has_vespene(self) -> bool:
return self._proto.has_vespene
@property
def cargo_size(self) -> int:
""" How much cargo this unit uses up in cargo_space """
return self._proto.cargo_size
@property
def tech_requirement(self) -> Optional[UnitTypeId]:
""" Tech-building requirement of buildings - may work for units but unreliably """
if self._proto.tech_requirement == 0:
return None
if self._proto.tech_requirement not in self._game_data.units:
return None
return UnitTypeId(self._proto.tech_requirement)
@property
def tech_alias(self) -> Optional[List[UnitTypeId]]:
""" Building tech equality, e.g. OrbitalCommand is the same as CommandCenter,
and Hive is the same as Lair and Hatchery.
For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair]; for SCV, this returns None. """
return_list = []
for tech_alias in self._proto.tech_alias:
if tech_alias in self._game_data.units:
return_list.append(UnitTypeId(tech_alias))
if return_list:
return return_list
return None
@property
def unit_alias(self) -> Optional[UnitTypeId]:
""" Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand.
For a flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand. """
if self._proto.unit_alias == 0:
return None
if self._proto.unit_alias not in self._game_data.units:
return None
return UnitTypeId(self._proto.unit_alias)
@property
def race(self) -> Race:
return Race(self._proto.race)
@property
def cost(self) -> "Cost":
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost,
self._proto.build_time
)
@property
def cost_zerg_corrected(self) -> "Cost":
""" This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
# a = self._game_data.units(UnitTypeId.ZERGLING)
# print(a)
# print(vars(a))
return Cost(
self._proto.mineral_cost - 50,
self._proto.vespene_cost,
self._proto.build_time
)
else:
return self.cost
@property
def morph_cost(self) -> Optional["Cost"]:
""" This returns 150 minerals for OrbitalCommand instead of 550 """
# Fix for BARRACKSREACTOR which has tech alias [REACTOR] which has (0, 0) cost
if self.tech_alias is None or self.tech_alias[0] in {UnitTypeId.TECHLAB, UnitTypeId.REACTOR}:
return None
# Morphing a HIVE would have HATCHERY and LAIR in the tech alias - now subtract HIVE cost from LAIR cost instead of from HATCHERY cost
tech_alias_cost_minerals = max([self._game_data.units[tech_alias.value].cost.minerals for tech_alias in self.tech_alias])
tech_alias_cost_vespene = max([self._game_data.units[tech_alias.value].cost.vespene for tech_alias in self.tech_alias])
return Cost(
self._proto.mineral_cost - tech_alias_cost_minerals,
self._proto.vespene_cost - tech_alias_cost_vespene,
self._proto.build_time
)
class UpgradeData(object):
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
def __repr__(self):
return "UpgradeData({} - research ability: {}, {})".format(self.name, self.research_ability, self.cost)
@property
def name(self) -> str:
return self._proto.name
@property
def research_ability(self) -> Optional[AbilityData]:
if self._proto.ability_id == 0:
return None
if self._proto.ability_id not in self._game_data.abilities:
return None
return self._game_data.abilities[self._proto.ability_id]
@property
def cost(self) -> "Cost":
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost,
self._proto.research_time
)
class Cost(object):
def __init__(self, minerals, vespene, time=None):
self.minerals = minerals
self.vespene = vespene
self.time = time
def __repr__(self) -> str:
return f"Cost({self.minerals}, {self.vespene})"
def __eq__(self, other) -> bool:
return self.minerals == other.minerals and self.vespene == other.vespene
def __ne__(self, other) -> bool:
return self.minerals != other.minerals or self.vespene != other.vespene
# File: /sc2_X-1.0.4-py3-none-any.whl/sc2_X/game_data.py (source: pypi)
from s2clientprotocol import (
sc2_Xapi_pb2 as sc_pb,
common_pb2 as common_pb,
query_pb2 as query_pb,
debug_pb2 as debug_pb,
raw_pb2 as raw_pb,
)
import logging
from sc2_X.ids.ability_id import AbilityId
from sc2_X.ids.unit_typeid import UnitTypeId
logger = logging.getLogger(__name__)
from .cache import method_cache_forever
from .protocol import Protocol, ProtocolError
from .game_info import GameInfo
from .game_data import GameData, AbilityData
from .data import Status, Result
from .data import Race, ActionResult, ChatChannel
from .action import combine_actions
from .position import Point2, Point3
from .unit import Unit
from .units import Units
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
class Client(Protocol):
def __init__(self, ws):
super().__init__(ws)
self.game_step = 8
self._player_id = None
self._game_result = None
self._debug_texts = list()
self._debug_lines = list()
self._debug_boxes = list()
self._debug_spheres = list()
@property
def in_game(self):
return self._status == Status.in_game
async def join_game(self, race=None, observed_player_id=None, portconfig=None):
ifopts = sc_pb.InterfaceOptions(raw=True, score=True)
if race is None:
assert isinstance(observed_player_id, int)
# join as observer
req = sc_pb.RequestJoinGame(
observed_player_id=observed_player_id,
options=ifopts
)
else:
assert isinstance(race, Race)
req = sc_pb.RequestJoinGame(
race=race.value,
options=ifopts
)
if portconfig:
req.shared_port = portconfig.shared
req.server_ports.game_port = portconfig.server[0]
req.server_ports.base_port = portconfig.server[1]
for ppc in portconfig.players:
p = req.client_ports.add()
p.game_port = ppc[0]
p.base_port = ppc[1]
result = await self._execute(join_game=req)
self._game_result = None
self._player_id = result.join_game.player_id
return result.join_game.player_id
async def leave(self):
""" You can use 'await self._client.leave()' to surrender midst game. """
is_resign = self._game_result is None
if is_resign:
# For all clients that can leave, the result of leaving the game is
# either a loss, or the client will ignore the result.
self._game_result = {self._player_id: Result.Defeat}
try:
await self._execute(leave_game=sc_pb.RequestLeaveGame())
except ProtocolError:
if is_resign:
raise
async def save_replay(self, path):
logger.debug("Requesting replay from server")
result = await self._execute(save_replay=sc_pb.RequestSaveReplay())
with open(path, "wb") as f:
f.write(result.save_replay.data)
logger.info(f"Saved replay to {path}")
async def observation(self):
result = await self._execute(observation=sc_pb.RequestObservation())
if (not self.in_game) or len(result.observation.player_result) > 0:
# Sometimes game ends one step before results are available
if len(result.observation.player_result) == 0:
result = await self._execute(observation=sc_pb.RequestObservation())
assert len(result.observation.player_result) > 0
player_id_to_result = {}
for pr in result.observation.player_result:
player_id_to_result[pr.player_id] = Result(pr.result)
self._game_result = player_id_to_result
return result
async def step(self):
""" EXPERIMENTAL: Change self._client.game_step during the step function to increase or decrease steps per second """
result = await self._execute(step=sc_pb.RequestStep(count=self.game_step))
return result
async def get_game_data(self) -> GameData:
result = await self._execute(data=sc_pb.RequestData(
ability_id=True,
unit_type_id=True,
upgrade_id=True
))
return GameData(result.data)
async def get_game_info(self) -> GameInfo:
result = await self._execute(game_info=sc_pb.RequestGameInfo())
return GameInfo(result.game_info)
async def actions(self, actions, game_data, return_successes=False):
if not isinstance(actions, list):
res = await self.actions([actions], game_data, return_successes)
if res:
return res[0]
else:
return None
else:
actions = combine_actions(actions, game_data)
res = await self._execute(action=sc_pb.RequestAction(
actions=[sc_pb.Action(action_raw=a) for a in actions]
))
res = [ActionResult(r) for r in res.action.result]
if return_successes:
return res
else:
return [r for r in res if r != ActionResult.Success]
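    # A hedged usage sketch for `actions` (placeholders, not part of this file:
    # `unit`, `target` and `self._game_data` stand in for objects a bot already
    # holds; typical bots call this indirectly through their `do()` helper):
    #
    #     error = await self._client.actions(unit.attack(target), self._game_data)
    #     if error is not None:
    #         pass  # the single action failed, e.g. ActionResult.NotSupported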
async def query_pathing(self, start: Union[Unit, Point2, Point3], end: Union[Point2, Point3]) -> Optional[Union[int, float]]:
""" Caution: returns 0 when path not found """
assert isinstance(start, (Point2, Unit))
assert isinstance(end, Point2)
if isinstance(start, Point2):
result = await self._execute(query=query_pb.RequestQuery(
pathing=[query_pb.RequestQueryPathing(
start_pos=common_pb.Point2D(x=start.x, y=start.y),
end_pos=common_pb.Point2D(x=end.x, y=end.y)
)]
))
else:
result = await self._execute(query=query_pb.RequestQuery(
pathing=[query_pb.RequestQueryPathing(
unit_tag=start.tag,
end_pos=common_pb.Point2D(x=end.x, y=end.y)
)]
))
distance = float(result.query.pathing[0].distance)
if distance <= 0.0:
return None
return distance
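    # A hedged usage sketch for `query_pathing` (placeholders: `worker` is a
    # Unit, `enemy_start` a Point2):
    #
    #     distance = await self._client.query_pathing(worker, enemy_start)
    #     if distance is None:
    #         pass  # no ground path exists, e.g. towards an island expansion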
async def query_pathings(self, zipped_list: List[List[Union[Unit, Point2, Point3]]]) -> List[Union[float, int]]:
""" Usage: await self.query_pathings([[unit1, target2], [unit2, target2]])
-> returns [distance1, distance2]
Caution: returns 0 when path not found
Might merge this function with the function above
"""
assert isinstance(zipped_list, list)
assert len(zipped_list) > 0
assert isinstance(zipped_list[0], list)
assert len(zipped_list[0]) == 2
assert isinstance(zipped_list[0][0], (Point2, Unit))
assert isinstance(zipped_list[0][1], Point2)
if isinstance(zipped_list[0][0], Point2):
results = await self._execute(query=query_pb.RequestQuery(
pathing=[query_pb.RequestQueryPathing(
start_pos=common_pb.Point2D(x=p1.x, y=p1.y),
end_pos=common_pb.Point2D(x=p2.x, y=p2.y)
) for p1, p2 in zipped_list]
))
else:
results = await self._execute(query=query_pb.RequestQuery(
pathing=[query_pb.RequestQueryPathing(
unit_tag=p1.tag,
end_pos=common_pb.Point2D(x=p2.x, y=p2.y)
) for p1, p2 in zipped_list]
))
results = [float(d.distance) for d in results.query.pathing]
return results
    async def query_building_placement(self, ability: AbilityData, positions: List[Union[Point2, Point3]], ignore_resources: bool=True) -> List[ActionResult]:
        assert isinstance(ability, AbilityData)
result = await self._execute(query=query_pb.RequestQuery(
placements=[query_pb.RequestQueryBuildingPlacement(
ability_id=ability.id.value,
target_pos=common_pb.Point2D(x=position.x, y=position.y)
) for position in positions],
ignore_resource_requirements=ignore_resources
))
return [ActionResult(p.result) for p in result.query.placements]
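    # A hedged usage sketch for `query_building_placement` (placeholders:
    # `build_ability` is an AbilityData of a construction ability, `candidates`
    # a list of Point2 positions to test in one round-trip):
    #
    #     results = await self._client.query_building_placement(build_ability, candidates)
    #     valid = [p for p, r in zip(candidates, results) if r == ActionResult.Success]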
async def query_available_abilities(self, units: Union[List[Unit], "Units"], ignore_resource_requirements: bool=False) -> List[List[AbilityId]]:
""" Query abilities of multiple units """
if not isinstance(units, list):
""" Deprecated, accepting a single unit may be removed in the future, query a list of units instead """
assert isinstance(units, Unit)
units = [units]
input_was_a_list = False
else:
input_was_a_list = True
assert len(units) > 0
result = await self._execute(query=query_pb.RequestQuery(
abilities=[query_pb.RequestQueryAvailableAbilities(
unit_tag=unit.tag) for unit in units],
ignore_resource_requirements=ignore_resource_requirements)
)
""" Fix for bots that only query a single unit """
if not input_was_a_list:
return [[AbilityId(a.ability_id) for a in b.abilities] for b in result.query.abilities][0]
return [[AbilityId(a.ability_id) for a in b.abilities] for b in result.query.abilities]
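    # A hedged usage sketch: querying a whole list of units at once avoids one
    # round-trip per unit (`army` is a placeholder list of Unit objects, and
    # AbilityId.EFFECT_STIM is assumed to exist in this package's id enums):
    #
    #     abilities_per_unit = await self._client.query_available_abilities(army)
    #     for unit, abilities in zip(army, abilities_per_unit):
    #         if AbilityId.EFFECT_STIM in abilities:
    #             pass  # stim is researched and usable by this unit right now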
async def chat_send(self, message: str, team_only: bool):
""" Writes a message to the chat """
ch = ChatChannel.Team if team_only else ChatChannel.Broadcast
        await self._execute(action=sc_pb.RequestAction(
actions=[sc_pb.Action(action_chat=sc_pb.ActionChat(
channel=ch.value,
message=message
))]
))
async def debug_create_unit(self, unit_spawn_commands: List[List[Union[UnitTypeId, int, Point2, Point3]]]):
""" Usage example (will spawn 1 marine in the center of the map for player ID 1):
await self._client.debug_create_unit([[UnitTypeId.MARINE, 1, self._game_info.map_center, 1]]) """
assert isinstance(unit_spawn_commands, list)
assert len(unit_spawn_commands) > 0
assert isinstance(unit_spawn_commands[0], list)
assert len(unit_spawn_commands[0]) == 4
assert isinstance(unit_spawn_commands[0][0], UnitTypeId)
assert 0 < unit_spawn_commands[0][1] # careful, in realtime=True this function may create more units
assert isinstance(unit_spawn_commands[0][2], (Point2, Point3))
assert 1 <= unit_spawn_commands[0][3] <= 2
await self._execute(debug=sc_pb.RequestDebug(
debug=[debug_pb.DebugCommand(create_unit=debug_pb.DebugCreateUnit(
unit_type=unit_type.value,
owner=owner_id,
pos=common_pb.Point2D(x=position.x, y=position.y),
quantity=amount_of_units
)) for unit_type, amount_of_units, position, owner_id in unit_spawn_commands]
))
async def debug_kill_unit(self, unit_tags: Union[Units, List[int], Set[int]]):
if isinstance(unit_tags, Units):
unit_tags = unit_tags.tags
assert len(unit_tags) > 0
await self._execute(debug=sc_pb.RequestDebug(
debug=[debug_pb.DebugCommand(kill_unit=debug_pb.DebugKillUnit(
tag=unit_tags
))]
))
async def move_camera(self, position: Union[Unit, Point2, Point3]):
""" Moves camera to the target position """
assert isinstance(position, (Unit, Point2, Point3))
if isinstance(position, Unit):
position = position.position
await self._execute(action=sc_pb.RequestAction(
            actions=[sc_pb.Action(
action_raw=raw_pb.ActionRaw(
camera_move=raw_pb.ActionRawCameraMove(
center_world_space=common_pb.Point(x=position.x, y=position.y)
)
)
)]
))
async def debug_text(self, texts: Union[str, list], positions: Union[list, set], color=(0, 255, 0), size_px=16):
""" Deprecated, may be removed soon """
if isinstance(positions, (set, list)):
if not positions:
return
if isinstance(texts, str):
texts = [texts] * len(positions)
assert len(texts) == len(positions)
await self._execute(debug=sc_pb.RequestDebug(
debug=[debug_pb.DebugCommand(draw=debug_pb.DebugDraw(
text=[debug_pb.DebugText(
text=t,
color=debug_pb.Color(r=color[0], g=color[1], b=color[2]),
world_pos=common_pb.Point(x=p.x, y=p.y, z=getattr(p, "z", 10)),
size=size_px
) for t, p in zip(texts, positions)]
))]
))
else:
            await self.debug_text([texts], [positions], color, size_px)
def debug_text_simple(self, text: str):
""" Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'. """
self._debug_texts.append(self.to_debug_message(text))
def debug_text_screen(self, text: str, pos: Union[Point2, Point3, tuple, list], color=None, size: int=8):
""" Draws a text on the screen with coordinates 0 <= x, y <= 1. Don't forget to add 'await self._client.send_debug'. """
assert len(pos) >= 2
assert 0 <= pos[0] <= 1
assert 0 <= pos[1] <= 1
pos = Point2((pos[0], pos[1]))
self._debug_texts.append(self.to_debug_message(text, color, pos, size))
def debug_text_2d(self, text: str, pos: Union[Point2, Point3, tuple, list], color=None, size: int=8):
return self.debug_text_screen(text, pos, color, size)
def debug_text_world(self, text: str, pos: Union[Unit, Point2, Point3], color=None, size: int=8):
""" Draws a text at Point3 position. Don't forget to add 'await self._client.send_debug'.
To grab a unit's 3d position, use unit.position3d
Usually the Z value of a Point3 is between 8 and 14 (except for flying units)
"""
if isinstance(pos, Point2) and not isinstance(pos, Point3): # a Point3 is also a Point2
pos = Point3((pos.x, pos.y, 0))
self._debug_texts.append(self.to_debug_message(text, color, pos, size))
def debug_text_3d(self, text: str, pos: Union[Unit, Point2, Point3], color=None, size: int=8):
return self.debug_text_world(text, pos, color, size)
def debug_line_out(self, p0: Union[Unit, Point2, Point3], p1: Union[Unit, Point2, Point3], color=None):
""" Draws a line from p0 to p1. Don't forget to add 'await self._client.send_debug'. """
self._debug_lines.append(debug_pb.DebugLine(
line=debug_pb.Line(p0=self.to_debug_point(p0), p1=self.to_debug_point(p1)),
color=self.to_debug_color(color)))
def debug_box_out(self, p_min: Union[Unit, Point2, Point3], p_max: Union[Unit, Point2, Point3], color=None):
""" Draws a box with p_min and p_max as corners. Don't forget to add 'await self._client.send_debug'. """
self._debug_boxes.append(debug_pb.DebugBox(
min=self.to_debug_point(p_min),
max=self.to_debug_point(p_max),
color=self.to_debug_color(color)
))
def debug_sphere_out(self, p: Union[Unit, Point2, Point3], r: Union[int, float], color=None):
""" Draws a sphere at point p with radius r. Don't forget to add 'await self._client.send_debug'. """
self._debug_spheres.append(debug_pb.DebugSphere(
p=self.to_debug_point(p),
r=r,
color=self.to_debug_color(color)
))
async def send_debug(self):
""" Sends the debug draw execution. Put this after your debug creation functions. """
await self._execute(debug=sc_pb.RequestDebug(
debug=[debug_pb.DebugCommand(draw=debug_pb.DebugDraw(
text=self._debug_texts if len(self._debug_texts) > 0 else None,
lines=self._debug_lines if len(self._debug_lines) > 0 else None,
boxes=self._debug_boxes if len(self._debug_boxes) > 0 else None,
spheres=self._debug_spheres if len(self._debug_spheres) > 0 else None
))]))
self._debug_texts.clear()
self._debug_lines.clear()
self._debug_boxes.clear()
self._debug_spheres.clear()
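    # A hedged end-to-end sketch of the debug draw workflow: the debug_*
    # helpers above only buffer requests, and send_debug flushes them
    # (`unit` is a placeholder Unit):
    #
    #     self._client.debug_text_screen("hello", (0.01, 0.01))
    #     self._client.debug_sphere_out(unit, r=1)
    #     await self._client.send_debug()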
def to_debug_color(self, color):
""" Helper function for color conversion """
if color is None:
return debug_pb.Color(r=255, g=255, b=255)
else:
r = getattr(color, "r", getattr(color, "x", 255))
g = getattr(color, "g", getattr(color, "y", 255))
b = getattr(color, "b", getattr(color, "z", 255))
if max(r, g, b) <= 1:
r *= 255
g *= 255
b *= 255
return debug_pb.Color(r=int(r), g=int(g), b=int(b))
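    # Note on the conversion above: `color` must expose .r/.g/.b (or .x/.y/.z)
    # attributes, e.g. a Point3; components in the 0-1 range are scaled to
    # 0-255. A hedged sketch:
    #
    #     self.to_debug_color(Point3((1, 0, 0)))  # -> debug_pb.Color(r=255, g=0, b=0)
    #     self.to_debug_color(None)               # -> white (255, 255, 255)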
def to_debug_point(self, point: Union[Unit, Point2, Point3]) -> common_pb.Point:
""" Helper function for point conversion """
if isinstance(point, Unit):
point = point.position3d
return common_pb.Point(x=point.x, y=point.y, z=getattr(point, "z", 0))
def to_debug_message(self, text: str, color=None, pos: Optional[Union[Point2, Point3]]=None, size: int=8) -> debug_pb.DebugText:
""" Helper function to create debug texts """
color = self.to_debug_color(color)
pt3d = self.to_debug_point(pos) if isinstance(pos, Point3) else None
virtual_pos = self.to_debug_point(pos) if not isinstance(pos, Point3) else None
return debug_pb.DebugText(
color=color,
text=text,
virtual_pos=virtual_pos,
world_pos=pt3d,
size=size
)
|
/sc2_X-1.0.4-py3-none-any.whl/sc2_X/client.py
| 0.73173 | 0.248249 |
client.py
|
pypi
|
from s2clientprotocol import sc2_Xapi_pb2 as sc_pb, raw_pb2 as raw_pb
from sc2_X.ids.buff_id import BuffId
from .position import Point2, Point3
from .data import Alliance, Attribute, DisplayType, warpgate_abilities, TargetType, Race, CloakState
from .game_data import GameData
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
from . import unit_command
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
class Unit(object):
def __init__(self, proto_data, game_data):
assert isinstance(proto_data, raw_pb.Unit)
assert isinstance(game_data, GameData)
self._proto = proto_data
self._game_data = game_data
@property
def type_id(self) -> UnitTypeId:
return UnitTypeId(self._proto.unit_type)
@property
def _type_data(self) -> "UnitTypeData":
return self._game_data.units[self._proto.unit_type]
@property
def is_snapshot(self) -> bool:
return self._proto.display_type == DisplayType.Snapshot.value
@property
def is_visible(self) -> bool:
return self._proto.display_type == DisplayType.Visible.value
@property
def alliance(self) -> Alliance:
return self._proto.alliance
@property
def is_mine(self) -> bool:
return self._proto.alliance == Alliance.Self.value
@property
def is_enemy(self) -> bool:
return self._proto.alliance == Alliance.Enemy.value
@property
def tag(self) -> int:
return self._proto.tag
@property
def owner_id(self) -> int:
return self._proto.owner
@property
def position(self) -> Point2:
"""2d position of the unit."""
return self.position3d.to2
@property
def position3d(self) -> Point3:
"""3d position of the unit."""
return Point3.from_proto(self._proto.pos)
def distance_to(self, p: Union["Unit", Point2, Point3]) -> Union[int, float]:
""" Using the 2d distance between self and p. To calculate the 3d distance, use unit.position3d.distance_to(p) """
return self.position.distance_to_point2(p.position)
@property
def facing(self) -> Union[int, float]:
return self._proto.facing
@property
def radius(self) -> Union[int, float]:
return self._proto.radius
@property
def detect_range(self) -> Union[int, float]:
return self._proto.detect_range
@property
def radar_range(self) -> Union[int, float]:
return self._proto.radar_range
@property
def build_progress(self) -> Union[int, float]:
return self._proto.build_progress
@property
def is_ready(self) -> bool:
return self.build_progress == 1.0
@property
def cloak(self) -> CloakState:
return self._proto.cloak
@property
def is_blip(self) -> bool:
""" Detected by sensor tower. """
return self._proto.is_blip
@property
def is_powered(self) -> bool:
""" Is powered by a pylon nearby. """
return self._proto.is_powered
@property
def is_burrowed(self) -> bool:
return self._proto.is_burrowed
@property
def is_flying(self) -> bool:
return self._proto.is_flying
@property
def is_structure(self) -> bool:
return Attribute.Structure.value in self._type_data.attributes
@property
def is_light(self) -> bool:
return Attribute.Light.value in self._type_data.attributes
@property
def is_armored(self) -> bool:
return Attribute.Armored.value in self._type_data.attributes
@property
def is_biological(self) -> bool:
return Attribute.Biological.value in self._type_data.attributes
@property
def is_mechanical(self) -> bool:
return Attribute.Mechanical.value in self._type_data.attributes
@property
def is_robotic(self) -> bool:
return Attribute.Robotic.value in self._type_data.attributes
@property
def is_massive(self) -> bool:
return Attribute.Massive.value in self._type_data.attributes
@property
def is_psionic(self) -> bool:
return Attribute.Psionic.value in self._type_data.attributes
@property
def is_mineral_field(self) -> bool:
return self._type_data.has_minerals
@property
def is_vespene_geyser(self) -> bool:
return self._type_data.has_vespene
@property
def tech_alias(self) -> Optional[List[UnitTypeId]]:
""" Building tech equality, e.g. OrbitalCommand is the same as CommandCenter """
""" For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair] """
""" For SCV, this returns None """
return self._type_data.tech_alias
@property
def unit_alias(self) -> Optional[UnitTypeId]:
""" Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand """
""" For flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand """
""" For SCV, this returns None """
return self._type_data.unit_alias
@property
def race(self) -> Race:
return Race(self._type_data._proto.race)
@property
def health(self) -> Union[int, float]:
return self._proto.health
@property
def health_max(self) -> Union[int, float]:
return self._proto.health_max
@property
def health_percentage(self) -> Union[int, float]:
if self._proto.health_max == 0:
return 0
return self._proto.health / self._proto.health_max
@property
def shield(self) -> Union[int, float]:
return self._proto.shield
@property
def shield_max(self) -> Union[int, float]:
return self._proto.shield_max
@property
def shield_percentage(self) -> Union[int, float]:
if self._proto.shield_max == 0:
return 0
return self._proto.shield / self._proto.shield_max
@property
def energy(self) -> Union[int, float]:
return self._proto.energy
@property
def energy_max(self) -> Union[int, float]:
return self._proto.energy_max
@property
def energy_percentage(self) -> Union[int, float]:
if self._proto.energy_max == 0:
return 0
return self._proto.energy / self._proto.energy_max
@property
def mineral_contents(self) -> int:
""" How many minerals a mineral field has left to mine from """
return self._proto.mineral_contents
@property
def vespene_contents(self) -> int:
""" How much gas is remaining in a geyser """
return self._proto.vespene_contents
@property
def has_vespene(self) -> bool:
""" Checks if a geyser has any gas remaining (can't build extractors on empty geysers), useful for lategame """
return self._proto.vespene_contents > 0
@property
def weapon_cooldown(self) -> Union[int, float]:
""" Returns some time (more than game loops) until the unit can fire again, returns -1 for units that can't attack
Usage:
if unit.weapon_cooldown == 0:
await self.do(unit.attack(target))
elif unit.weapon_cooldown < 0:
await self.do(unit.move(closest_allied_unit_because_cant_attack))
else:
await self.do(unit.move(retreatPosition))
"""
if self.can_attack_ground or self.can_attack_air:
return self._proto.weapon_cooldown
return -1
@property
def cargo_size(self) -> Union[float, int]:
""" How much cargo this unit uses up in cargo_space """
return self._type_data.cargo_size
@property
def has_cargo(self) -> bool:
""" If this unit has units loaded """
return self._proto.cargo_space_taken > 0
@property
def cargo_used(self) -> Union[float, int]:
""" How much cargo space is used (some units take up more than 1 space) """
return self._proto.cargo_space_taken
@property
def cargo_max(self) -> Union[float, int]:
""" How much cargo space is totally available - CC: 5, Bunker: 4, Medivac: 8 and Bunker can only load infantry, CC only SCVs """
return self._proto.cargo_space_max
@property
def passengers(self) -> Set["PassengerUnit"]:
""" Units inside a Bunker, CommandCenter, Nydus, Medivac, WarpPrism, Overlord """
return {PassengerUnit(unit, self._game_data) for unit in self._proto.passengers}
@property
def passengers_tags(self) -> Set[int]:
return {unit.tag for unit in self._proto.passengers}
@property
def can_attack_ground(self) -> bool:
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}), None)
return weapon is not None
return False
@property
def ground_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}), None)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property
def ground_range(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}), None)
if weapon:
return weapon.range
return 0
@property
def can_attack_air(self) -> bool:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}), None)
return weapon is not None
return False
@property
def air_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}), None)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property
def air_range(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}), None)
if weapon:
return weapon.range
return 0
def target_in_range(self, target: "Unit", bonus_distance: Union[int, float]=0) -> bool:
""" Includes the target's radius when calculating distance to target """
if self.can_attack_ground and not target.is_flying:
unit_attack_range = self.ground_range
elif self.can_attack_air and target.is_flying:
unit_attack_range = self.air_range
else:
unit_attack_range = -1
return self.distance_to(target) + bonus_distance <= self.radius + target.radius + unit_attack_range
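    # A hedged usage sketch for `target_in_range` (placeholders: `marine` and
    # `enemy` are Unit objects a bot already tracks):
    #
    #     if marine.weapon_cooldown == 0 and marine.target_in_range(enemy):
    #         pass  # safe to issue marine.attack(enemy)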
@property
def armor(self) -> Union[int, float]:
""" Does not include upgrades """
return self._type_data._proto.armor
@property
def sight_range(self) -> Union[int, float]:
return self._type_data._proto.sight_range
@property
def movement_speed(self) -> Union[int, float]:
return self._type_data._proto.movement_speed
@property
def is_carrying_minerals(self) -> bool:
""" Checks if a worker (or MULE) is carrying (gold-)minerals. """
return self.has_buff(BuffId.CARRYMINERALFIELDMINERALS) or self.has_buff(BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS)
@property
def is_carrying_vespene(self) -> bool:
""" Checks if a worker is carrying vespene. """
return self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS) or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS) or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG)
@property
def is_selected(self) -> bool:
return self._proto.is_selected
@property
def orders(self) -> List["UnitOrder"]:
return [UnitOrder.from_proto(o, self._game_data) for o in self._proto.orders]
@property
def noqueue(self) -> bool:
return len(self.orders) == 0
@property
def is_moving(self) -> bool:
return len(self.orders) > 0 and self.orders[0].ability.id in [AbilityId.MOVE]
@property
def is_attacking(self) -> bool:
return len(self.orders) > 0 and self.orders[0].ability.id in [AbilityId.ATTACK, AbilityId.ATTACK_ATTACK, AbilityId.ATTACK_ATTACKTOWARDS, AbilityId.ATTACK_ATTACKBARRAGE, AbilityId.SCAN_MOVE]
@property
def is_gathering(self) -> bool:
""" Checks if a unit is on its way to a mineral field / vespene geyser to mine. """
return len(self.orders) > 0 and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER}
@property
def is_returning(self) -> bool:
""" Checks if a unit is returning from mineral field / vespene geyser to deliver resources to townhall. """
return len(self.orders) > 0 and self.orders[0].ability.id in {AbilityId.HARVEST_RETURN}
@property
def is_collecting(self) -> bool:
""" Combines the two properties above. """
return len(self.orders) > 0 and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER, AbilityId.HARVEST_RETURN}
@property
def is_constructing_scv(self) -> bool:
""" Checks if the unit is an SCV that is currently building. """
        return len(self.orders) > 0 and self.orders[0].ability.id in {
AbilityId.TERRANBUILD_ARMORY,
AbilityId.TERRANBUILD_BARRACKS,
AbilityId.TERRANBUILD_BUNKER,
AbilityId.TERRANBUILD_COMMANDCENTER,
AbilityId.TERRANBUILD_ENGINEERINGBAY,
AbilityId.TERRANBUILD_FACTORY,
AbilityId.TERRANBUILD_FUSIONCORE,
AbilityId.TERRANBUILD_GHOSTACADEMY,
AbilityId.TERRANBUILD_MISSILETURRET,
AbilityId.TERRANBUILD_REFINERY,
AbilityId.TERRANBUILD_SENSORTOWER,
AbilityId.TERRANBUILD_STARPORT,
AbilityId.TERRANBUILD_SUPPLYDEPOT,
}
@property
def is_repairing(self) -> bool:
return len(self.orders) > 0 and self.orders[0].ability.id in {
AbilityId.EFFECT_REPAIR,
AbilityId.EFFECT_REPAIR_MULE,
AbilityId.EFFECT_REPAIR_SCV,
}
@property
def order_target(self) -> Optional[Union[int, Point2]]:
""" Returns the target tag (if it is a Unit) or Point2 (if it is a Position) from the first order, reutrn None if the unit is idle """
if len(self.orders) > 0:
if isinstance(self.orders[0].target, int):
return self.orders[0].target
else:
return Point2.from_proto(self.orders[0].target)
return None
@property
def is_idle(self) -> bool:
return not self.orders
@property
def add_on_tag(self) -> int:
return self._proto.add_on_tag
@property
def add_on_land_position(self) -> Point2:
""" If unit is addon (techlab or reactor), returns the position where a terran building has to land to connect to addon """
return self.position.offset(Point2((-2.5, 0.5)))
@property
def has_add_on(self) -> bool:
return self.add_on_tag != 0
@property
def assigned_harvesters(self) -> int:
return self._proto.assigned_harvesters
@property
def ideal_harvesters(self) -> int:
return self._proto.ideal_harvesters
@property
def surplus_harvesters(self) -> int:
""" Returns a positive number if it has too many harvesters mining, a negative number if it has too few mining """
return -(self._proto.ideal_harvesters - self._proto.assigned_harvesters)
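    # A hedged sketch of how surplus_harvesters is typically read (`townhall`
    # is a placeholder Unit; positive means over-saturated, negative means
    # under-saturated):
    #
    #     if townhall.surplus_harvesters > 0:
    #         pass  # move a worker to another base
    #     elif townhall.surplus_harvesters < 0:
    #         pass  # pull a worker here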
@property
def name(self) -> str:
return self._type_data.name
def train(self, unit, *args, **kwargs):
return self(self._game_data.units[unit.value].creation_ability.id, *args, **kwargs)
def build(self, unit, *args, **kwargs):
return self(self._game_data.units[unit.value].creation_ability.id, *args, **kwargs)
def research(self, upgrade, *args, **kwargs):
""" Requires UpgradeId to be passed instead of AbilityId """
return self(self._game_data.upgrades[upgrade.value].research_ability.id, *args, **kwargs)
def has_buff(self, buff):
assert isinstance(buff, BuffId)
return buff.value in self._proto.buff_ids
def warp_in(self, unit, placement, *args, **kwargs):
normal_creation_ability = self._game_data.units[unit.value].creation_ability.id
return self(warpgate_abilities[normal_creation_ability], placement, *args, **kwargs)
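    # A hedged usage sketch for `warp_in` (placeholders: `warpgate` is a Unit
    # of type WarpGate, `placement` a Point2 inside pylon power; `self.do` is
    # assumed to be the bot's command helper, not part of this class):
    #
    #     await self.do(warpgate.warp_in(UnitTypeId.STALKER, placement))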
def attack(self, *args, **kwargs):
return self(AbilityId.ATTACK, *args, **kwargs)
def gather(self, *args, **kwargs):
return self(AbilityId.HARVEST_GATHER, *args, **kwargs)
def return_resource(self, *args, **kwargs):
return self(AbilityId.HARVEST_RETURN, *args, **kwargs)
def move(self, *args, **kwargs):
return self(AbilityId.MOVE, *args, **kwargs)
def hold_position(self, *args, **kwargs):
return self(AbilityId.HOLDPOSITION, *args, **kwargs)
def stop(self, *args, **kwargs):
return self(AbilityId.STOP, *args, **kwargs)
def repair(self, *args, **kwargs):
return self(AbilityId.EFFECT_REPAIR, *args, **kwargs)
def __hash__(self):
return hash(self.tag)
def __call__(self, ability, *args, **kwargs):
return unit_command.UnitCommand(ability, self, *args, **kwargs)
def __repr__(self):
return f"Unit(name={self.name !r}, tag={self.tag})"
class UnitOrder(object):
@classmethod
def from_proto(cls, proto, game_data):
return cls(
game_data.abilities[proto.ability_id],
(proto.target_world_space_pos
if proto.HasField("target_world_space_pos") else
proto.target_unit_tag),
proto.progress
)
def __init__(self, ability, target, progress=None):
self.ability = ability
self.target = target
self.progress = progress
def __repr__(self):
return f"UnitOrder({self.ability}, {self.target}, {self.progress})"
class PassengerUnit(object):
def __init__(self, proto_data, game_data):
assert isinstance(game_data, GameData)
self._proto = proto_data
self._game_data = game_data
def __repr__(self):
return f"PassengerUnit(name={self.name !r}, tag={self.tag})"
@property
def type_id(self) -> UnitTypeId:
return UnitTypeId(self._proto.unit_type)
@property
def _type_data(self) -> "UnitTypeData":
return self._game_data.units[self._proto.unit_type]
@property
def name(self) -> str:
return self._type_data.name
@property
def race(self) -> Race:
return Race(self._type_data._proto.race)
@property
def tag(self) -> int:
return self._proto.tag
@property
def is_structure(self) -> bool:
return Attribute.Structure.value in self._type_data.attributes
@property
def is_light(self) -> bool:
return Attribute.Light.value in self._type_data.attributes
@property
def is_armored(self) -> bool:
return Attribute.Armored.value in self._type_data.attributes
@property
def is_biological(self) -> bool:
return Attribute.Biological.value in self._type_data.attributes
@property
def is_mechanical(self) -> bool:
return Attribute.Mechanical.value in self._type_data.attributes
@property
def is_robotic(self) -> bool:
return Attribute.Robotic.value in self._type_data.attributes
@property
def is_massive(self) -> bool:
return Attribute.Massive.value in self._type_data.attributes
@property
def cargo_size(self) -> Union[float, int]:
""" How much cargo this unit uses up in cargo_space """
return self._type_data.cargo_size
@property
def can_attack_ground(self) -> bool:
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Ground.value, TargetType.Any.value]), None)
return weapon is not None
return False
@property
def ground_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Ground.value, TargetType.Any.value]), None)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property
def ground_range(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Ground.value, TargetType.Any.value]), None)
if weapon:
return weapon.range
return 0
@property
def can_attack_air(self) -> bool:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Air.value, TargetType.Any.value]), None)
return weapon is not None
return False
@property
def air_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Air.value, TargetType.Any.value]), None)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property
def air_range(self) -> Union[int, float]:
""" Does not include upgrades """
if hasattr(self._type_data._proto, "weapons"):
weapons = self._type_data._proto.weapons
weapon = next((weapon for weapon in weapons if weapon.type in [TargetType.Air.value, TargetType.Any.value]), None)
if weapon:
return weapon.range
return 0
@property
def armor(self) -> Union[int, float]:
""" Does not include upgrades """
return self._type_data._proto.armor
@property
def sight_range(self) -> Union[int, float]:
return self._type_data._proto.sight_range
@property
def movement_speed(self) -> Union[int, float]:
return self._type_data._proto.movement_speed
@property
def health(self) -> Union[int, float]:
return self._proto.health
@property
def health_max(self) -> Union[int, float]:
return self._proto.health_max
@property
def health_percentage(self) -> Union[int, float]:
if self._proto.health_max == 0:
return 0
return self._proto.health / self._proto.health_max
@property
def shield(self) -> Union[int, float]:
return self._proto.shield
@property
def shield_max(self) -> Union[int, float]:
return self._proto.shield_max
@property
def shield_percentage(self) -> Union[int, float]:
if self._proto.shield_max == 0:
return 0
return self._proto.shield / self._proto.shield_max
@property
def energy(self) -> Union[int, float]:
return self._proto.energy
@property
def energy_max(self) -> Union[int, float]:
return self._proto.energy_max
@property
def energy_percentage(self) -> Union[int, float]:
if self._proto.energy_max == 0:
return 0
return self._proto.energy / self._proto.energy_max
|
/sc2_X-1.0.4-py3-none-any.whl/sc2_X/unit.py
| 0.895762 | 0.511961 |
unit.py
|
pypi
|
from typing import Callable, Set, FrozenSet, List
from .position import Point2
class PixelMap(object):
def __init__(self, proto):
self._proto = proto
assert self.bits_per_pixel % 8 == 0, "Unsupported pixel density"
        assert self.width * self.height * self.bits_per_pixel // 8 == len(self._proto.data)
self.data = bytearray(self._proto.data)
@property
def width(self):
return self._proto.size.x
@property
def height(self):
return self._proto.size.y
@property
def bits_per_pixel(self):
return self._proto.bits_per_pixel
@property
def bytes_per_pixel(self):
return self._proto.bits_per_pixel // 8
def __getitem__(self, pos):
x, y = pos
assert 0 <= x < self.width
assert 0 <= y < self.height
        index = self.width * y + x
start = index * self.bytes_per_pixel
data = self.data[start : start + self.bytes_per_pixel]
return int.from_bytes(data, byteorder="little", signed=False)
def __setitem__(self, pos, val):
x, y = pos
assert 0 <= x < self.width
assert 0 <= y < self.height
index = self.width * y + x
start = index * self.bytes_per_pixel
self.data[start : start + self.bytes_per_pixel] = val
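    # A hedged read/write sketch: pixels are stored row-major, little-endian,
    # `bytes_per_pixel` bytes each. For a 1-byte-per-pixel grid (`grid` is a
    # placeholder PixelMap instance):
    #
    #     value = grid[10, 20]       # int value of the pixel at x=10, y=20
    #     grid[10, 20] = bytes([1])  # __setitem__ expects a bytes-like value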
def is_set(self, p):
return self[p] != 0
def is_empty(self, p):
return not self.is_set(p)
def invert(self):
raise NotImplementedError
def flood_fill(self, start_point: Point2, pred: Callable[[int], bool]) -> Set[Point2]:
nodes: Set[Point2] = set()
queue: List[Point2] = [start_point]
while queue:
x, y = queue.pop()
if not (0 <= x < self.width and 0 <= y < self.height):
continue
if Point2((x, y)) in nodes:
continue
if pred(self[x, y]):
nodes.add(Point2((x, y)))
queue.append(Point2((x+1, y)))
queue.append(Point2((x-1, y)))
queue.append(Point2((x, y+1)))
queue.append(Point2((x, y-1)))
return nodes
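    # A hedged usage sketch: collect the connected region around a start point
    # where the predicate holds (`grid` is a placeholder PixelMap, e.g. a
    # pathing grid):
    #
    #     region = grid.flood_fill(Point2((50, 50)), lambda v: v != 0)
    #     # `region` is a Set[Point2] of connected cells matching the predicate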
def flood_fill_all(self, pred: Callable[[int], bool]) -> Set[FrozenSet[Point2]]:
groups: Set[FrozenSet[Point2]] = set()
for x in range(self.width):
for y in range(self.height):
if any((x, y) in g for g in groups):
continue
if pred(self[x, y]):
groups.add(frozenset(self.flood_fill(Point2((x, y)), pred)))
return groups
def print(self, wide=False):
for y in range(self.height):
for x in range(self.width):
print("#" if self.is_set((x, y)) else " ", end=(" " if wide else ""))
print("")
def save_image(self, filename):
        from PIL import Image  # local import so PIL is only needed when saving
        data = [(0, 0, self[x, y]) for y in range(self.height) for x in range(self.width)]
        im = Image.new("RGB", (self.width, self.height))
im.putdata(data)
im.save(filename)
|
/sc2_X-1.0.4-py3-none-any.whl/sc2_X/pixel_map.py
| 0.735452 | 0.505615 |
pixel_map.py
|
pypi
|
class ScoreDetails(object):
""" Accessable in self.state.score during step function
For more information, see https://github.com/Blizzard/s2client-proto/blob/master/s2clientprotocol/score.proto
"""
def __init__(self, proto):
self._data = proto
self._proto = proto.score_details
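    # A hedged access sketch from inside a bot's step function (assumption:
    # `self.state` is the game state object exposing this class as `score`):
    #
    #     sd = self.state.score
    #     print(sd.collection_rate_minerals, sd.collection_rate_vespene)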
@property
def score_type(self):
return self._data.score_type
@property
def score(self):
return self._data.score
@property
def idle_production_time(self):
return self._proto.idle_production_time
@property
def idle_worker_time(self):
return self._proto.idle_worker_time
@property
def total_value_units(self):
return self._proto.total_value_units
@property
def total_value_structures(self):
return self._proto.total_value_structures
@property
def killed_value_units(self):
return self._proto.killed_value_units
@property
def killed_value_structures(self):
return self._proto.killed_value_structures
@property
def collected_minerals(self):
return self._proto.collected_minerals
@property
def collected_vespene(self):
return self._proto.collected_vespene
@property
def collection_rate_minerals(self):
return self._proto.collection_rate_minerals
@property
def collection_rate_vespene(self):
return self._proto.collection_rate_vespene
@property
def spent_minerals(self):
return self._proto.spent_minerals
@property
def spent_vespene(self):
return self._proto.spent_vespene
@property
def food_used_none(self):
return self._proto.food_used.none
@property
def food_used_army(self):
return self._proto.food_used.army
@property
def food_used_economy(self):
return self._proto.food_used.economy
@property
def food_used_technology(self):
return self._proto.food_used.technology
@property
def food_used_upgrade(self):
return self._proto.food_used.upgrade
@property
def killed_minerals_none(self):
return self._proto.killed_minerals.none
@property
def killed_minerals_army(self):
return self._proto.killed_minerals.army
@property
def killed_minerals_economy(self):
return self._proto.killed_minerals.economy
@property
def killed_minerals_technology(self):
return self._proto.killed_minerals.technology
@property
def killed_minerals_upgrade(self):
return self._proto.killed_minerals.upgrade
@property
def killed_vespene_none(self):
return self._proto.killed_vespene.none
@property
def killed_vespene_army(self):
return self._proto.killed_vespene.army
@property
def killed_vespene_economy(self):
return self._proto.killed_vespene.economy
@property
def killed_vespene_technology(self):
return self._proto.killed_vespene.technology
@property
def killed_vespene_upgrade(self):
return self._proto.killed_vespene.upgrade
@property
def lost_minerals_none(self):
return self._proto.lost_minerals.none
@property
def lost_minerals_army(self):
return self._proto.lost_minerals.army
@property
def lost_minerals_economy(self):
return self._proto.lost_minerals.economy
@property
def lost_minerals_technology(self):
return self._proto.lost_minerals.technology
@property
def lost_minerals_upgrade(self):
return self._proto.lost_minerals.upgrade
@property
def lost_vespene_none(self):
return self._proto.lost_vespene.none
@property
def lost_vespene_army(self):
return self._proto.lost_vespene.army
@property
def lost_vespene_economy(self):
return self._proto.lost_vespene.economy
@property
def lost_vespene_technology(self):
return self._proto.lost_vespene.technology
@property
def lost_vespene_upgrade(self):
return self._proto.lost_vespene.upgrade
@property
def friendly_fire_minerals_none(self):
return self._proto.friendly_fire_minerals.none
@property
def friendly_fire_minerals_army(self):
return self._proto.friendly_fire_minerals.army
@property
def friendly_fire_minerals_economy(self):
return self._proto.friendly_fire_minerals.economy
@property
def friendly_fire_minerals_technology(self):
return self._proto.friendly_fire_minerals.technology
@property
def friendly_fire_minerals_upgrade(self):
return self._proto.friendly_fire_minerals.upgrade
@property
def friendly_fire_vespene_none(self):
return self._proto.friendly_fire_vespene.none
@property
def friendly_fire_vespene_army(self):
return self._proto.friendly_fire_vespene.army
@property
def friendly_fire_vespene_economy(self):
return self._proto.friendly_fire_vespene.economy
@property
def friendly_fire_vespene_technology(self):
return self._proto.friendly_fire_vespene.technology
@property
def friendly_fire_vespene_upgrade(self):
return self._proto.friendly_fire_vespene.upgrade
@property
def used_minerals_none(self):
return self._proto.used_minerals.none
@property
def used_minerals_army(self):
return self._proto.used_minerals.army
@property
def used_minerals_economy(self):
return self._proto.used_minerals.economy
@property
def used_minerals_technology(self):
return self._proto.used_minerals.technology
@property
def used_minerals_upgrade(self):
return self._proto.used_minerals.upgrade
@property
def used_vespene_none(self):
return self._proto.used_vespene.none
@property
def used_vespene_army(self):
return self._proto.used_vespene.army
@property
def used_vespene_economy(self):
return self._proto.used_vespene.economy
@property
def used_vespene_technology(self):
return self._proto.used_vespene.technology
@property
def used_vespene_upgrade(self):
return self._proto.used_vespene.upgrade
@property
def total_used_minerals_none(self):
return self._proto.total_used_minerals.none
@property
def total_used_minerals_army(self):
return self._proto.total_used_minerals.army
@property
def total_used_minerals_economy(self):
return self._proto.total_used_minerals.economy
@property
def total_used_minerals_technology(self):
return self._proto.total_used_minerals.technology
@property
def total_used_minerals_upgrade(self):
return self._proto.total_used_minerals.upgrade
@property
def total_used_vespene_none(self):
return self._proto.total_used_vespene.none
@property
def total_used_vespene_army(self):
return self._proto.total_used_vespene.army
@property
def total_used_vespene_economy(self):
return self._proto.total_used_vespene.economy
@property
def total_used_vespene_technology(self):
return self._proto.total_used_vespene.technology
@property
def total_used_vespene_upgrade(self):
return self._proto.total_used_vespene.upgrade
@property
def total_damage_dealt_life(self):
return self._proto.total_damage_dealt.life
@property
def total_damage_dealt_shields(self):
return self._proto.total_damage_dealt.shields
@property
def total_damage_dealt_energy(self):
return self._proto.total_damage_dealt.energy
@property
def total_damage_taken_life(self):
return self._proto.total_damage_taken.life
@property
def total_damage_taken_shields(self):
return self._proto.total_damage_taken.shields
@property
def total_damage_taken_energy(self):
return self._proto.total_damage_taken.energy
@property
def total_healed_life(self):
return self._proto.total_healed.life
@property
def total_healed_shields(self):
return self._proto.total_healed.shields
@property
def total_healed_energy(self):
return self._proto.total_healed.energy
|
/sc2_X-1.0.4-py3-none-any.whl/sc2_X/score.py
| 0.831143 | 0.30461 |
score.py
|
pypi
|
EXAMPLE_SYNTHETIC_REPLAYPACKS = [
(
"2022_TestReplaypack",
"https://github.com/Kaszanas/SC2EGSet_Dataset/raw/main/tests/test_files/2022_TestReplaypack.zip", # noqa
)
]
EXAMPLE_REAL_REPLAYPACKS = [
(
"2016_IEM_10_Taipei",
"https://zenodo.org/record/6903505/files/2016_IEM_10_Taipei.zip?download=1",
),
(
"2016_IEM_11_Shanghai",
"https://zenodo.org/record/6903505/files/2016_IEM_11_Shanghai.zip?download=1",
),
]
SC2EGSET_DATASET_REPLAYPACKS = [
(
"2016_IEM_10_Taipei",
"https://zenodo.org/record/6903505/files/2016_IEM_10_Taipei.zip?download=1",
),
(
"2016_IEM_11_Shanghai",
"https://zenodo.org/record/6903505/files/2016_IEM_11_Shanghai.zip?download=1",
),
(
"2016_WCS_Winter",
"https://zenodo.org/record/6903505/files/2016_WCS_Winter.zip?download=1",
),
(
"2017_HomeStory_Cup_XV",
"https://zenodo.org/record/6903505/files/2017_HomeStory_Cup_XV.zip?download=1",
),
(
"2017_HomeStory_Cup_XVI",
"https://zenodo.org/record/6903505/files/2017_HomeStory_Cup_XVI.zip?download=1",
),
(
"2017_IEM_Shanghai",
"https://zenodo.org/record/6903505/files/2017_IEM_Shanghai.zip?download=1",
),
(
"2017_IEM_XI_World_Championship_Katowice",
"https://zenodo.org/record/6903505/files/2017_IEM_XI_World_Championship_Katowice.zip?download=1", # noqa
),
(
"2017_WCS_Austin",
"https://zenodo.org/record/6903505/files/2017_WCS_Austin.zip?download=1",
),
(
"2017_WCS_Global_Finals",
"https://zenodo.org/record/6903505/files/2017_WCS_Global_Finals.zip?download=1",
),
(
"2017_WCS_Jonkoping",
"https://zenodo.org/record/6903505/files/2017_WCS_Jonkoping.zip?download=1",
),
(
"2017_WCS_Montreal",
"https://zenodo.org/record/6903505/files/2017_WCS_Montreal.zip?download=1",
),
(
"2017_WESG_Barcelona",
"https://zenodo.org/record/6903505/files/2017_WESG_Barcelona.zip?download=1",
),
(
"2017_WESG_Haikou",
"https://zenodo.org/record/6903505/files/2017_WESG_Haikou.zip?download=1",
),
(
"2018_Cheeseadelphia_8",
"https://zenodo.org/record/6903505/files/2018_Cheeseadelphia_8.zip?download=1",
),
(
"2018_HomeStory_Cup_XVII",
"https://zenodo.org/record/6903505/files/2018_HomeStory_Cup_XVII.zip?download=1",
),
(
"2018_HomeStory_Cup_XVIII",
"https://zenodo.org/record/6903505/files/2018_HomeStory_Cup_XVII.zip?download=1",
),
(
"2018_IEM_Katowice",
"https://zenodo.org/record/6903505/files/2018_IEM_Katowice.zip?download=1",
),
(
"2018_IEM_PyeongChang",
"https://zenodo.org/record/6903505/files/2018_IEM_PyeongChang.zip?download=1",
),
(
"2018_WCS_Austin",
"https://zenodo.org/record/6903505/files/2018_WCS_Austin.zip?download=1",
),
(
"2018_WCS_Global_Finals",
"https://zenodo.org/record/6903505/files/2018_WCS_Global_Finals.zip?download=1",
),
(
"2018_WCS_Leipzig",
"https://zenodo.org/record/6903505/files/2018_WCS_Leipzig.zip?download=1",
),
(
"2018_WCS_Montreal",
"https://zenodo.org/record/6903505/files/2018_WCS_Montreal.zip?download=1",
),
(
"2018_WCS_Valencia",
"https://zenodo.org/record/6903505/files/2018_WCS_Valencia.zip?download=1",
),
(
"2018_WESG_Grand_Finals",
"https://zenodo.org/record/6903505/files/2018_WESG_Grand_Finals.zip?download=1",
),
(
"2019_Assembly_Summer",
"https://zenodo.org/record/6903505/files/2019_Assembly_Summer.zip?download=1",
),
(
"2019_HomeStory_Cup_XIX",
"https://zenodo.org/record/6903505/files/2019_HomeStory_Cup_XIX.zip?download=1",
),
(
"2019_HomeStory_Cup_XX",
"https://zenodo.org/record/6903505/files/2019_HomeStory_Cup_XX.zip?download=1",
),
(
"2019_IEM_Katowice",
"https://zenodo.org/record/6903505/files/2019_IEM_Katowice.zip?download=1",
),
(
"2019_WCS_Fall",
"https://zenodo.org/record/6903505/files/2019_WCS_Fall.zip?download=1",
),
(
"2019_WCS_Grand_Finals",
"https://zenodo.org/record/6903505/files/2019_WCS_Grand_Finals.zip?download=1",
),
(
"2019_WCS_Spring",
"https://zenodo.org/record/6903505/files/2019_WCS_Spring.zip?download=1",
),
(
"2019_WCS_Summer",
"https://zenodo.org/record/6903505/files/2019_WCS_Summer.zip?download=1",
),
(
"2019_WCS_Winter",
"https://zenodo.org/record/6903505/files/2019_WCS_Winter.zip?download=1",
),
(
"2020_05_Dreamhack_Last_Chance",
"https://zenodo.org/record/6903505/files/2020_05_Dreamhack_Last_Chance.zip?download=1",
),
(
"2020_ASUS_ROG_Online",
"https://zenodo.org/record/6903505/files/2020_ASUS_ROG_Online.zip?download=1",
),
(
"2020_Dreamhack_SC2_Masters_Fall",
"https://zenodo.org/record/6903505/files/2020_Dreamhack_SC2_Masters_Fall.zip?download=1",
),
(
"2020_Dreamhack_SC2_Masters_Summer",
"https://zenodo.org/record/6903505/files/2020_Dreamhack_SC2_Masters_Summer.zip?download=1",
),
(
"2020_Dreamhack_SC2_Masters_Winter",
"https://zenodo.org/record/6903505/files/2020_Dreamhack_SC2_Masters_Summer.zip?download=1",
),
(
"2020_IEM_Katowice",
"https://zenodo.org/record/6903505/files/2020_IEM_Katowice.zip?download=1",
),
(
"2020_StayAtHome_Story_Cup_1",
"https://zenodo.org/record/6903505/files/2020_StayAtHome_Story_Cup_1.zip?download=1",
),
(
"2020_StayAtHome_Story_Cup_2",
"https://zenodo.org/record/6903505/files/2020_StayAtHome_Story_Cup_2.zip?download=1",
),
(
"2020_TSL5",
"https://zenodo.org/record/6903505/files/2020_TSL5.zip?download=1",
),
(
"2020_TSL6",
"https://zenodo.org/record/6903505/files/2020_TSL6.zip?download=1",
),
(
"2021_ASUS_ROG_Fall",
"https://zenodo.org/record/6903505/files/2021_ASUS_ROG_Fall.zip?download=1",
),
(
"2021_Cheeseadelphia_Winter_Championship",
"https://zenodo.org/record/6903505/files/2021_Cheeseadelphia_Winter_Championship.zip?download=1", # noqa
),
(
"2021_Dreamhack_SC2_Masters_Fall",
"https://zenodo.org/record/6903505/files/2021_Dreamhack_SC2_Masters_Fall.zip?download=1",
),
(
"2021_Dreamhack_SC2_Masters_Summer",
"https://zenodo.org/record/6903505/files/2021_Dreamhack_SC2_Masters_Summer.zip?download=1",
),
(
"2021_Dreamhack_SC2_Masters_Winter",
"https://zenodo.org/record/6903505/files/2021_Dreamhack_SC2_Masters_Winter.zip?download=1",
),
(
"2021_IEM_Katowice",
"https://zenodo.org/record/6903505/files/2021_IEM_Katowice.zip?download=1",
),
(
"2021_StayAtHome_Story_Cup_3",
"https://zenodo.org/record/6903505/files/2021_StayAtHome_Story_Cup_3.zip?download=1",
),
(
"2021_StayAtHome_Story_Cup_4",
"https://zenodo.org/record/6903505/files/2021_StayAtHome_Story_Cup_4.zip?download=1",
),
(
"2021_TSL7",
"https://zenodo.org/record/6903505/files/2021_TSL7.zip?download=1",
),
(
"2021_TSL8",
"https://zenodo.org/record/6903505/files/2021_TSL8.zip?download=1",
),
(
"2022_Dreamhack_SC2_Masters_Last_Chance2021",
"https://zenodo.org/record/6903505/files/2022_Dreamhack_SC2_Masters_Last_Chance2021.zip?download=1", # noqa
),
(
"2022_IEM_Katowice",
"https://zenodo.org/record/6903505/files/2022_IEM_Katowice.zip?download=1",
),
]
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/available_replaypacks.py
| 0.537041 | 0.492859 |
available_replaypacks.py
|
pypi
|
from typing import Any, Dict
from sc2_datasets.replay_parser.toon_player_desc_map.color import Color
class ToonPlayerInfo:
"""
Specifies ToonPlayerInfo class representation
:param nickname: Specifies player name in the game
:type nickname: str
:param playerID: Specifies player id number in the game, example: in 1v1 game: [1,2]
:type playerID: int
:param userID: Specifies id number of player in the game, example: in 1v1 game: [0,1]
:type userID: int
    :param SQ: Specifies the spending quotient value,\
        the ratio between resource mining and spending,\
        more information: https://tl.net/forum/starcraft-2/266019-do-you-macro-like-a-pro
    :type SQ: int
:param supplyCappedPercent: Specifies a 'supply block' percent of game time\
that a player was supply capped.
:type supplyCappedPercent: int
    :param startDir: Specifies the start direction of the player,\
        expressed as a clock direction
:type startDir: int
:param startLocX: Specifies x coordinate of player's starting location
:type startLocX: int
:param startLocY: Specifies y coordinate of player's starting location
:type startLocY: int
:param race: Specifies race the player was playing
:type race: str
:param selectedRace: Specifies race the player selected, might be random
:type selectedRace: str
:param APM: Specifies average action per minute value of the player in the game
:type APM: int
:param MMR: Specifies the value of matchmaking ratio of the player
:type MMR: int
    :param result: Specifies whether the player has won the game
    :type result: str
    :param region: Specifies the region in which the game was played
    :type region: str
    :param realm: Specifies the realm (server) of the region on which the game was played
    :type realm: str
:param highestLeague: Specifies the player's highest league ever achieved
:type highestLeague: str
:param isInClan: Specifies if the player was a member of the clan
:type isInClan: bool
:param clanTag: Specifies the shortcut of the player's clan
:type clanTag: str
:param handicap: Specifies a percentage value of units and structures maximum health points
:type handicap: int
:param color: Specifies the color RGBA palette of the player
:type color: Color
"""
@staticmethod
def from_dict(d: Dict[str, Any]) -> "ToonPlayerInfo":
"""
Static method returning initialized ToonPlayerInfo class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized ToonPlayerInfo class.
:rtype: ToonPlayerInfo
"""
return ToonPlayerInfo(
nickname=d["nickname"],
playerID=d["playerID"],
userID=d["userID"],
SQ=d["SQ"],
supplyCappedPercent=d["supplyCappedPercent"],
startDir=d["startDir"],
startLocX=d["startLocX"],
startLocY=d["startLocY"],
race=d["race"],
selectedRace=d["selectedRace"],
APM=d["APM"],
MMR=d["MMR"],
result=d["result"],
region=d["region"],
realm=d["realm"],
highestLeague=d["highestLeague"],
isInClan=d["isInClan"],
clanTag=d["clanTag"],
handicap=d["handicap"],
color=Color.from_dict(d=d["color"]),
)
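    # A hedged usage sketch (the dict shape mirrors the JSON produced by
    # SC2InfoExtractorGo; `player_dict` is a placeholder for one parsed entry):
    #
    #     player = ToonPlayerInfo.from_dict(d=player_dict)
    #     print(player.nickname, player.race, player.APM)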
def __init__(
self,
nickname: str,
playerID: int,
userID: int,
SQ: int,
supplyCappedPercent: int,
startDir: int,
startLocX: int,
startLocY: int,
race: str,
selectedRace: str,
APM: int,
MMR: int,
result: str,
region: str,
realm: str,
highestLeague: str,
isInClan: bool,
clanTag: str,
handicap: int,
color: Color,
) -> None:
self.nickname = nickname
self.playerID = playerID
self.userID = userID
self.SQ = SQ
self.supplyCappedPercent = supplyCappedPercent
self.startDir = startDir
self.startLocX = startLocX
self.startLocY = startLocY
self.race = race
self.selectedRace = selectedRace
self.APM = APM
self.MMR = MMR
self.result = result
self.region = region
self.realm = realm
self.highestLeague = highestLeague
self.isInClan = isInClan
self.clanTag = clanTag
self.handicap = handicap
self.color = color
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/toon_player_desc_map/toon_player_info.py
| 0.933381 | 0.655477 |
toon_player_info.py
|
pypi
|
from typing import Any, Dict
from sc2_datasets.replay_parser.init_data.game_description import GameDescription
class InitData:
"""
    Data type containing some "init data" information about a StarCraft II game.
    :param gameDescription: Specifies the object that contains the list
        of parameters describing the game
:type gameDescription: GameDescription
"""
@staticmethod
def from_dict(d: Dict[str, Any]) -> "InitData":
"""
Static method returning initialized InitData class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized InitData class.
:rtype: InitData
**Correct Usage Examples:**
        Using the from_dict factory method provides ease of use when parsing a replay pre-processed with SC2InfoExtractorGo_
        This method requires a dictionary representation of data to be passed as a parameter because of the built-in JSON parser provided by the Python standard library.
        .. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
        The use of this method is intended to get initialization information from the game's JSON representation.
>>> from sc2egset_dataset.dataset.replay_data.replay_parser.init_data.game_options import GameOptions
>>> from sc2egset_dataset.dataset.replay_data.replay_parser.init_data.game_description import GameDescription # noqa
>>> gameDescription_dict ={
... "gameOptions": {
... "advancedSharedControl": False,
... "amm": False,
... "battleNet": True,
... "clientDebugFlags": 265,
... "competitive": False,
... "cooperative": False,
... "fog": 0,
... "heroDuplicatesAllowed": True,
... "lockTeams": True,
... "noVictoryOrDefeat": False,
... "observers": 0,
... "practice": False,
... "randomRaces": False,
... "teamsTogether": False,
... "userDifficulty": 0
... },
... "gameSpeed": "Faster",
... "isBlizzardMap": True,
... "mapAuthorName": "98-S2-1-26",
... "mapFileSyncChecksum": 2133219109,
... "mapSizeX": 144,
... "mapSizeY": 160,
... "maxPlayers": 2
... }
... }
...
>>> init_data_object = InitData.from_dict(d=gameDescription_dict)
...
>>> assert isinstance(init_data_object, InitData)
>>> assert isinstance(init_data_object.gameDescription, GameDescription)
>>> assert isinstance(init_data_object.gameDescription.gameOptions, GameOptions)
...
>>> assert isinstance(init_data_object.gameDescription.gameOptions.advancedSharedControl, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.amm, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.battleNet, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.clientDebugFlags, int)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.competitive, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.cooperative, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.fog, int)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.heroDuplicatesAllowed, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.lockTeams, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.noVictoryOrDefeat, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.observers, int)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.practice, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.randomRaces, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.teamsTogether, bool)
>>> assert isinstance(init_data_object.gameDescription.gameOptions.userDifficulty, int)
...
>>> assert init_data_object.gameDescription.gameOptions.advancedSharedControl == False
>>> assert init_data_object.gameDescription.gameOptions.amm == False
>>> assert init_data_object.gameDescription.gameOptions.battleNet == False
>>> assert init_data_object.gameDescription.gameOptions.clientDebugFlags == 265
>>> assert init_data_object.gameDescription.gameOptions.competitive == False
>>> assert init_data_object.gameDescription.gameOptions.cooperative == False
>>> assert init_data_object.gameDescription.gameOptions.fog == 0
>>> assert init_data_object.gameDescription.gameOptions.heroDuplicatesAllowed == True
>>> assert init_data_object.gameDescription.gameOptions.lockTeams == True
>>> assert init_data_object.gameDescription.gameOptions.noVictoryOrDefeat == False
>>> assert init_data_object.gameDescription.gameOptions.observers == 0
>>> assert init_data_object.gameDescription.gameOptions.practice == False
>>> assert init_data_object.gameDescription.gameOptions.randomRaces == False
>>> assert init_data_object.gameDescription.gameOptions.teamsTogether == False
>>> assert init_data_object.gameDescription.gameOptions.userDifficulty == 0
...
>>> assert init_data_object.gameDescription.gameOptions.clientDebugFlags >= 0
>>> assert init_data_object.gameDescription.gameOptions.fog >= 0
>>> assert init_data_object.gameDescription.gameOptions.observers >= 0
>>> assert init_data_object.gameDescription.gameOptions.userDifficulty >= 0
...
>>> assert isinstance(init_data_object.gameDescription.gameSpeed, str)
>>> assert isinstance(init_data_object.gameDescription.isBlizzardMap, bool)
>>> assert isinstance(init_data_object.gameDescription.mapAuthorName, str)
>>> assert isinstance(init_data_object.gameDescription.mapFileSyncChecksum, int)
>>> assert isinstance(init_data_object.gameDescription.mapSizeX, int)
>>> assert isinstance(init_data_object.gameDescription.mapSizeY, int)
>>> assert isinstance(init_data_object.gameDescription.maxPlayers, int)
...
>>> assert init_data_object.gameDescription.gameSpeed == "Faster"
>>> assert init_data_object.gameDescription.isBlizzardMap == True
>>> assert init_data_object.gameDescription.mapAuthorName == "98-S2-1-26"
>>> assert init_data_object.gameDescription.mapFileSyncChecksum == 2133219109
>>> assert init_data_object.gameDescription.mapSizeX == 144
>>> assert init_data_object.gameDescription.mapSizeY == 160
>>> assert init_data_object.gameDescription.maxPlayers == 2
...
>>> assert init_data_object.gameDescription.mapFileSyncChecksum > 0
>>> assert init_data_object.gameDescription.mapSizeX > 0
>>> assert init_data_object.gameDescription.mapSizeY > 0
>>> assert init_data_object.gameDescription.maxPlayers > 0
**Incorrect Usage Examples:**
>>> gameDescription_wrong = False
>>> InitData(
... gameDescription=gameDescription_wrong)
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) ...
"""
return InitData(
gameDescription=GameDescription.from_dict(d=d["gameDescription"])
)
def __init__(self, gameDescription: GameDescription) -> None:
self.gameDescription = gameDescription
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/init_data/init_data.py
| 0.84228 | 0.447823 |
init_data.py
|
pypi
|
from typing import Any, Dict
from sc2_datasets.replay_parser.init_data.game_options import GameOptions
class GameDescription:
"""
GameDescription specifies information about some basic parameters
of a StarCraft II replay.
:param gameOptions: Specifies options in the game,\
for example you can set: fog, random races, competitive, etc.
:type gameOptions: GameOptions
:param gameSpeed: Specifies the speed at which your game runs.\
Enum: [Slower, Slow, Normal, Fast, Faster]. Default is Faster
:type gameSpeed: str
:param isBlizzardMap: Specifies if the map was created by Blizzard
:type isBlizzardMap: bool
:param mapAuthorName: Nickname or full name of the map's author
:type mapAuthorName: str
:param mapFileSyncChecksum: Specifies the map file sync checksum
:type mapFileSyncChecksum: int
:param mapSizeX: Specifies the X size (width) of the map in pixels.
:type mapSizeX: int
:param mapSizeY: Specifies the Y size (height) of the map in pixels.
:type mapSizeY: int
:param maxPlayers: Specifies how many players can play on this map at once.
:type maxPlayers: int
"""
@staticmethod
def from_dict(d: Dict[str, Any]) -> "GameDescription":
"""
Static method returning initialized GameDescription class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file that is\
a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized GameDescription class.
:rtype: GameDescription
**Correct Usage Examples:**
Using the from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation of data to be passed
as a parameter because of the built-in JSON parser provided by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
The use of this method is intended to get game description information
from the game's JSON representation.
>>> from sc2_datasets.replay_parser.init_data.game_options import GameOptions #noqa
>>> game_options_dict = {
... "advancedSharedControl": False,
... "amm": False,
... "battleNet": False,
... "clientDebugFlags": 265,
... "competitive": False,
... "cooperative": False,
... "fog": 0,
... "heroDuplicatesAllowed": True,
... "lockTeams": True,
... "noVictoryOrDefeat": False,
... "observers": 0,
... "practice": False,
... "randomRaces": False,
... "teamsTogether": False,
... "userDifficulty": 0}
...
>>> game_description_dict ={
... "gameOptions": game_options_object,
... "gameSpeed": "Faster",
... "isBlizzardMap": True,
... "mapAuthorName": "98-S2-1-26",
... "mapFileSyncChecksum": 2133219109,
... "mapSizeX": 144,
... "mapSizeY": 160,
... "maxPlayers": 2
... }
...
>>> game_description_object = GameDescription.from_dict(d=game_description_dict)
...
>>> assert isinstance(game_description_object, GameDescription)
>>> assert isinstance(game_description_object.gameOptions, GameOptions)
>>> assert isinstance(game_description_object.gameSpeed, str)
>>> assert isinstance(game_description_object.isBlizzardMap, bool)
>>> assert isinstance(game_description_object.mapAuthorName, str)
>>> assert isinstance(game_description_object.mapFileSyncChecksum, int)
>>> assert isinstance(game_description_object.mapSizeX, int)
>>> assert isinstance(game_description_object.mapSizeY, int)
>>> assert isinstance(game_description_object.maxPlayers, int)
...
>>> assert game_description_object.gameOptions.clientDebugFlags == game_options_dict["clientDebugFlags"]
>>> assert game_description_object.gameSpeed == "Faster"
>>> assert game_description_object.isBlizzardMap == True
>>> assert game_description_object.mapAuthorName == "98-S2-1-26"
>>> assert game_description_object.mapFileSyncChecksum == 2133219109
>>> assert game_description_object.mapSizeX == 144
>>> assert game_description_object.mapSizeY == 160
>>> assert game_description_object.maxPlayers == 2
...
>>> assert game_description_object.mapFileSyncChecksum > 0
>>> assert game_description_object.mapSizeX > 0
>>> assert game_description_object.mapSizeY > 0
>>> assert game_description_object.maxPlayers > 0
**Incorrect Usage Examples:**
>>> gameOptions_value_wrong = "False"
>>> gameSpeed_value_wrong = True
>>> isBlizzardMap_value_wrong = "wrong type"
>>> mapAuthorName_value_wrong = int(2)
>>> mapFileSyncChecksum_value_wrong = str(2)
>>> mapSizeX_value_wrong = str(2)
>>> mapSizeY_value_wrong = str(2)
>>> maxPlayers_value_wrong = str(2)
>>> GameDescription(
... gameOptions=gameOptions_value_wrong,
... gameSpeed=gameSpeed_value_wrong,
... isBlizzardMap=isBlizzardMap_value_wrong,
... mapAuthorName=mapAuthorName_value_wrong,
... mapFileSyncChecksum=mapFileSyncChecksum_value_wrong,
... mapSizeX=mapSizeX_value_wrong,
... mapSizeY=mapSizeY_value_wrong,
... maxPlayers=maxPlayers_value_wrong)
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) ...
"""
return GameDescription(
gameOptions=GameOptions.from_dict(d=d["gameOptions"]),
gameSpeed=d["gameSpeed"],
isBlizzardMap=d["isBlizzardMap"],
mapAuthorName=d["mapAuthorName"],
mapFileSyncChecksum=d["mapFileSyncChecksum"],
mapSizeX=d["mapSizeX"],
mapSizeY=d["mapSizeY"],
maxPlayers=d["maxPlayers"],
)
def __init__(
self,
gameOptions: GameOptions,
gameSpeed: str,
isBlizzardMap: bool,
mapAuthorName: str,
mapFileSyncChecksum: int,
mapSizeX: int,
mapSizeY: int,
maxPlayers: int,
) -> None:
self.gameOptions = gameOptions
self.gameSpeed = gameSpeed
self.isBlizzardMap = isBlizzardMap
self.mapAuthorName = mapAuthorName
self.mapFileSyncChecksum = mapFileSyncChecksum
self.mapSizeX = mapSizeX
self.mapSizeY = mapSizeY
self.maxPlayers = maxPlayers
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/init_data/game_description.py
| 0.847385 | 0.583915 |
game_description.py
|
pypi
|
from typing import Any, Dict
class GameOptions:
"""
GameOptions represents the replay game options
:param advancedSharedControl: Specifies if advanced shared control is enabled
:type advancedSharedControl: bool
:param amm: Specifies if AMM (AutoMM - Automated Match Making) is enabled
:type amm: bool
:param battleNet: Specifies if game has been played on Battle.net
:type battleNet: bool
:param clientDebugFlags: Specifies the client debug flag
:type clientDebugFlags: int
:param competitive: Specifies if the game was competitive, meaning either ranked or unranked
:type competitive: bool
:param cooperative: Specifies if game was cooperative
:type cooperative: bool
:param fog: Specifies the value of fog in the game
:type fog: int
:param heroDuplicatesAllowed: Specifies if hero can be duplicated
:type heroDuplicatesAllowed: bool
:param lockTeams: Specifies if teams are locked
:type lockTeams: bool
:param noVictoryOrDefeat: There is no information about this parameter
:type noVictoryOrDefeat: bool
:param observers: Specifies count of observers watching the game
:type observers: int
:param practice: There is no information about this parameter
:type practice: bool
:param randomRaces: Specifies if random races are in the game
:type randomRaces: bool
:param teamsTogether: Specifies if players of the same team were placed together in the game
:type teamsTogether: bool
:param userDifficulty: There is no information about this parameter
:type userDifficulty: int
"""
@staticmethod
def from_dict(d: Dict[str, Any]) -> "GameOptions":
"""
Static method returning initialized GameOptions class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file that is\
a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized GameOptions class holding parameters about the game like\
a number of observers, fog of the game, if the game was competitive, etc.
:rtype: GameOptions
**Correct Usage Examples:**
Using the from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation of data to be passed
as a parameter because of the built-in JSON parser provided by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
The use of this method is intended to get game options information
from the game's JSON representation.
>>> game_options_dict = {
... "advancedSharedControl": False,
... "amm": False,
... "battleNet": False,
... "clientDebugFlags": 265,
... "competitive": False,
... "cooperative": False,
... "fog": 0,
... "heroDuplicatesAllowed": True,
... "lockTeams": True,
... "noVictoryOrDefeat": False,
... "observers": 0,
... "practice": False,
... "randomRaces": False,
... "teamsTogether": False,
... "userDifficulty": 0}
...
>>> game_options_object = GameOptions.from_dict(d=game_options_dict)
...
>>> assert isinstance(game_options_object, GameOptions)
>>> assert isinstance(game_options_object.advancedSharedControl, bool)
>>> assert isinstance(game_options_object.amm, bool)
>>> assert isinstance(game_options_object.battleNet, bool)
>>> assert isinstance(game_options_object.clientDebugFlags, int)
>>> assert isinstance(game_options_object.competitive, bool)
>>> assert isinstance(game_options_object.cooperative, bool)
>>> assert isinstance(game_options_object.fog, int)
>>> assert isinstance(game_options_object.heroDuplicatesAllowed, bool)
>>> assert isinstance(game_options_object.lockTeams, bool)
>>> assert isinstance(game_options_object.noVictoryOrDefeat, bool)
>>> assert isinstance(game_options_object.observers, int)
>>> assert isinstance(game_options_object.practice, bool)
>>> assert isinstance(game_options_object.randomRaces, bool)
>>> assert isinstance(game_options_object.teamsTogether, bool)
>>> assert isinstance(game_options_object.userDifficulty, int)
...
>>> assert game_options_object.advancedSharedControl == False
>>> assert game_options_object.amm == False
>>> assert game_options_object.battleNet == False
>>> assert game_options_object.clientDebugFlags == 265
>>> assert game_options_object.competitive == False
>>> assert game_options_object.cooperative == False
>>> assert game_options_object.fog == 0
>>> assert game_options_object.heroDuplicatesAllowed == True
>>> assert game_options_object.lockTeams == True
>>> assert game_options_object.noVictoryOrDefeat == False
>>> assert game_options_object.observers == 0
>>> assert game_options_object.practice == False
>>> assert game_options_object.randomRaces == False
>>> assert game_options_object.teamsTogether == False
>>> assert game_options_object.userDifficulty == 0
...
>>> assert game_options_object.clientDebugFlags >= 0
>>> assert game_options_object.fog >= 0
>>> assert game_options_object.observers >= 0
>>> assert game_options_object.userDifficulty >= 0
**Incorrect Usage Examples:**
>>> advancedSharedControl_wrong = "False"
>>> amm_wrong = str(2)
>>> battleNet_wrong = "wrong type"
>>> clientDebugFlags_wrong = str(2)
>>> competitive_wrong = str(2)
>>> cooperative_wrong = str(2)
>>> fog_wrong = str(2)
>>> heroDuplicatesAllowed_wrong = str(2)
>>> lockTeams_wrong = str(2)
>>> noVictoryOrDefeat_wrong = str(2)
>>> observers_wrong = str(2)
>>> practice_wrong = str(2)
>>> randomRaces_wrong = str(2)
>>> teamsTogether_wrong = str(2)
>>> userDifficulty_wrong = str(2)
>>> GameOptions(
... advancedSharedControl=advancedSharedControl_wrong,
... amm=amm_wrong,
... battleNet=battleNet_wrong,
... clientDebugFlags=clientDebugFlags_wrong,
... competitive=competitive_wrong,
... cooperative=cooperative_wrong,
... fog=fog_wrong,
... heroDuplicatesAllowed=heroDuplicatesAllowed_wrong,
... lockTeams=lockTeams_wrong,
... noVictoryOrDefeat=noVictoryOrDefeat_wrong,
... observers=observers_wrong,
... practice=practice_wrong,
... randomRaces=randomRaces_wrong,
... teamsTogether=teamsTogether_wrong,
... userDifficulty=userDifficulty_wrong)
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) ...
"""
return GameOptions(
advancedSharedControl=d["advancedSharedControl"],
amm=d["amm"],
battleNet=d["battleNet"],
clientDebugFlags=d["clientDebugFlags"],
competitive=d["competitive"],
cooperative=d["cooperative"],
fog=d["fog"],
heroDuplicatesAllowed=d["heroDuplicatesAllowed"],
lockTeams=d["lockTeams"],
noVictoryOrDefeat=d["noVictoryOrDefeat"],
observers=d["observers"],
practice=d["practice"],
randomRaces=d["randomRaces"],
teamsTogether=d["teamsTogether"],
userDifficulty=d["userDifficulty"],
)
def __init__(
self,
advancedSharedControl: bool,
amm: bool,
battleNet: bool,
clientDebugFlags: int,
competitive: bool,
cooperative: bool,
fog: int,
heroDuplicatesAllowed: bool,
lockTeams: bool,
noVictoryOrDefeat: bool,
observers: int,
practice: bool,
randomRaces: bool,
teamsTogether: bool,
userDifficulty: int,
) -> None:
self.advancedSharedControl = advancedSharedControl
self.amm = amm
self.battleNet = battleNet
self.clientDebugFlags = clientDebugFlags
self.competitive = competitive
self.cooperative = cooperative
self.fog = fog
self.heroDuplicatesAllowed = heroDuplicatesAllowed
self.lockTeams = lockTeams
self.noVictoryOrDefeat = noVictoryOrDefeat
self.observers = observers
self.practice = practice
self.randomRaces = randomRaces
self.teamsTogether = teamsTogether
self.userDifficulty = userDifficulty
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/init_data/game_options.py
| 0.902045 | 0.569015 |
game_options.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.events.player_setup import PlayerSetup
from sc2_datasets.replay_parser.tracker_events.events.player_stats.player_stats import (
PlayerStats,
)
from sc2_datasets.replay_parser.tracker_events.events.unit_born import UnitBorn
from sc2_datasets.replay_parser.tracker_events.events.unit_died import UnitDied
from sc2_datasets.replay_parser.tracker_events.events.unit_done import UnitDone
from sc2_datasets.replay_parser.tracker_events.events.unit_init import UnitInit
from sc2_datasets.replay_parser.tracker_events.events.unit_owner_change import (
UnitOwnerChange,
)
from sc2_datasets.replay_parser.tracker_events.events.unit_positions import (
UnitPositions,
)
from sc2_datasets.replay_parser.tracker_events.events.unit_type_change import (
UnitTypeChange,
)
from sc2_datasets.replay_parser.tracker_events.events.upgrade import Upgrade
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class TrackerEventsParser:
@staticmethod
def from_dict(d: Dict) -> TrackerEvent:
"""
Static method returning initialized TrackerEvent class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized TrackerEvent class.
:rtype: TrackerEvent
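**Correct Usage Examples:**
The example below is a hedged sketch: the event dictionary is assumed to
mirror the keys consumed by UnitBorn.from_dict, and all values are illustrative.
>>> unit_born_dict = {
...     "evtTypeName": "UnitBorn",
...     "controlPlayerId": 1,
...     "id": 6,
...     "loop": 0,
...     "unitTagIndex": 1,
...     "unitTagRecycle": 1,
...     "unitTypeName": "Marine",
...     "upkeepPlayerId": 1,
...     "x": 10,
...     "y": 10}
>>> event = TrackerEventsParser.from_dict(d=unit_born_dict)
>>> assert isinstance(event, UnitBorn)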
"""
type_name = d["evtTypeName"]
match type_name:
case PlayerStats.__name__:
return PlayerStats.from_dict(d=d)
case PlayerSetup.__name__:
return PlayerSetup.from_dict(d=d)
case UnitBorn.__name__:
return UnitBorn.from_dict(d=d)
case UnitDied.__name__:
return UnitDied.from_dict(d=d)
case UnitDone.__name__:
return UnitDone.from_dict(d=d)
case UnitInit.__name__:
return UnitInit.from_dict(d=d)
case UnitOwnerChange.__name__:
return UnitOwnerChange.from_dict(d=d)
case UnitPositions.__name__:
return UnitPositions.from_dict(d=d)
case UnitTypeChange.__name__:
return UnitTypeChange.from_dict(d=d)
case Upgrade.__name__:
return Upgrade.from_dict(d=d)
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/tracker_events_parser.py
| 0.819893 | 0.327158 |
tracker_events_parser.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class UnitBorn(TrackerEvent):
"""
UnitBorn is containing some "details" information about unit
at the moment of it has appeared in the game
:param controlPlayerId: Specifies the information about player id who made\
the unit in the game
:type controlPlayerId: int
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param unitTagIndex: Specifies a pointer for a specific unit which\
was created in the game
:type unitTagIndex: int
:param unitTagRecycle: There is no specific information about this parameter
:type unitTagRecycle: int
:param unitTypeName: Specifies the in-game name of the unit that was created in the game
:type unitTypeName: str
:param upkeepPlayerId: Specifies the id of the player who had\
control of the unit in the game
:type upkeepPlayerId: int
:param x: Specifies x coordinate of map in pixels where the object was created.
:type x: int
:param y: Specifies y coordinate of map in pixels where the object was created.
:type y: int
"""
def from_dict(d: Dict) -> "UnitBorn":
"""
Static method returning initialized UnitBorn class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized UnitBorn class.
:rtype: UnitBorn
"""
return UnitBorn(
controlPlayerId=d["controlPlayerId"],
id=d["id"],
loop=d["loop"],
unitTagIndex=d["unitTagIndex"],
unitTagRecycle=d["unitTagRecycle"],
unitTypeName=d["unitTypeName"],
upkeepPlayerId=d["upkeepPlayerId"],
x=d["x"],
y=d["y"],
)
def __init__(
self,
controlPlayerId: int,
id: int,
loop: int,
unitTagIndex: int,
unitTagRecycle: int,
unitTypeName: str,
upkeepPlayerId: int,
x: int,
y: int,
) -> None:
self.controlPlayerId = controlPlayerId
self.id = id
self.loop = loop
self.unitTagIndex = unitTagIndex
self.unitTagRecycle = unitTagRecycle
self.unitTypeName = unitTypeName
self.upkeepPlayerId = upkeepPlayerId
self.x = x
self.y = y
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/events/unit_born.py
| 0.924764 | 0.506836 |
unit_born.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class UnitDied(TrackerEvent):
"""
UnitDied is containing some "details" information about unit
at the moment of it has died in the game
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param killerPlayerId: Specifies the id of the player who controlled\
and destroyed the unit in the game
:type killerPlayerId: int
:param killerUnitTagIndex: Specifies a pointer for a specific unit\
which destroyed the unit in the game
:type killerUnitTagIndex: int
:param killerUnitTagRecycle: There is no specific information about this parameter
:type killerUnitTagRecycle: int
:param loop: Specifies the game loop number (game-engine tick) at which the event occurred
:type loop: int
:param unitTagIndex: Specifies a pointer for a specific unit which\
was destroyed in the game
:type unitTagIndex: int
:param unitTagRecycle: There is no specific information about this parameter
:type unitTagRecycle: int
:param x: Specifies x coordinate of map in pixels where the object was destroyed.
:type x: int
:param y: Specifies y coordinate of map in pixels where the object was destroyed.
:type y: int
"""
def from_dict(d: Dict) -> "UnitDied":
"""
Static method returning initialized UnitDied class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file that
is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized UnitDied class.
:rtype: UnitDied
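**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> unit_died_dict = {
...     "id": 2,
...     "killerPlayerId": 1,
...     "killerUnitTagIndex": 5,
...     "killerUnitTagRecycle": 1,
...     "loop": 1000,
...     "unitTagIndex": 10,
...     "unitTagRecycle": 1,
...     "x": 30,
...     "y": 60}
>>> unit_died_object = UnitDied.from_dict(d=unit_died_dict)
>>> assert isinstance(unit_died_object, UnitDied)
>>> assert unit_died_object.unitTagRecycle == 1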
"""
return UnitDied(
id=d["id"],
killerPlayerId=d["killerPlayerId"],
killerUnitTagIndex=d["killerUnitTagIndex"],
killerUnitTagRecycle=d["killerUnitTagRecycle"],
loop=d["loop"],
unitTagIndex=d["unitTagIndex"],
unitTagRecycle=["unitTagRecycle"],
x=d["x"],
y=d["y"],
)
def __init__(
self,
id: int,
killerPlayerId: int,
killerUnitTagIndex: int,
killerUnitTagRecycle: int,
loop: int,
unitTagIndex: int,
unitTagRecycle: int,
x: int,
y: int,
) -> None:
self.id = id
self.killerPlayerId = killerPlayerId
self.killerUnitTagIndex = killerUnitTagIndex
self.killerUnitTagRecycle = killerUnitTagRecycle
self.loop = loop
self.unitTagIndex = unitTagIndex
self.unitTagRecycle = unitTagRecycle
self.x = x
self.y = y
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/events/unit_died.py
| 0.9399 | 0.55263 |
unit_died.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class UnitOwnerChange(TrackerEvent):
"""
UnitOwnerChange holds some "details" information about a change
of a unit's owner during the game.
:param controlPlayerId: Specifies the id of the player who created the unit in the game
:type controlPlayerId: int
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param loop: Specifies the game loop number (game-engine tick) at which the event occurred
:type loop: int
:param unitTagIndex: Specifies a pointer for a specific unit whose owner was changed
:type unitTagIndex: int
:param unitTagRecycle: There is no specific information about this parameter
:type unitTagRecycle: int
:param upkeepPlayerId: Specifies the id of the player who had\
control of the unit in the game
:type upkeepPlayerId: int
"""
def from_dict(d: Dict[str, int]) -> "UnitOwnerChange":
"""
Static method returning initialized UnitOwnerChange class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file that\
is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized UnitOwnerChange class.
:rtype: UnitOwnerChange
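**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> unit_owner_change_dict = {
...     "controlPlayerId": 2,
...     "id": 3,
...     "loop": 800,
...     "unitTagIndex": 10,
...     "unitTagRecycle": 1,
...     "upkeepPlayerId": 2}
>>> uoc_object = UnitOwnerChange.from_dict(d=unit_owner_change_dict)
>>> assert isinstance(uoc_object, UnitOwnerChange)
>>> assert uoc_object.upkeepPlayerId == 2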
"""
return UnitOwnerChange(
controlPlayerId=d["controlPlayerId"],
id=d["id"],
loop=d["loop"],
unitTagIndex=d["unitTagIndex"],
unitTagRecycle=d["unitTagRecycle"],
upkeepPlayerId=d["upkeepPlayerId"],
)
def __init__(
self,
controlPlayerId: int,
id: int,
loop: int,
unitTagIndex: int,
unitTagRecycle: int,
upkeepPlayerId: int,
) -> None:
self.controlPlayerId = controlPlayerId
self.id = id
self.loop = loop
self.unitTagIndex = unitTagIndex
self.unitTagRecycle = unitTagRecycle
self.upkeepPlayerId = upkeepPlayerId
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/events/unit_owner_change.py
| 0.906151 | 0.558327 |
unit_owner_change.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class UnitInit(TrackerEvent):
"""
UnitInit holds information about initializing object in the game
:param controlPlayerId: Specifies the id of the player who created\
the unit in the game
:type controlPlayerId: int
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param unitTagIndex: Specifies a pointer for a specific unit\
which was initialized in the game
:type unitTagIndex: int
:param unitTagRecycle: There is no specific information about this parameter
:type unitTagRecycle: int
:param unitTypeName: Specifies the in-game name of the unit\
which was initialized in the game
:type unitTypeName: str
:param upkeepPlayerId: Specifies the id of the player\
who had control of the unit in the game
:type upkeepPlayerId: int
:param x: Specifies x coordinate of map in pixels where\
the object was initialized.
:type x: int
:param y: Specifies y coordinate of map in pixels where\
the object was initialized.
:type y: int
"""
def from_dict(d: Dict) -> "UnitInit":
"""
Static method returning initialized UnitInit class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file that\
is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized UnitInit class.
:rtype: UnitInit
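**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> unit_init_dict = {
...     "controlPlayerId": 1,
...     "id": 11,
...     "loop": 500,
...     "unitTagIndex": 20,
...     "unitTagRecycle": 1,
...     "unitTypeName": "SupplyDepot",
...     "upkeepPlayerId": 1,
...     "x": 40,
...     "y": 50}
>>> unit_init_object = UnitInit.from_dict(d=unit_init_dict)
>>> assert isinstance(unit_init_object, UnitInit)
>>> assert unit_init_object.unitTypeName == "SupplyDepot"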
"""
return UnitInit(
controlPlayerId=d["controlPlayerId"],
id=d["id"],
loop=d["loop"],
unitTagIndex=d["unitTagIndex"],
unitTagRecycle=d["unitTagRecycle"],
unitTypeName=d["unitTypeName"],
upkeepPlayerId=d["upkeepPlayerId"],
x=d["x"],
y=d["y"],
)
def __init__(
self,
controlPlayerId: int,
id: int,
loop: int,
unitTagIndex: int,
unitTagRecycle: int,
unitTypeName: str,
upkeepPlayerId: int,
x: int,
y: int,
) -> None:
self.controlPlayerId = controlPlayerId
self.id = id
self.loop = loop
self.unitTagIndex = unitTagIndex
self.unitTagRecycle = unitTagRecycle
self.unitTypeName = unitTypeName
self.upkeepPlayerId = upkeepPlayerId
self.x = x
self.y = y
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/events/unit_init.py
| 0.9339 | 0.511717 |
unit_init.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.tracker_events.tracker_event import TrackerEvent
class Stats(TrackerEvent):
"""
Stats holds specific fields on the economy of a player and is used in the PlayerStats event.
:param foodMade: Specifies the amount of supply that a player created.\
This is a limit of units that can be made.
:type foodMade: int
:param foodUsed: Specifies how much of the supply is used for units.
:type foodUsed: int
:param mineralsCollectionRate: Specifies the collection rate of minerals.\
Most likely per minute.
:type mineralsCollectionRate: int
:param mineralsCurrent: Specifies how much minerals the player has in his "bank".
:type mineralsCurrent: int
:param mineralsFriendlyFireArmy: Specifies how much minerals were lost\
in friendly fire on army units.
:type mineralsFriendlyFireArmy: int
:param mineralsFriendlyFireEconomy: Specifies how much minerals were lost\
in friendly fire on economy.
:type mineralsFriendlyFireEconomy: int
:param mineralsFriendlyFireTechnology: Specifies how much minerals were lost\
in friendly fire on technology.
:type mineralsFriendlyFireTechnology: int
:param mineralsKilledArmy: Specifies how much minerals a player killed\
in his opponent's army.
:type mineralsKilledArmy: int
:param mineralsKilledEconomy: Specifies how much minerals player killed\
in his opponent's economy.
:type mineralsKilledEconomy: int
:param mineralsKilledTechnology: Specifies how much minerals player killed\
in his opponent's technology.
:type mineralsKilledTechnology: int
:param mineralsLostArmy: Specifies how much minerals player lost in his army.
:type mineralsLostArmy: int
:param mineralsLostEconomy: Specifies how much minerals player lost in his economy.
:type mineralsLostEconomy: int
:param mineralsLostTechnology: Specifies how much minerals player lost in his technology.
:type mineralsLostTechnology: int
:param mineralsUsedActiveForces: Specifies how much minerals does the player\
have in his active forces.
:type mineralsUsedActiveForces: int
:param mineralsUsedCurrentArmy: Specifies how much minerals does the player\
have in his army.
:type mineralsUsedCurrentArmy: int
:param mineralsUsedCurrentEconomy: Specifies how much minerals does the player\
have in his economical units and structures.
:type mineralsUsedCurrentEconomy: int
:param mineralsUsedCurrentTechnology: Specifies how much minerals does the player\
have in his technological units, upgrades, and structures.
:type mineralsUsedCurrentTechnology: int
:param mineralsUsedInProgressArmy: Specifies how much minerals does the player\
have in army that is currently being built.
:type mineralsUsedInProgressArmy: int
:param mineralsUsedInProgressEconomy: Specifies how much minerals does the player\
have in economy that is currently being built.
:type mineralsUsedInProgressEconomy: int
:param mineralsUsedInProgressTechnology: Specifies how much minerals does\
the player have in technology that is currently being built.
:type mineralsUsedInProgressTechnology: int
:param vespeneCollectionRate: Specifies what is the vespene collection rate.\
Most likely per minute.
:type vespeneCollectionRate: int
:param vespeneCurrent: Specifies the amount of vespene gas that the user has\
in his "bank".
:type vespeneCurrent: int
:param vespeneFriendlyFireArmy: Specifies how much vespene was lost in friendly fire\
on army units.
:type vespeneFriendlyFireArmy: int
:param vespeneFriendlyFireEconomy: Specifies how much vespene was lost\
in friendly fire on economy.
:type vespeneFriendlyFireEconomy: int
:param vespeneFriendlyFireTechnology: Specifies how much vespene was lost\
in friendly fire on technology.
:type vespeneFriendlyFireTechnology: int
:param vespeneKilledArmy: Specifies how much vespene player killed in his opponent's army.
:type vespeneKilledArmy: int
:param vespeneKilledEconomy: Specifies how much vespene player killed\
in his opponent's economy.
:type vespeneKilledEconomy: int
:param vespeneKilledTechnology: Specifies how much vespene player killed\
in his opponent's technology.
:type vespeneKilledTechnology: int
:param vespeneLostArmy: Specifies how much vespene player lost in his army.
:type vespeneLostArmy: int
:param vespeneLostEconomy: Specifies how much vespene player lost in his economy.
:type vespeneLostEconomy: int
:param vespeneLostTechnology: Specifies how much vespene player lost in his technology.
:type vespeneLostTechnology: int
:param vespeneUsedActiveForces: Specifies how much vespene does the player\
have in his active forces.
:type vespeneUsedActiveForces: int
:param vespeneUsedCurrentArmy: Specifies how much vespene does the player\
have in his army.
:type vespeneUsedCurrentArmy: int
:param vespeneUsedCurrentEconomy: Specifies how much vespene does the player\
have in his economical units and structures.
:type vespeneUsedCurrentEconomy: int
:param vespeneUsedCurrentTechnology: Specifies how much vespene does the player\
have in his technological units, upgrades, and structures.
:type vespeneUsedCurrentTechnology: int
:param vespeneUsedInProgressArmy: Specifies how much vespene does the player\
have in army that is currently being built.
:type vespeneUsedInProgressArmy: int
:param vespeneUsedInProgressEconomy: Specifies how much vespene does the player\
have in economy that is currently being built.
:type vespeneUsedInProgressEconomy: int
:param vespeneUsedInProgressTechnology: Specifies how much vespene does the player\
have in technology that is currently being built.
:type vespeneUsedInProgressTechnology: int
:param workersActiveCount: Specifies the number of workers that the player has.
:type workersActiveCount: int
"""
@staticmethod
def from_dict(d: Dict) -> "Stats":
"""
Static method returning initialized Stats class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized Stats class.
:rtype: Stats
"""
return Stats(
foodMade=d["scoreValueFoodMade"],
foodUsed=d["scoreValueFoodUsed"],
mineralsCollectionRate=d["scoreValueMineralsCollectionRate"],
mineralsCurrent=d["scoreValueMineralsCurrent"],
mineralsFriendlyFireArmy=d["scoreValueMineralsFriendlyFireArmy"],
mineralsFriendlyFireEconomy=d["scoreValueMineralsFriendlyFireEconomy"],
mineralsFriendlyFireTechnology=d[
"scoreValueMineralsFriendlyFireTechnology"
],
mineralsKilledArmy=d["scoreValueMineralsKilledArmy"],
mineralsKilledEconomy=d["scoreValueMineralsKilledEconomy"],
mineralsKilledTechnology=d["scoreValueMineralsKilledTechnology"],
mineralsLostArmy=d["scoreValueMineralsLostArmy"],
mineralsLostEconomy=d["scoreValueMineralsLostEconomy"],
mineralsLostTechnology=d["scoreValueMineralsLostTechnology"],
mineralsUsedActiveForces=d["scoreValueMineralsUsedActiveForces"],
mineralsUsedCurrentArmy=d["scoreValueMineralsUsedCurrentArmy"],
mineralsUsedCurrentEconomy=d["scoreValueMineralsUsedCurrentEconomy"],
mineralsUsedCurrentTechnology=d["scoreValueMineralsUsedCurrentTechnology"],
mineralsUsedInProgressArmy=d["scoreValueMineralsUsedInProgressArmy"],
mineralsUsedInProgressEconomy=d["scoreValueMineralsUsedInProgressEconomy"],
mineralsUsedInProgressTechnology=d[
"scoreValueMineralsUsedInProgressTechnology"
],
vespeneCollectionRate=d["scoreValueVespeneCollectionRate"],
vespeneCurrent=d["scoreValueVespeneCurrent"],
vespeneFriendlyFireArmy=d["scoreValueVespeneFriendlyFireArmy"],
vespeneFriendlyFireEconomy=d["scoreValueVespeneFriendlyFireEconomy"],
vespeneFriendlyFireTechnology=d["scoreValueVespeneFriendlyFireTechnology"],
vespeneKilledArmy=d["scoreValueVespeneKilledArmy"],
vespeneKilledEconomy=d["scoreValueVespeneKilledEconomy"],
vespeneKilledTechnology=d["scoreValueVespeneKilledTechnology"],
vespeneLostArmy=d["scoreValueVespeneLostArmy"],
vespeneLostEconomy=d["scoreValueVespeneLostEconomy"],
vespeneLostTechnology=d["scoreValueVespeneLostTechnology"],
vespeneUsedActiveForces=d["scoreValueVespeneUsedActiveForces"],
vespeneUsedCurrentArmy=d["scoreValueVespeneUsedCurrentArmy"],
vespeneUsedCurrentEconomy=d["scoreValueVespeneUsedCurrentEconomy"],
vespeneUsedCurrentTechnology=d["scoreValueVespeneUsedCurrentTechnology"],
vespeneUsedInProgressArmy=d["scoreValueVespeneUsedInProgressArmy"],
vespeneUsedInProgressEconomy=d["scoreValueVespeneUsedInProgressEconomy"],
vespeneUsedInProgressTechnology=d[
"scoreValueVespeneUsedInProgressTechnology"
],
workersActiveCount=d["scoreValueWorkersActiveCount"],
)
def __init__(
self,
foodMade: int,
foodUsed: int,
mineralsCollectionRate: int,
mineralsCurrent: int,
mineralsFriendlyFireArmy: int,
mineralsFriendlyFireEconomy: int,
mineralsFriendlyFireTechnology: int,
mineralsKilledArmy: int,
mineralsKilledEconomy: int,
mineralsKilledTechnology: int,
mineralsLostArmy: int,
mineralsLostEconomy: int,
mineralsLostTechnology: int,
mineralsUsedActiveForces: int,
mineralsUsedCurrentArmy: int,
mineralsUsedCurrentEconomy: int,
mineralsUsedCurrentTechnology: int,
mineralsUsedInProgressArmy: int,
mineralsUsedInProgressEconomy: int,
mineralsUsedInProgressTechnology: int,
vespeneCollectionRate: int,
vespeneCurrent: int,
vespeneFriendlyFireArmy: int,
vespeneFriendlyFireEconomy: int,
vespeneFriendlyFireTechnology: int,
vespeneKilledArmy: int,
vespeneKilledEconomy: int,
vespeneKilledTechnology: int,
vespeneLostArmy: int,
vespeneLostEconomy: int,
vespeneLostTechnology: int,
vespeneUsedActiveForces: int,
vespeneUsedCurrentArmy: int,
vespeneUsedCurrentEconomy: int,
vespeneUsedCurrentTechnology: int,
vespeneUsedInProgressArmy: int,
vespeneUsedInProgressEconomy: int,
vespeneUsedInProgressTechnology: int,
workersActiveCount: int,
) -> None:
# This calculation is required for raw data ingestion:
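# The raw replay JSON appears to encode supply (food) as a 4096-scaled
# fixed-point integer, e.g. a raw scoreValueFoodMade of 409600 denotes
# 100 supply (409600 / 4096 == 100); dividing by 4096 recovers the
# in-game value.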
self.foodMade = int(foodMade / 4096)
self.foodUsed = int(foodUsed / 4096)
self.mineralsCollectionRate = mineralsCollectionRate
self.mineralsCurrent = mineralsCurrent
self.mineralsFriendlyFireArmy = mineralsFriendlyFireArmy
self.mineralsFriendlyFireEconomy = mineralsFriendlyFireEconomy
self.mineralsFriendlyFireTechnology = mineralsFriendlyFireTechnology
self.mineralsKilledArmy = mineralsKilledArmy
self.mineralsKilledEconomy = mineralsKilledEconomy
self.mineralsKilledTechnology = mineralsKilledTechnology
self.mineralsLostArmy = mineralsLostArmy
self.mineralsLostEconomy = mineralsLostEconomy
self.mineralsLostTechnology = mineralsLostTechnology
self.mineralsUsedActiveForces = mineralsUsedActiveForces
self.mineralsUsedCurrentArmy = mineralsUsedCurrentArmy
self.mineralsUsedCurrentEconomy = mineralsUsedCurrentEconomy
self.mineralsUsedCurrentTechnology = mineralsUsedCurrentTechnology
self.mineralsUsedInProgressArmy = mineralsUsedInProgressArmy
self.mineralsUsedInProgressEconomy = mineralsUsedInProgressEconomy
self.mineralsUsedInProgressTechnology = mineralsUsedInProgressTechnology
self.vespeneCollectionRate = vespeneCollectionRate
self.vespeneCurrent = vespeneCurrent
self.vespeneFriendlyFireArmy = vespeneFriendlyFireArmy
self.vespeneFriendlyFireEconomy = vespeneFriendlyFireEconomy
self.vespeneFriendlyFireTechnology = vespeneFriendlyFireTechnology
self.vespeneKilledArmy = vespeneKilledArmy
self.vespeneKilledEconomy = vespeneKilledEconomy
self.vespeneKilledTechnology = vespeneKilledTechnology
self.vespeneLostArmy = vespeneLostArmy
self.vespeneLostEconomy = vespeneLostEconomy
self.vespeneLostTechnology = vespeneLostTechnology
self.vespeneUsedActiveForces = vespeneUsedActiveForces
self.vespeneUsedCurrentArmy = vespeneUsedCurrentArmy
self.vespeneUsedCurrentEconomy = vespeneUsedCurrentEconomy
self.vespeneUsedCurrentTechnology = vespeneUsedCurrentTechnology
self.vespeneUsedInProgressArmy = vespeneUsedInProgressArmy
self.vespeneUsedInProgressEconomy = vespeneUsedInProgressEconomy
self.vespeneUsedInProgressTechnology = vespeneUsedInProgressTechnology
self.workersActiveCount = workersActiveCount
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/tracker_events/events/player_stats/stats.py
| 0.781997 | 0.592991 |
stats.py
|
pypi
|
from typing import Any, Dict
class Header:
"""
Header represents the replay header parameters representation.
:param elapsedGameLoops: Specifies how many game loops (game-engine ticks) the game lasted.
:type elapsedGameLoops: int
:param version: Specifies the game version that players have used to play the game.
:type version: str
"""
@staticmethod
def from_dict(d: Dict[str, Any]) -> "Header":
"""
Static method returning initialized Header class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized Header class.
:rtype: Header
**Correct Usage Examples:**
Using the from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation of data
to be passed as a parameter because of the built-in JSON parser provided
by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
The use of this method is intended to get header information from
the game's json representation.
>>> elapsedGameLoops = 10000
>>> version = "3.12.0.51702"
>>> header_object = Header(
...     elapsedGameLoops=elapsedGameLoops,
...     version=version)
>>> assert isinstance(header_object.elapsedGameLoops, int)
>>> assert header_object.elapsedGameLoops > 0
>>> assert isinstance(header_object.version, str)
**Incorrect Usage Examples:**
>>> elapsedGameLoops_wrong = "text"
>>> version_wrong = int(2)
>>> Header(
... elapsedGameLoops=elapsedGameLoops_wrong,
... version=version_wrong)
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) ...
"""
return Header(
elapsedGameLoops=d["elapsedGameLoops"],
version=d["version"],
)
def __init__(
self,
elapsedGameLoops: int,
version: str,
) -> None:
self.elapsedGameLoops = elapsedGameLoops
self.version = version
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/header/header.py
| 0.953805 | 0.702109 |
header.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.game_events.events.camera_save import CameraSave
from sc2_datasets.replay_parser.game_events.events.camera_update import CameraUpdate
from sc2_datasets.replay_parser.game_events.events.cmd import Cmd
from sc2_datasets.replay_parser.game_events.events.cmd_update_target_point import (
CmdUpdateTargetPoint,
)
from sc2_datasets.replay_parser.game_events.events.cmd_update_target_unit import (
CmdUpdateTargetUnit,
)
from sc2_datasets.replay_parser.game_events.events.command_manager_state import (
CommandManagerState,
)
from sc2_datasets.replay_parser.game_events.events.control_group_update import (
ControlGroupUpdate,
)
from sc2_datasets.replay_parser.game_events.events.game_user_leave import GameUserLeave
from sc2_datasets.replay_parser.game_events.events.selection_delta import SelectionDelta
from sc2_datasets.replay_parser.game_events.events.user_options import UserOptions
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
class GameEventsParser:
@staticmethod
def from_dict(d: Dict) -> GameEvent:
"""
Static method returning initialized GameEvent class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized GameEvent class.
:rtype: GameEvent
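**Correct Usage Examples:**
The example below is a hedged sketch: the event dictionary reuses the
CameraSave shape shown elsewhere in this package, with an added
"evtTypeName" key used for dispatch; the values are illustrative.
>>> camera_save_dict = {"evtTypeName": "CameraSave",
...     "id": 5,
...     "loop": 22,
...     "target": {
...         "x": 3.578125,
...         "y": 0.742431640625},
...     "userid": {"userId": 0},
...     "which": 0}
>>> event = GameEventsParser.from_dict(d=camera_save_dict)
>>> assert isinstance(event, CameraSave)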
"""
type_name = d["evtTypeName"]
match type_name:
case CameraSave.__name__:
return CameraSave.from_dict(d=d)
case CameraUpdate.__name__:
return CameraUpdate.from_dict(d=d)
case CmdUpdateTargetPoint.__name__:
return CmdUpdateTargetPoint.from_dict(d=d)
case CmdUpdateTargetUnit.__name__:
return CmdUpdateTargetUnit.from_dict(d=d)
case Cmd.__name__:
return Cmd.from_dict(d=d)
case CommandManagerState.__name__:
return CommandManagerState.from_dict(d=d)
case ControlGroupUpdate.__name__:
return ControlGroupUpdate.from_dict(d=d)
case GameUserLeave.__name__:
return GameUserLeave.from_dict(d=d)
case SelectionDelta.__name__:
return SelectionDelta.from_dict(d=d)
case UserOptions.__name__:
return UserOptions.from_dict(d=d)
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/game_events_parser.py
| 0.817756 | 0.249299 |
game_events_parser.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
class ControlGroupUpdate(GameEvent):
"""
ControlGroupUpdate contains some "details" information about
updates and changes to a player's control groups in the game.
:param controlGroupIndex: Highly likely this parameter specifies\
the index of the control group that was selected by the player in the game
:type controlGroupIndex: int
:param controlGroupUpdate: Highly likely this parameter specifies\
the type of change that was applied to the control group by the player in the game
:type controlGroupUpdate: int
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param userid: Specifies the id of the player who updated the control group in the game
:type userid: int
"""
@staticmethod
def from_dict(d: Dict) -> "ControlGroupUpdate":
"""
Static method returning initialized ControlGroupUpdate class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized ControlGroupUpdate class.
:rtype: ControlGroupUpdate
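**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> control_group_update_dict = {
...     "controlGroupIndex": 1,
...     "controlGroupUpdate": 0,
...     "id": 7,
...     "loop": 120,
...     "userid": {"userId": 0}}
>>> cgu_object = ControlGroupUpdate.from_dict(d=control_group_update_dict)
>>> assert isinstance(cgu_object, ControlGroupUpdate)
>>> assert cgu_object.userid == 0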
"""
return ControlGroupUpdate(
controlGroupIndex=d["controlGroupIndex"],
controlGroupUpdate=d["controlGroupUpdate"],
id=d["id"],
loop=d["loop"],
userid=d["userid"]["userId"],
)
def __init__(
self,
controlGroupIndex: int,
controlGroupUpdate: int,
id: int,
loop: int,
userid: int,
) -> None:
self.controlGroupIndex = controlGroupIndex
self.controlGroupUpdate = controlGroupUpdate
self.id = id
self.loop = loop
self.userid = userid
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/control_group_update.py
| 0.899262 | 0.475544 |
control_group_update.py
|
pypi
|
from types import NoneType
from typing import Dict
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
from sc2_datasets.replay_parser.game_events.events.nested.target_2d import Target2D
class CameraUpdate(GameEvent):
"""
CameraUpdate represents the replay information
about updated camera location in the game.
:param distance: There is no valuable information about this parameter
:type distance: NoneType | float | int
:param follow: There is no valuable information about this parameter
:type follow: bool
:param id: There is no valuable information about this parameter
:type id: int
:param loop: Specifies the game loop number (game-engine tick)
at which the event occurred
:type loop: int
:param pitch: Specifies angle in the vertical plane,
vertical elevation of the camera.
:type pitch: NoneType | float | int
:param reason: There is no valuable information about this parameter
:type reason: NoneType | str
:param target: Specifies the Target class object which includes x and y coordinates,
where the camera location was set
:type target: Target
:param userid: Specifies the id of the player who saved the camera location
:type userid: int
:param yaw: Specifies the angle in the horizontal plane of the camera
:type yaw: NoneType | float | int
"""
# REVIEW: Doctests here:
@staticmethod
def from_dict(d: Dict) -> "CameraUpdate":
"""
Static method returning initialized CameraUpdate class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized CameraUpdate class.
:rtype: CameraUpdate
**Correct Usage Examples:**
Using the from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation
of data to be passed as a parameter because
of the built-in json parser provided by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
>>> from sc2_datasets.replay_parser.game_events.events.nested.target_2d import Target2D #noqa
>>> camera_update_dict = {"distance": None,
... "follow": False,
... "id": 49,
... "loop": 136,
... "pitch": None,
... "reason": None,
... "target": {
... "x": 1.002,
... "y": 4.148},
... "userid": {"userId": 1},
... "yaw": None}
>>> camera_update_object = CameraUpdate.from_dict(d=camera_update_dict)
>>> assert isinstance(camera_update_object, CameraUpdate)
>>> assert camera_update_object.distance == None
>>> assert camera_update_object.follow == False
>>> assert camera_update_object.id == 49
>>> assert camera_update_object.loop == 136
>>> assert camera_update_object.pitch == None
>>> assert camera_update_object.reason == None
>>> assert camera_update_object.target.x == 1.002
>>> assert camera_update_object.target.y == 4.148
>>> assert camera_update_object.userid == 1
>>> assert camera_update_object.yaw == None
"""
return CameraUpdate(
distance=d["distance"],
follow=d["follow"],
id=d["id"],
loop=d["loop"],
pitch=d["pitch"],
reason=d["reason"],
target=Target2D(x=d["target"]["x"], y=d["target"]["y"]),
userid=d["userid"]["userId"],
yaw=d["yaw"],
)
def __init__(
self,
distance: NoneType | float | int,
follow: bool,
id: int,
loop: int,
pitch: NoneType | float | int,
reason: NoneType | str,
target: Target2D,
userid: int,
yaw: NoneType | float | int,
) -> None:
self.distance = distance
self.follow = follow
self.id = id
self.loop = loop
self.pitch = pitch
self.reason = reason
self.target = target
self.userid = userid
self.yaw = yaw
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/camera_update.py
| 0.92948 | 0.592224 |
camera_update.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
class UserOptions(GameEvent):
"""
UserOptions contains some "details" information about a player's settings,
profiles, and configuration in the game.
:param baseBuildNum: Specifies a unique version number of the game build,\
highly likely game engine number
:type baseBuildNum: int
:param buildNum: Specifies a unique version number of the build,\
highly likely game build number
:type buildNum: int
:param cameraFollow: Specifies if the camera object is following an object
:type cameraFollow: bool
:param debugPauseEnabled: There is no valuable information about this parameter
:type debugPauseEnabled: bool
:param developmentCheatsEnabled: Specifies if cheat option for developers have been enabled
:type developmentCheatsEnabled: bool
:param gameFullyDownloaded: Specifies if the game was fully downloaded,\
with campaign, better graphic settings, etc.
:type gameFullyDownloaded: bool
:param hotkeyProfile: Specifies the name of the hotkey profile\
that the player was using in the game
:type hotkeyProfile: str
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param isMapToMapTransition: There is no valuable information about this parameter
:type isMapToMapTransition: bool
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param multiplayerCheatsEnabled: Specifies if multiplayer cheat\
options were enabled in the game
:type multiplayerCheatsEnabled: bool
:param platformMac: Specifies if the game has been played on the Mac operating system
:type platformMac: bool
:param syncChecksummingEnabled: There is no valuable information about this parameter
:type syncChecksummingEnabled: bool
:param testCheatsEnabled: Specifies if the game had tests enabled to detect cheats
:type testCheatsEnabled: bool
:param useGalaxyAsserts: There is no valuable information about this parameter
:type useGalaxyAsserts: bool
:param userid: Specifies the id of the player who\
owned these options in the game
:type userid: int
:param versionFlags: There is no valuable information about this parameter,\
it might be a player's settings version, default = 0
:type versionFlags: int
"""
@staticmethod
def from_dict(d: Dict) -> "UserOptions":
"""
Static method returning initialized UserOptions class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized UserOptions class.
:rtype: UserOptions
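**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> user_options_dict = {
...     "baseBuildNum": 75689,
...     "buildNum": 75689,
...     "cameraFollow": False,
...     "debugPauseEnabled": False,
...     "developmentCheatsEnabled": False,
...     "gameFullyDownloaded": True,
...     "hotkeyProfile": "Default",
...     "id": 7,
...     "isMapToMapTransition": False,
...     "loop": 0,
...     "multiplayerCheatsEnabled": False,
...     "platformMac": False,
...     "syncChecksummingEnabled": False,
...     "testCheatsEnabled": False,
...     "useGalaxyAsserts": False,
...     "userid": {"userId": 0},
...     "versionFlags": 0}
>>> user_options_object = UserOptions.from_dict(d=user_options_dict)
>>> assert isinstance(user_options_object, UserOptions)
>>> assert user_options_object.userid == 0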
"""
return UserOptions(
baseBuildNum=d["baseBuildNum"],
buildNum=d["buildNum"],
cameraFollow=d["cameraFollow"],
debugPauseEnabled=d["debugPauseEnabled"],
developmentCheatsEnabled=d["developmentCheatsEnabled"],
gameFullyDownloaded=d["gameFullyDownloaded"],
hotkeyProfile=d["hotkeyProfile"],
id=d["id"],
isMapToMapTransition=d["isMapToMapTransition"],
loop=d["loop"],
multiplayerCheatsEnabled=d["multiplayerCheatsEnabled"],
platformMac=d["platformMac"],
syncChecksummingEnabled=d["syncChecksummingEnabled"],
testCheatsEnabled=d["testCheatsEnabled"],
useGalaxyAsserts=d["useGalaxyAsserts"],
userid=d["userid"]["userId"],
versionFlags=d["versionFlags"],
)
def __init__(
self,
baseBuildNum: int,
buildNum: int,
cameraFollow: bool,
debugPauseEnabled: bool,
developmentCheatsEnabled: bool,
gameFullyDownloaded: bool,
hotkeyProfile: str,
id: int,
isMapToMapTransition: bool,
loop: int,
multiplayerCheatsEnabled: bool,
platformMac: bool,
syncChecksummingEnabled: bool,
testCheatsEnabled: bool,
useGalaxyAsserts: bool,
userid: int,
versionFlags: int,
) -> None:
self.baseBuildNum = baseBuildNum
self.buildNum = buildNum
self.cameraFollow = cameraFollow
self.debugPauseEnabled = debugPauseEnabled
self.developmentCheatsEnabled = developmentCheatsEnabled
self.gameFullyDownloaded = gameFullyDownloaded
self.hotkeyProfile = hotkeyProfile
self.id = id
self.isMapToMapTransition = isMapToMapTransition
self.loop = loop
self.multiplayerCheatsEnabled = multiplayerCheatsEnabled
self.platformMac = platformMac
self.syncChecksummingEnabled = syncChecksummingEnabled
self.testCheatsEnabled = testCheatsEnabled
self.useGalaxyAsserts = useGalaxyAsserts
self.userid = userid
self.versionFlags = versionFlags
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/user_options.py
| 0.881283 | 0.530297 |
user_options.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
from sc2_datasets.replay_parser.game_events.events.nested.target_2d import Target2D
class CameraSave(GameEvent):
"""
CameraSave represents the replay information about saved camera in the game.
:param id: Highly likely this field specifies an id of the CameraSave object;\
many events share the same id
:type id: int
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param target: Specifies the Target class object which includes x and y coordinates,\
where the camera location was set in the game
:type target: Target
:param userid: Specifies the id of the player who saved the camera location
:type userid: int
:param which: Specifies a hotkey [0-9] to which camera location was set
:type which: int
"""
# REVIEW: Doctests here:
@staticmethod
def from_dict(d: Dict) -> "CameraSave":
"""
Static method returning initialized CameraSave class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized CameraSave class.
:rtype: CameraSave
**Correct Usage Examples:**
Using the from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation
of data to be passed as a parameter because of the built-in JSON
parser provided by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
>>> from sc2_datasets.replay_parser.game_events.events.nested.target_2d import Target2D #noqa
>>> camera_save_dict = {"id": 5,
... "loop": 22,
... "target": {
... "x": 3.578125,
... "y": 0.742431640625},
... "userid": {
... "userId": 0},
... "which": 0}
>>> camera_save_object = CameraSave.from_dict(d=camera_save_dict)
>>> assert isinstance(camera_save_object, CameraSave)
>>> assert camera_save_object.id == 5
>>> assert camera_save_object.loop == 22
>>> assert isinstance(camera_save_object.target, Target2D)
>>> assert camera_save_object.target.x == 3.578125
>>> assert camera_save_object.target.y == 0.742431640625
>>> assert camera_save_object.userid == 0
"""
return CameraSave(
id=d["id"],
loop=d["loop"],
target=Target2D(x=d["target"]["x"], y=d["target"]["y"]),
userid=d["userid"]["userId"],
which=d["which"],
)
def __init__(
self,
id: int,
loop: int,
target: Target2D,
userid: int,
which: int,
) -> None:
self.id = id
self.loop = loop
self.target = target
self.userid = userid
self.which = which
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/camera_save.py
| 0.900207 | 0.518912 |
camera_save.py
|
pypi
|
from types import NoneType
from typing import Dict
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
# TODO: Can the sequence be an int here?
# Should this be encoded somehow if there is a NoneType detected?
class Cmd(GameEvent):
"""
Cmd is containing some "details" information about command interface events
:param id: Specifies the ID of an event which corresponds to its name.
:type id: int
:param loop: Specifies the game loop number (game-engine tick)\
at which the event occurred
:type loop: int
:param otherUnit: There is no specific information about this parameter
:type otherUnit: NoneType
:param sequence: Highly likely this parameter specifies\
an id of the sequence that the user has typed in the console,\
there is no specific information about this parameter
:type sequence: int
:param unitGroup: There is no specific information about this parameter
:type unitGroup: NoneType | int
:param userid: Highly likely this parameter specifies\
the id of the user who has been using the interface, there is no specific information.
:type userid: int
"""
@staticmethod
def from_dict(d: Dict) -> "Cmd":
"""
Static method returning initialized Cmd class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict
:return: Returns an initialized Cmd class.
:rtype: Cmd
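**Correct Usage Examples:**
The example below is a minimal sketch with assumed values; the dictionary
keys mirror the fields consumed by this method.
>>> cmd_dict = {
...     "id": 27,
...     "loop": 3000,
...     "otherUnit": None,
...     "sequence": 5,
...     "unitGroup": None,
...     "userid": {"userId": 1}}
>>> cmd_object = Cmd.from_dict(d=cmd_dict)
>>> assert isinstance(cmd_object, Cmd)
>>> assert cmd_object.sequence == 5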
"""
return Cmd(
id=d["id"],
loop=d["loop"],
otherUnit=d["otherUnit"],
sequence=d["sequence"],
unitGroup=d["unitGroup"],
userid=d["userid"]["userId"],
)
def __init__(
self,
id: int,
loop: int,
otherUnit: NoneType,
sequence: int,
unitGroup: NoneType | int,
userid: int,
) -> None:
self.id = id
self.loop = loop
self.otherUnit = otherUnit
self.sequence = sequence
self.unitGroup = unitGroup
self.userid = userid
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/cmd.py
| 0.603114 | 0.478407 |
cmd.py
|
pypi
|
from types import NoneType
from typing import Dict, List
from sc2_datasets.replay_parser.game_events.game_event import GameEvent
class AddSubgroups(GameEvent):
"""
AddSubgroups is a data type holding information about some subgroup change.
We were not able to resolve its specific meaning.
:param count: Specifies some unknown count parameter.
:type count: int
:param intraSubgroupPriority: Specifies some priority within the intra subgroup.
:type intraSubgroupPriority: int
:param subgroupPriority: Specifies some subgroup priority.
:type subgroupPriority: int
:param unitLink: Most likely specifies which units were affected.
:type unitLink: int
"""
@staticmethod
    def from_dict(d: List[Dict]) -> List["AddSubgroups"]:
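        """
        Static method returning a list of initialized AddSubgroups classes
        from a list of dictionaries. This helps with the original JSON parsing.
        :param d: Specifies a list of dictionaries as available in the JSON file\
        that is a result of pre-processing some .SC2Replay file.
        :type d: List[Dict]
        :return: Returns a list of initialized AddSubgroups classes.
        :rtype: List[AddSubgroups]
        """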
return [
AddSubgroups(
count=subgroup["count"],
intraSubgroupPriority=subgroup["intraSubgroupPriority"],
subgroupPriority=subgroup["subgroupPriority"],
unitLink=subgroup["unitLink"],
)
for subgroup in d
]
def __init__(
self,
count: int,
intraSubgroupPriority: int,
subgroupPriority: int,
unitLink: int,
) -> None:
self.count = count
self.intraSubgroupPriority = intraSubgroupPriority
self.subgroupPriority = subgroupPriority
self.unitLink = unitLink
class Delta(GameEvent):
"""
Most likely specifies a change in which units belong to some subgroups.
We are unsure of the precise definition of this data type.
    :param addSubgroups: Most likely specifies a list of classes with additional
    information on which subgroups were added.
    :type addSubgroups: List[AddSubgroups]
:param addUnitTags: Most likely specifies which unit tags were added to a subgroup.
:type addUnitTags: List[int]
:param removeMask: This is an unknown parameter. We were not able to interpret it.
:type removeMask: NoneType
:param subgroupIndex: Most likely specifies which subgroup was changed.
:type subgroupIndex: int
"""
@staticmethod
def from_dict(d: Dict) -> "Delta":
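        """
        Static method returning initialized Delta class from a dictionary.
        This helps with the original JSON parsing.
        :param d: Specifies a dictionary as available in the JSON file\
        that is a result of pre-processing some .SC2Replay file.
        :type d: Dict
        :return: Returns an initialized Delta class.
        :rtype: Delta
        """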
return Delta(
addSubgroups=AddSubgroups.from_dict(d=d["addSubgroups"]),
addUnitTags=d["addUnitTags"],
removeMask=d["removeMask"],
subgroupIndex=d["subgroupIndex"],
)
def __init__(
self,
addSubgroups: List[AddSubgroups],
addUnitTags: List[int],
removeMask: NoneType,
subgroupIndex: int,
) -> None:
self.addSubgroups = addSubgroups
self.addUnitTags = addUnitTags
self.removeMask = removeMask
self.subgroupIndex = subgroupIndex
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/game_events/events/nested/delta.py
| 0.912993 | 0.394405 |
delta.py
|
pypi
|
from typing import Any, Dict
class Details:
"""
    Data type containing some "details" information about a StarCraft II game.
    :param gameSpeed: Game speed setting as set in the game options.\
    Can be one of "Slower", "Slow", "Normal", "Fast", or "Faster".\
    Typically competitive or ranked games are played on the "Faster" setting.\
    Additional information is available at: https://liquipedia.net/starcraft2/Game_Speed
:type gameSpeed: str
:param isBlizzardMap: Specifies if the map that was used\
in the replay was approved and published by Blizzard (game publisher)
:type isBlizzardMap: bool
:param timeUTC: Denotes the time at which the game was started.
:type timeUTC: str
"""
# REVIEW: Doctests for this:
@staticmethod
def from_dict(d: Dict[str, Any]) -> "Details":
"""
Static method returning initialized Details class from a dictionary.
This helps with the original JSON parsing.
:param d: Specifies a dictionary as available in the JSON file\
that is a result of pre-processing some .SC2Replay file.
:type d: Dict[str, Any]
:return: Returns an initialized Details class.
:rtype: Details
**Correct Usage Examples:**
Using from_dict factory method provides ease of use when parsing
a replay pre-processed with SC2InfoExtractorGo_.
This method requires a dictionary representation of data
to be passed as a parameter because of the built-in json parser
provided by the Python standard library.
.. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
>>> details_dict = {"gameSpeed": "Faster",
... "isBlizzardMap": True,
... "timeUTC": "2017-04-29T05:15:32.4903483+02:00"}
>>> details_object = Details.from_dict(d=details_dict)
>>> assert isinstance(details_object, Details)
>>> assert details_object.gameSpeed == "Faster"
>>> assert details_object.isBlizzardMap == True
>>> assert details_object.timeUTC == "2017-04-29T05:15:32.4903483+02:00"
"""
return Details(
gameSpeed=d["gameSpeed"],
isBlizzardMap=d["isBlizzardMap"],
timeUTC=d["timeUTC"],
)
def __init__(
self,
gameSpeed: str,
isBlizzardMap: bool,
timeUTC: str,
) -> None:
self.gameSpeed = gameSpeed
self.isBlizzardMap = isBlizzardMap
self.timeUTC = timeUTC
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/details/details.py
| 0.927523 | 0.531696 |
details.py
|
pypi
|
from typing import Dict
from sc2_datasets.replay_parser.message_events.message_event import MessageEvent
class Chat(MessageEvent):
"""
Chat holds information about messages between players during the game
:param id: Specifies id of the chat event
:type id: int
:param loop: Specifies game loop number when the event occurred
:type loop: int
:param recipient: Specifies the message recipient of the event
:type recipient: int
:param string: Specifies the message in the chat event
:type string: str
:param userid: Specifies user id causing the event
:type userid: int
"""
@staticmethod
def from_dict(d: Dict) -> "Chat":
"""
Static method returning initialized Chat class from a dictionary.
This helps with the original JSON parsing.
        :param d: Specifies a dictionary as available in the JSON file\
        that is a result of pre-processing some .SC2Replay file.
        :type d: Dict
        :return: Returns an initialized Chat class.
        :rtype: Chat
:rtype: Chat
        **Correct Usage Examples:**
        Using from_dict factory method provides ease of use when parsing
        a replay pre-processed with SC2InfoExtractorGo_.
        This method requires a dictionary representation of data to be passed
        as a parameter because of the built-in json
        parser provided by the Python standard library.
        .. _SC2InfoExtractorGo: https://github.com/Kaszanas/SC2InfoExtractorGo
        The use of this method is intended to get chat information
        from the game's json representation.
        >>> chat_dict = {
... "id": 0,
... "loop": 185,
... "recipient": 0,
... "string": "gl hf",
... "userid": {
... "userId": 1}
... }
...
>>> chat_object = Chat.from_dict(d=chat_dict)
...
>>> assert isinstance(chat_object, Chat)
>>> assert isinstance(chat_object.id, int)
>>> assert isinstance(chat_object.loop, int)
>>> assert isinstance(chat_object.recipient, int)
>>> assert isinstance(chat_object.string, str)
>>> assert isinstance(chat_object.userid, int)
...
>>> assert chat_object.id == 0
>>> assert chat_object.loop == 185
>>> assert chat_object.recipient == 0
>>> assert chat_object.string == "gl hf"
>>> assert chat_object.userid == 1
...
>>> assert chat_object.id >= 0
>>> assert chat_object.loop >= 0
>>> assert chat_object.recipient >= 0
>>> assert chat_object.userid >= 0
        **Incorrect Usage Examples:**
        Passing a dictionary that lacks the required keys raises a KeyError:
        >>> chat_object = Chat.from_dict(d={})
        Traceback (most recent call last):
        ...
        KeyError: 'id'
"""
return Chat(
id=d["id"],
loop=d["loop"],
recipient=d["recipient"],
string=d["string"],
userid=d["userid"]["userId"],
)
def __init__(
self,
id: int,
loop: int,
recipient: int,
string: str,
userid: int,
) -> None:
self.id = id
self.loop = loop
self.recipient = recipient
self.string = string
self.userid = userid
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/replay_parser/message_events/events/chat.py
| 0.896179 | 0.527682 |
chat.py
|
pypi
|
from typing import Dict, List
import numpy as np
# pylama:ignore=E501
from sc2_datasets.replay_parser.tracker_events.events.player_stats.player_stats import (
PlayerStats,
)
from sc2_datasets.replay_data.sc2_replay_data import SC2ReplayData
def filter_player_stats(
sc2_replay: SC2ReplayData,
) -> Dict[str, List[PlayerStats]]:
"""
Filters PlayerStats events and places them in lists based on the playerId
:param sc2_replay: Specifies the replay that the outcome will be selected from.
:type sc2_replay: SC2ReplayData
:return: Returns a dictionary containing a mapping from playerId to the respective player stats.
:rtype: Dict[str, List[PlayerStats]]
    **Correct Usage Examples:**
    The use of this method is intended to filter a player's events from the game based on playerId.
    This may help when analyzing the dataset.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> player_stats = filter_player_stats(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> player_stats = filter_player_stats(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'trackerEvents'
    An exception is also raised if there are more than two players
    in the replay's TrackerEvents.
"""
player_stats_events = {"1": [], "2": []}
# Filter PlayerStats:
for event in sc2_replay.trackerEvents:
if type(event).__name__ == "PlayerStats":
if event.playerId == 1:
player_stats_events["1"].append(event)
elif event.playerId == 2:
player_stats_events["2"].append(event)
else:
raise Exception("There are more than player in TrackerEvents!")
return player_stats_events
def average_player_stats(
sc2_replay: SC2ReplayData,
) -> Dict[str, List[float]]:
"""
Exposes the logic of selecting and averaging PlayerStats events from within TrackerEvents list.
:param sc2_replay: Specifies the replay that the outcome will be selected from.
:type sc2_replay: SC2ReplayData
:return: Returns a dictionary containing averaged features.
:rtype: Dict[str, List[float]]
    **Correct Usage Examples:**
    The use of this method is intended to average the stats of the players from the game.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> averaged_stats = average_player_stats(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> averaged_stats = average_player_stats(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'trackerEvents'
"""
player_stats_dict = filter_player_stats(sc2_replay=sc2_replay)
average_player_features = {}
for key, list_of_events in player_stats_dict.items():
# Summing all of the features that are within Stats that is held in PlayerStats:
sum_of_features = list(list_of_events[0].stats.__dict__.values())
for index, player_stats in enumerate(list_of_events):
if index == 0:
continue
sum_of_features = np.add(
sum_of_features, list(player_stats.stats.__dict__.values())
)
# TODO: Verify if this is not better than the above iterative approach to summing features:
# sum_of_features = functools.reduce(
# np.add, [elem.stats.__dict__.values() for elem in list_of_events]
# )
# Getting the average of the features:
average_player_features[key] = [
item / len(list_of_events) for item in sum_of_features
]
return average_player_features
def select_apm_1v1(sc2_replay: SC2ReplayData) -> Dict[str, int]:
"""
Exposes logic for selecting APM from replay data.
:param sc2_replay: Specifies the replay that the outcome will be selected from.
:type sc2_replay: SC2ReplayData
:return: Returns player id to APM mapping.
:rtype: Dict[str, int]
    **Correct Usage Examples:**
    The use of this method is intended to read the APM values
    from the selected replay.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> player_apm = select_apm_1v1(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> player_apm = select_apm_1v1(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'toonPlayerDescMap'
"""
# Initializing dictionary for holding APM:
player_apm = {"1": 0, "2": 0}
# Selecting from sc2_replay and placing it in the dictionary:
for toon_desc_map in sc2_replay.toonPlayerDescMap:
apm = toon_desc_map.toon_player_info.APM
player_apm[toon_desc_map.toon_player_info.playerID] = apm
return player_apm
def select_outcome_1v1(sc2_replay: SC2ReplayData) -> Dict[str, int]:
"""
Exposes logic for selecting game outcome of a 1v1 game. Maps loss to 0, and win to 1
:param sc2_replay: Specifies the replay that the outcome will be selected from.
:type sc2_replay: SC2ReplayData
:return: Returns a dictionary mapping loss to 0, and win to 1 for playerIDs
:rtype: Dict[str, int]
    **Correct Usage Examples:**
    The use of this method is intended to read the outcome (loss or win)
    of the selected 1v1 game.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> player_outcome = select_outcome_1v1(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> player_outcome = select_outcome_1v1(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'toonPlayerDescMap'
"""
player_outcome = {"1": 0, "2": 0}
result_dict = {"Loss": 0, "Win": 1}
for toon_desc_map in sc2_replay.toonPlayerDescMap:
result = result_dict[toon_desc_map.toon_player_info.result]
player_outcome[toon_desc_map.toon_player_info.playerID] = result
return player_outcome
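# A minimal end-to-end sketch (hedged: the replaypack name and URL below are
# placeholders; per SC2Dataset.__getitem__, indexing without a transform
# returns a parsed SC2ReplayData):
#
#   from sc2_datasets.torch.datasets.sc2_dataset import SC2Dataset
#
#   dataset = SC2Dataset(names_urls=[("ReplaypackName", "https://example.com/replaypack.zip")])
#   sc2_replay = dataset[0]
#   apm = select_apm_1v1(sc2_replay=sc2_replay)
#   outcome = select_outcome_1v1(sc2_replay=sc2_replay)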
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/transforms/utils.py
| 0.81626 | 0.627495 |
utils.py
|
pypi
|
from typing import Dict
import pandas as pd
from sc2_datasets.replay_data.sc2_replay_data import SC2ReplayData
from sc2_datasets.transforms.pandas.player_stats_to_dict import (
playerstats_average_to_dict,
)
from sc2_datasets.transforms.utils import select_apm_1v1, select_outcome_1v1
# REVIEW: Verify this:
def avg_playerstats_pd_dict_transform(
sc2_replay: SC2ReplayData,
) -> Dict[str, int | float]:
"""
Exposes logic for composing a row containing features for classification task.
:param sc2_replay: Specifies the parsed structure of a replay.
:type sc2_replay: SC2ReplayData
:return: Returns a dictionary representation of the averaged values.
    :rtype: Dict[str, int | float]
    **Correct Usage Examples:**
    This method may help when transforming a replay into a dictionary representation.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> transformed_dict = avg_playerstats_pd_dict_transform(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> transformed_dict = avg_playerstats_pd_dict_transform(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'trackerEvents'
"""
# Select average PlayerStats
player_stats_dict = playerstats_average_to_dict(sc2_replay=sc2_replay)
dataframe = pd.DataFrame(player_stats_dict)
# Select outcome and add to dataframe column:
game_outcome = select_outcome_1v1(sc2_replay=sc2_replay)
for player_id, outcome in game_outcome.items():
dataframe[f"outcome_{player_id}"] = outcome
# Select APM and add to dataframe column:
player_apm = select_apm_1v1(sc2_replay=sc2_replay)
for player_id, apm in player_apm.items():
dataframe[f"apm_{player_id}"] = apm
final_dict = dataframe.to_dict()
return final_dict
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/transforms/pandas/avg_playerstats_pd_dict.py
| 0.903503 | 0.547041 |
avg_playerstats_pd_dict.py
|
pypi
|
from typing import Any, List, Dict
import pandas as pd
from sc2_datasets.replay_data.sc2_replay_data import SC2ReplayData
from sc2_datasets.transforms.utils import filter_player_stats
# TODO: Document this:
# TODO: Consider renaming:
# REVIEW: Verify this code!
def playerstats_average_to_dict(sc2_replay: SC2ReplayData) -> Dict[str, float]:
"""
Exposes a logic of converting a single list of TrackerEvents to a dictionary representation
of the data that can be used to initialize a pandas DataFrame.
    :param sc2_replay: Specifies the replay whose PlayerStats events will be averaged.
:type sc2_replay: SC2ReplayData
:return: Returns a dictionary representation of the averaged values.
:rtype: Dict[str, float]
    **Correct Usage Examples:**
    This method may help when operating on data from a game replay.
    It can be used for converting averaged player statistics to a dictionary
    representation, which can then be used for DataFrame initialization in pandas.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the example below:
    >>> average_dict = playerstats_average_to_dict(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> average_dict = playerstats_average_to_dict(sc2_replay=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'trackerEvents'
"""
final_dict_average = {}
# Getting the dataframe representation from tracker events:
playerID_df_repr = playerstats_to_dict(sc2_replay=sc2_replay)
for playerID, df_repr in playerID_df_repr.items():
# Initializing dataframe from dict:
dataframe = pd.DataFrame.from_dict(df_repr)
dict_average_repr = average_playerstats_dataframe(playerstats_df=dataframe)
if playerID not in final_dict_average:
final_dict_average[playerID] = dict_average_repr
return final_dict_average
# REVIEW: This needs to be reviewed:
# TODO: Consider renaming:
def playerstats_to_dict(
sc2_replay: SC2ReplayData,
additional_data_dict: Dict[str, Dict[str, Any]] = {},
) -> Dict[str, Dict[str, List[Any]]]:
"""
Exposes a logic of converting a single list of TrackerEvents to a dictionary representation
of the data that can be used to initialize a pandas DataFrame.
Example additional_data_dict:
{"1": {
"outcome": 1
}
"2": {
"outcome": 0
}
}
Example return:
Without additional data:
{"1": {"gameloop": [1,2],
"army": [120, 250]},
"2": {"gameloop": [1,2],
"army: [50, 300]}
}
With additional data (1 denoting a victory, 0 denoting a loss):
{"1": {"gameloop": [1,2],
"army": [120, 250],
"outcome": [1, 1]},
"2": {"gameloop": [1,2],
"army: [50, 300],
"outcome": [0, 0]}
}
:param sc2_replay: Specifies a replay that will be used to obtain
the list of TrackerEvents to be converted.
:type sc2_replay: SC2ReplayData
    :return: Returns a dictionary of features with additional information
    repeated for all of the occurrences of events.
:rtype: Dict[str, Dict[str, List[Any]]]
    **Correct Usage Examples:**
    This method may help when operating on data from a game replay.
    It can be used for converting a list of TrackerEvents to a dictionary
    representation, which can then be used for DataFrame initialization in pandas.
    The sc2_replay parameter should be set to a parsed replay (SC2ReplayData),
    as in the examples below:
    >>> playerstats_dict = playerstats_to_dict(sc2_replay=sc2_replay)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    The parameter named 'additional_data_dict' is optional, you can leave it blank.
    If you want to use this parameter, be sure that it is shaped as in the example:
    >>> additional_data = {
    ...     "1": {"outcome": 1},
    ...     "2": {"outcome": 2},
    ... }
    >>> playerstats_dict = playerstats_to_dict(
    ...     sc2_replay=sc2_replay,
    ...     additional_data_dict=additional_data)
    >>> assert isinstance(sc2_replay, SC2ReplayData)
    >>> assert isinstance(additional_data, Dict)
    **Incorrect Usage Examples:**
    Passing an additional_data_dict of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> playerstats_dict = playerstats_to_dict(
    ...     sc2_replay=sc2_replay,
    ...     additional_data_dict=wrong_type_object)
    Traceback (most recent call last):
    ...
    TypeError: 'int' object is not subscriptable
"""
dataframe_representation = {}
player_stats_dict = filter_player_stats(sc2_replay=sc2_replay)
for playerID, list_of_events in player_stats_dict.items():
# Dataframe representation of playerID will be a dictionary
# of feature name mapping to the value:
if playerID not in dataframe_representation:
dataframe_representation[playerID] = {}
for event in list_of_events:
# Adding gameloop information to the dict:
if "gameloop" not in dataframe_representation[playerID]:
dataframe_representation[playerID]["gameloop"] = []
dataframe_representation[playerID]["gameloop"].append(event.loop)
# Additional data needs to be added in case that there
# can be some information that is constant throughout the game
# This can be for example MMR of a player, APM of a player, outcome or other
# Appending additional data:
if additional_data_dict:
additional_data = additional_data_dict[playerID]
for key, additional_val in additional_data.items():
if key not in dataframe_representation[playerID]:
dataframe_representation[playerID][key] = []
dataframe_representation[playerID][key].append(additional_val)
# Adding all features to the dict:
for feature_name, feature_value in event.stats.__dict__.items():
if feature_name not in dataframe_representation[playerID]:
dataframe_representation[playerID][feature_name] = []
dataframe_representation[playerID][feature_name].append(feature_value)
return dataframe_representation
# TODO: Consider renaming:
def average_playerstats_dataframe(playerstats_df: pd.DataFrame) -> Dict[str, float]:
"""
Averages a game dataframe
:param playerstats_df: Specifies a dataframe that will be averaged.
:type playerstats_df: pd.DataFrame
:return: Returns a dictionary representation of the averaged values.
:rtype: Dict[str, float]
    **Correct Usage Examples:**
    This method may help when operating on data from a game replay.
    It obtains averaged game dataframe information.
    The playerstats_df parameter should be set to a pandas DataFrame,
    as in the example below:
    >>> average_dict = average_playerstats_dataframe(playerstats_df=playerstats_df)
    >>> assert isinstance(playerstats_df, pd.DataFrame)
    **Incorrect Usage Examples:**
    Passing a parameter of an incorrect type raises an exception:
    >>> wrong_type_object = int(2)
    >>> average_dict = average_playerstats_dataframe(playerstats_df=wrong_type_object)
    Traceback (most recent call last):
    ...
    AttributeError: 'int' object has no attribute 'mean'
"""
mean_playerstats = playerstats_df.mean().to_dict()
return mean_playerstats
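# A minimal sketch of the full pipeline (hedged: sc2_replay is assumed to be
# a parsed SC2ReplayData instance obtained elsewhere, e.g. from an SC2Dataset):
#
#   per_player = playerstats_to_dict(sc2_replay=sc2_replay)
#   dataframe = pd.DataFrame.from_dict(per_player["1"])
#   averaged = average_playerstats_dataframe(playerstats_df=dataframe)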
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/transforms/pandas/player_stats_to_dict.py
| 0.580471 | 0.68052 |
player_stats_to_dict.py
|
pypi
|
from typing import Any, Callable, Dict, List, Set, Tuple
from torch.utils.data import Dataset
from sc2_datasets.torch.datasets.sc2_replaypack_dataset import SC2ReplaypackDataset
from sc2_datasets.replay_data.sc2_replay_data import SC2ReplayData
class SC2Dataset(Dataset):
"""
Inherits from PyTorch Dataset and ensures that the dataset for SC2EGSet is downloaded.
:param unpack_dir: Specifies the path of a directory\
where the dataset files will be unpacked.
:type unpack_dir: str
:param download_dir: Specifies the path of a directory where\
the dataset files will be downloaded.
:type download_dir: str
:param names_urls: Specifies the URL of the dataset which\
will be used to download the files.
:type names_urls: List[Tuple[str, str]]
:param unpack_n_workers: Specifies the number of workers\
that will be used for unpacking the archive, defaults to 16
:type unpack_n_workers: int, optional
    :param transform: Specifies a PyTorch transform; a callable that takes\
    SC2ReplayData and returns a transformed item, defaults to None
    :type transform: Callable | None, optional
:param validator: Specifies the validation option for fetched data, defaults to None
:type validator: Callable | None, optional
"""
def __init__(
self,
names_urls: List[Tuple[str, str]],
unpack_dir: str = "./data/unpack",
download_dir: str = "./data/download",
download: bool = True,
unpack_n_workers: int = 16,
transform: Callable | None = None,
validator: Callable | None = None,
):
# PyTorch fields:
self.transform = transform
# Custom fields:
self.download_dir = download_dir
self.unpack_dir = unpack_dir
self.names_urls = names_urls
self.download = download
self.unpack_n_workers = unpack_n_workers
self.validator = validator
self.skip_files: Dict[str, Set[str]] = {}
        # We have received a URL for the dataset
        # and it might not have been downloaded:
self.len = 0
self.ensure_downloaded()
def ensure_downloaded(self):
"""
Ensures that the dataset was downloaded before accessing the __len__ or __getitem__ methods.
"""
self.replaypacks: List[SC2ReplaypackDataset] = []
# Iterating over the provided URLs:
for replaypack_name, url in self.names_urls:
# Initializing SC2ReplaypackDataset cumulatively calculating its length:
replaypack = SC2ReplaypackDataset(
replaypack_name=replaypack_name,
download_dir=self.download_dir,
unpack_dir=self.unpack_dir,
url=url,
download=self.download,
unpack_n_workers=self.unpack_n_workers,
validator=self.validator,
)
# Retrieving files that were skipped when initializing a dataset,
# This is based on validator:
# TODO: This will be used later:
# self.skip_files[replaypack_name] = replaypack.skip_files
self.replaypacks.append(replaypack)
self.len += len(replaypack)
def __len__(self) -> int:
"""
Returns the number of items that are within the dataset
"""
return self.len
def __getitem__(self, index: Any) -> Tuple[Any, Any] | SC2ReplayData:
"""
Exposes logic of getting a single parsed item by using dataset[index].
:param index: Specifies the index of an item that should be retrieved.
:type index: Any
:raises IndexError: To support negative indexing,\
if the index is less than zero twice, IndexError is raised.
:raises IndexError: If the index is greater than length\
of the dataset IndexError is raised.
:return: Returns a parsed SC2ReplayData from an underlying SC2ReplaypackDataset,\
or a result of a transform that was passed to the dataset.
:rtype: Tuple[Any, Any] | SC2ReplayData
"""
        # If the index is negative, treat it as if expressed from the back of the sequence.
        # For example, if index is -1 and length is 10,
        # it means we are looking for the last element, which is at index 10 + (-1) = 9
        if index < 0:
            index = self.len + index
            if index < 0:
                raise IndexError(f"Computed index {index} is still less than zero!")
        # Valid indices run from 0 to len - 1, so an index equal to the length is out of range:
        if index >= self.len:
            raise IndexError(f"Computed index {index} is out of range for length {self.len}!")
for replaypack in self.replaypacks:
if index < len(replaypack):
if self.transform:
return self.transform(replaypack[index])
return replaypack[index]
else:
index -= len(replaypack)
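# A minimal usage sketch (hedged: the replaypack name and URL below are
# placeholders, not real dataset entries):
#
#   dataset = SC2Dataset(
#       names_urls=[("ReplaypackName", "https://example.com/replaypack.zip")],
#       unpack_n_workers=4,
#   )
#   print(len(dataset))
#   replay = dataset[0]        # SC2ReplayData when no transform is set
#   last_replay = dataset[-1]  # negative indexing is supported by __getitem__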
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/torch/datasets/sc2_dataset.py
| 0.839175 | 0.525673 |
sc2_dataset.py
|
pypi
|
from pathlib import Path
from typing import List, Set, Tuple
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import math
from sc2_datasets.validators.validate_chunk import validate_chunk
from sc2_datasets.validators.validator_utils import (
read_validation_file,
save_validation_file,
)
def validate_integrity_mp(
list_of_replays: List[str],
n_workers: int,
) -> Tuple[Set[str], Set[str]]:
"""
Exposes logic for multiprocess validation of the replays.
Validates if the replay can be parsed by using SC2ReplayData by spawning multiple processes.
:param list_of_replays: Specifies a list of replays that should be checked by the validator.
:type list_of_replays: List[str]
:param n_workers: Specifies the number of workers (processes)\
that will be used for validating replays. Must be a positive int.
:type n_workers: int
:return: Returns a tuple that contains (all validated replays, files to be skipped).
:rtype: Tuple[Set[str], Set[str]]
**Correct Usage Examples:**
Validators can be used to check if a file is correct before
loading it for some modeling task. Below you will find a sample
execution that should contain one correct file and one incorrect file.
    This results in the final tuple containing two sets.
    The first set denotes correctly validated files,
    whereas the second set denotes the files that should
    be skipped in modeling tasks.
>>> validated_replays = validate_integrity_mp(
... list_of_replays=[
... "./test/test_files/single_replay/test_replay.json",
... "./test/test_files/single_replay/test_bit_flip_example.json"],
... n_workers=1)
>>> assert len(validated_replays[0]) == 1
>>> assert len(validated_replays[1]) == 1
Example using more workers than replays:
>>> validated_replays = validate_integrity_mp(
... list_of_replays=[
... "./test/test_files/single_replay/test_replay.json",
... "./test/test_files/single_replay/test_bit_flip_example.json"],
... n_workers=8)
>>> assert len(validated_replays[0]) == 1
>>> assert len(validated_replays[1]) == 1
    Example showing passing an empty list to the validation function:
>>> validated_replays = validate_integrity_mp(
... list_of_replays=[],
... n_workers=8)
>>> assert len(validated_replays[0]) == 0
>>> assert len(validated_replays[1]) == 0
"""
if n_workers <= 0:
raise Exception("Number of workers cannot be equal or less than zero!")
if len(list_of_replays) == 0:
return (set(), set())
chunksize = math.ceil(len(list_of_replays) / n_workers)
futures = []
# Iterate and submit jobs to the ProcessPoolExecutor:
with ProcessPoolExecutor(n_workers) as exe:
for index in range(0, len(list_of_replays), chunksize):
filenames = list_of_replays[index : (index + chunksize)]
futures.append(exe.submit(validate_chunk, filenames))
# Calculate results from futures:
result = []
for future in tqdm(futures, desc="Validating files: "):
result.extend(future.result())
# Convert result to two sets:
validated = set()
skip_files = set()
for sc2_file_info, is_correct in result:
if is_correct:
validated.add(sc2_file_info)
else:
skip_files.add(sc2_file_info)
return (validated, skip_files)
# REVIEW: This function:
# TODO: Add temporary files to be used as a validator file:
def validate_integrity_persist_mp(
list_of_replays: List[str],
n_workers: int,
validation_file_path: Path = Path("validator_file.json"),
) -> Set[str]:
"""
Exposes the logic for validating replays using multiple processes.
This function uses a validation file that persists the files which were previously checked.
:param list_of_replays: Specifies the list of replays that are supposed to be validated.
:type list_of_replays: List[str]
:param n_workers: Specifies the number of workers that will be used to validate the files.
:type n_workers: int
    :param validation_file_path: Specifies the path to the validation\
    file which will be read to obtain the previously validated files.
    :type validation_file_path: Path
:return: Returns a set of files that should be skipped in further processing.
:rtype: Set[str]
**Correct Usage Examples:**
Persistent validators save the validation information to a specified filepath.
Only the files that ought to be skipped are returned as a set from this function.
>>> from pathlib import Path
>>> replays_to_skip = validate_integrity_persist_mp(
... list_of_replays=[
... "test/test_files/single_replay/test_replay.json",
... "test/test_files/single_replay/test_bit_flip_example.json"],
... n_workers=1,
... validation_file_path=Path("validator_file.json"))
>>> assert len(replays_to_skip) == 1
"""
# Reading from a file:
read_validated_files, read_skip_files = read_validation_file(
path=validation_file_path
)
# Validate replays:
files_to_validate = set(list_of_replays) - read_validated_files - read_skip_files
validated_files = set()
skip_files = set()
if files_to_validate:
validated_files, skip_files = validate_integrity_mp(
list_of_replays=list(files_to_validate), n_workers=n_workers
)
# Updating the sets of validated and skip_files:
read_validated_files.update(validated_files)
read_skip_files.update(skip_files)
# Saving to a file:
save_validation_file(
validated_files=read_validated_files,
skip_files=read_skip_files,
path=validation_file_path,
)
return read_skip_files
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/validators/multiprocess_validator.py
| 0.806434 | 0.636925 |
multiprocess_validator.py
|
pypi
|
import logging
from pathlib import Path
from typing import Set, Tuple
import json
# TODO: consider splitting file creation out from this method
def read_validation_file(
path: Path,
) -> Tuple[Set[str], Set[str]]:
"""
Attempts to read the validation file from a specified path
:param path: Specifies the path that will be used to read the validation file.
:type path: Path
    :return: Returns a tuple of (validated files, files that should be skipped).
    :rtype: Tuple[Set[str], Set[str]]
**Correct Usage Examples:**
This function is a helper that is required to have persistent validators which are
able to skip the files that were previously processed.
It is tasked with reading the validation file.
    The return of this function should contain information on which files were validated
(all of the validated files), and which files ought to be skipped.
>>> from pathlib import Path
>>> validator_file_content = read_validation_file(path=Path("validator_file.json"))
>>> assert len(validator_file_content[0]) == 2
>>> assert len(validator_file_content[1]) == 1
"""
if not path.is_file():
with path.open(mode="w", encoding="utf-8") as input_file:
            # If there is no content in the file, initialize empty lists and write them to file:
            initialize_content = {"validated_files": [], "skip_files": []}
            json.dump(initialize_content, input_file)
    # Using set() here; {} literals would create empty dicts instead of sets:
    validated_file_set = set()
    skip_file_set = set()
# Reading the file:
with path.open(mode="r", encoding="utf-8") as input_file:
try:
# Try reading the data from JSON:
json_data = json.load(input_file)
# Immediately converting the lists of strings denoting paths to sets:
validated_file_set = set(json_data["validated_files"])
skip_file_set = set(json_data["skip_files"])
except Exception as e:
logging.error("Error while parsing json!", exc_info=e)
return (validated_file_set, skip_file_set)
def save_validation_file(
validated_files: Set[str],
skip_files: Set[str],
path: Path = Path("validator_file.json"),
) -> None:
"""
Attempts to save the validation file to a specified path
:param validated_files: Specifies the list of replays that were verified\
as ones that were processed.
:type validated_files: Set[str]
:param skip_files: Specifies the list of replays that were verified\
as ones that should be skipped.
:type skip_files: Set[str]
:param path: Specifies the path to the file that will be saved,\
Defaults to Path("validator_file.json")
:type path: Path
**Correct Usage Examples:**
This function is a helper that is required to have persistent validators which are
able to skip the files that were previously processed.
It is tasked with saving the information that was processed
by the validators so that future runs of the program can use this information.
>>> from pathlib import Path
>>> validated_files = {"validated_file_0.json", "validated_file_1.json"}
>>> skip_files = {"validated_file_0.json"}
>>> validator_file_content = save_validation_file(
... validated_files=validated_files,
... skip_files=skip_files)
"""
    # Getting paths as posix to be able to serialize them:
validated_file_list = [Path(file).as_posix() for file in validated_files]
skip_file_list = [Path(file).as_posix() for file in skip_files]
# Initializing the dict that will be serialized to a file:
file_dict = {
"validated_files": validated_file_list,
"skip_files": skip_file_list,
}
with open(path, mode="w", encoding="utf-8") as output_file:
json.dump(file_dict, output_file)
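# A minimal round-trip sketch (hedged: uses a temporary file path instead of
# the default validator file):
#
#   tmp_path = Path("tmp_validator_file.json")
#   save_validation_file(validated_files={"a.json"}, skip_files=set(), path=tmp_path)
#   validated, skipped = read_validation_file(path=tmp_path)
#   assert "a.json" in validated and not skipped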
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/validators/validator_utils.py
| 0.552057 | 0.532425 |
validator_utils.py
|
pypi
|
from pathlib import Path
from typing import List, Set, Tuple
from sc2_datasets.validators.validate_chunk import validate_chunk
from sc2_datasets.validators.validator_utils import (
read_validation_file,
save_validation_file,
)
# REVIEW: This function:
# TODO: Add temporary files to be used as a validator file:
def validate_integrity_persist_sp(
list_of_replays: List[str],
validation_file_path: Path,
) -> Set[str]:
"""
Exposes the logic for validating replays using a single process.
This function uses a validation file that persists the files which were previously checked.
:param list_of_replays: Specifies the list of replays that are supposed to be validated.
:type list_of_replays: List[str]
    :param validation_file_path: Specifies the path to the validation file\
    which will be read to obtain the previously validated files.
:type validation_file_path: Path
:return: Returns a set of files that should be skipped in further processing.
:rtype: Set[str]
**Correct Usage Examples:**
Persistent validators save the validation information to a specified filepath.
Only the files that ought to be skipped are returned as a set from this function.
>>> from pathlib import Path
>>> replays_to_skip = validate_integrity_persist_sp(
... list_of_replays=[
... "test/test_files/single_replay/test_replay.json",
... "test/test_files/single_replay/test_bit_flip_example.json"],
... validation_file_path=Path("validator_file.json"))
>>> assert len(replays_to_skip) == 1
"""
# Reading from a file:
read_validated_files, read_skip_files = read_validation_file(
path=validation_file_path
)
# Validate only the files we haven't already validated:
files_to_validate = set(list_of_replays) - read_validated_files - read_skip_files
# TODO: Consider changing the input param to set
# Perform the validation:
validated_files, skip_files = validate_integrity_sp(
list_of_replays=list(files_to_validate)
)
# Updating the sets of validated and skip_files:
read_validated_files.update(validated_files)
read_skip_files.update(skip_files)
# Save to a file:
save_validation_file(
validated_files=read_validated_files,
skip_files=read_skip_files,
path=validation_file_path,
)
return read_skip_files
def validate_integrity_sp(
list_of_replays: List[str],
) -> Tuple[Set[str], Set[str]]:
"""
Exposes logic for single process integrity validation of a replay.
    :param list_of_replays: Specifies the list of paths to the replay files\
    that will be validated.
:type list_of_replays: List[str]
:return: Returns a tuple that contains (validated replays, files to be skipped).
:rtype: Tuple[Set[str], Set[str]]
**Correct Usage Examples:**
Validators can be used to check if a file is correct before loading it for some modeling task.
Below you will find a sample execution which
should contain one correct file and one incorrect file.
This results in the final tuple containing two sets.
    The first set denotes correctly validated files,
    whereas the second set denotes the files that should be skipped in modeling tasks.
>>> validated_replays = validate_integrity_sp(
... list_of_replays=[
... "./test/test_files/single_replay/test_replay.json",
... "./test/test_files/single_replay/test_bit_flip_example.json"])
    >>> assert len(validated_replays[0]) == 1
>>> assert len(validated_replays[1]) == 1
"""
# TODO: Convert this!
validated_files = validate_chunk(list_of_replays=list_of_replays)
# REVIEW: revisit
# Convert result to two sets:
validated = set()
skip_files = set()
for sc2_file_info, is_correct in validated_files:
if is_correct:
validated.add(sc2_file_info)
else:
skip_files.add(sc2_file_info)
return (validated, skip_files)
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/validators/singleprocess_validator.py
| 0.630571 | 0.68615 |
singleprocess_validator.py
|
pypi
|
import json
import os
from sc2_datasets.utils.zip_utils import unpack_zipfile
from typing import Dict, List, Tuple
def load_replaypack_information(
replaypack_name: str,
replaypack_path: str,
unpack_n_workers: int,
) -> Tuple[str, List[Dict], Dict[str, str], Dict[str, str], Dict[str, str]]:
"""
Helper function that loads replaypack information from a standard directory structure.
:param replaypack_name: Specifies the replaypack name that will be used\
as a subdirectory where replaypack .json files will be extracted.
:type replaypack_name: str
:param replaypack_path: Specifies the path to the extracted replaypack.
:type replaypack_path: str
:param unpack_n_workers: Specifies the number of workers that will\
be used for unpacking the archive.
:type unpack_n_workers: int
    :return: Returns a tuple of: the path to the directory that contains\
    the .json files with data extracted from replays, the list of parsed\
    main log objects, the mapping of files that failed to be processed,\
    the mapping information that specifies what the directory structure\
    was pre-extraction, and the summary information that was generated\
    when extracting the data from replays.
    :rtype: Tuple[str, List[Dict], Dict[str, str], Dict[str, str], Dict[str, str]]
**Correct Usage Examples:**
    The use of this method is intended to load replaypack information
    from an already downloaded and extracted replaypack directory.
    The parameters should be set as in the example below.
>>> load_replaypack_information_object = load_replaypack_information(
... replaypack_name="replaypack_name",
... replaypack_path="replaypack_path",
... unpack_n_workers=1)
>>> assert isinstance(replaypack_name, str)
>>> assert isinstance(replaypack_path, str)
>>> assert isinstance(unpack_n_workers, int)
>>> assert unpack_n_workers >= 1
"""
replaypack_files = os.listdir(replaypack_path)
# Initializing variables that should be returned:
replaypack_data_path = os.path.join(replaypack_path, replaypack_name + "_data")
replaypack_main_log_obj_list = []
replaypack_processed_failed = {}
replaypack_summary = {}
replaypack_dir_mapping = {}
# Extracting the nested .zip files,
# and loading replaypack information files:
for file in replaypack_files:
if file.endswith("_data.zip"):
# Unpack the .zip archive only if it is not unpacked already:
if not os.path.isdir(replaypack_data_path):
replaypack_data_path = unpack_zipfile(
destination_dir=replaypack_path,
subdir=replaypack_name + "_data",
zip_path=os.path.join(replaypack_path, file),
n_workers=unpack_n_workers,
)
if file.endswith("_main_log.log"):
with open(os.path.join(replaypack_path, file)) as main_log_file:
# Reading the lines of the log file and parsing them:
for line in main_log_file.readlines():
log_object = json.loads(line)
replaypack_main_log_obj_list.append(log_object)
if file.endswith("_processed_failed.log"):
with open(os.path.join(replaypack_path, file)) as processed_files:
replaypack_processed_failed = json.load(processed_files)
if file.endswith("_processed_mapping.json"):
with open(os.path.join(replaypack_path, file)) as mapping_file:
replaypack_dir_mapping = json.load(mapping_file)
if file.endswith("_summary.json"):
with open(os.path.join(replaypack_path, file)) as summary_file:
replaypack_summary = json.load(summary_file)
return (
replaypack_data_path,
replaypack_main_log_obj_list,
replaypack_processed_failed,
replaypack_dir_mapping,
replaypack_summary,
)
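# A minimal usage sketch (hedged: paths are placeholders and the replaypack is
# assumed to be already extracted under replaypack_path):
#
#   (data_path, main_log, failed, mapping, summary) = load_replaypack_information(
#       replaypack_name="ReplaypackName",
#       replaypack_path="./data/unpack/ReplaypackName",
#       unpack_n_workers=1,
#   )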
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/utils/dataset_utils.py
| 0.812793 | 0.228404 |
dataset_utils.py
|
pypi
|
import os
import requests
from sc2_datasets.utils.zip_utils import unpack_zipfile
# REVIEW: This was changed, needs review:
def download_replaypack(
destination_dir: str, replaypack_name: str, replaypack_url: str
) -> str:
"""
Exposes logic for downloading a single StarCraft II replaypack from an url.
:param destination_dir: Specifies the destination directory where the replaypack will be saved.
:type destination_dir: str
:param replaypack_name: Specifies the name of a replaypack that will\
be used for the downloaded .zip archive.
:type replaypack_name: str
:param replaypack_url: Specifies the url that is a direct link\
to the .zip which will be downloaded.
:type replaypack_url: str
:raises Exception: If more than one file is downloaded, exception is thrown.
:return: Returns the filepath to the downloaded .zip archive.
:rtype: str
**Correct Usage Examples:**
The use of this method is intended
to download a .zip replaypack containing StarCraft II games.
Replaypack download directory should be empty before running
this function.
Replaypack name will be used as the name for the downloaded .zip archive.
    Replaypack url should be valid and pointing directly to a .zip archive hosted
    on some server.
The parameters should be set as in the example below.
>>> replaypack_download_dir = "datasets/download_directory"
>>> replaypack_name = "TournamentName"
>>> replaypack_url = "some_url"
>>> download_replaypack_object = download_replaypack(
... destination_dir=replaypack_download_dir,
... replaypack_name=replaypack_name,
... replaypack_url=replaypack_url)
>>> assert isinstance(replaypack_download_dir, str)
>>> assert isinstance(replaypack_name, str)
>>> assert isinstance(replaypack_url, str)
    >>> assert download_replaypack_object.endswith(".zip")
"""
# Check if there is something in the destination directory:
existing_files = []
if os.path.exists(destination_dir):
existing_files = os.listdir(destination_dir)
filename_with_ext = replaypack_name + ".zip"
download_filepath = os.path.join(destination_dir, filename_with_ext)
# The file was previously downloaded so return it immediately:
    if existing_files:
        # os.listdir returns bare filenames, so compare against the archive filename:
        if filename_with_ext in existing_files:
            return download_filepath
# Send a request and save the response content into a .zip file.
# The .zip file should be a replaypack:
response = requests.get(url=replaypack_url)
with open(download_filepath, "wb") as output_zip_file:
output_zip_file.write(response.content)
return download_filepath
def download_and_unpack_replaypack(
replaypack_download_dir: str,
replaypack_unpack_dir: str,
replaypack_name: str,
url: str,
) -> str:
"""
Helper function that downloads a replaypack from a specified url.
The archive is saved to replaypack_download_dir using a replaypack_name.
This function extracts the replaypack to the replaypack_unpack_dir
:param replaypack_download_dir: Specifies a directory where the .zip archive will be downloaded.
:type replaypack_download_dir: str
:param replaypack_unpack_dir: Specifies a directory where the .zip file will be extracted
under a replaypack_name directory.
:type replaypack_unpack_dir: str
:param replaypack_name: Specifies a replaypack name which will be used to create paths.
:type replaypack_name: str
:param url: Specifies the url that will be used to download the replaypack.
:type url: str
:return: Returns the filepath to the directory where the .zip was extracted.
:rtype: str
**Correct Usage Examples:**
The use of this method is intended to download a .zip replaypack of SC2 games
and unpack the downloaded files to the folder.
You should set every parameter:
replaypack_download_dir, replaypack_unpack_dir, replaypack_name and url.
The parameters should be set as in the example below.
>>> download_and_unpack_replaypack_object = download_and_unpack_replaypack(
... replaypack_download_dir="/directory/replaypack_download_dir",
... replaypack_unpack_dir="/directory/replaypack_unpack_dir",
... replaypack_name="replaypack_name",
... url="url")
>>> assert isinstance(replaypack_download_dir, str)
>>> assert isinstance(replaypack_unpack_dir, str)
>>> assert isinstance(replaypack_name, str)
>>> assert isinstance(url, str)
"""
# Downloading the replaypack:
download_path = download_replaypack(
destination_dir=replaypack_download_dir,
replaypack_name=replaypack_name,
replaypack_url=url,
)
# Unpacking the replaypack:
_ = unpack_zipfile(
destination_dir=replaypack_unpack_dir,
subdir=replaypack_name,
zip_path=download_path,
n_workers=1,
)
return os.path.join(replaypack_unpack_dir, replaypack_name)
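# A minimal usage sketch (hedged: the URL below is a placeholder, not a real
# replaypack mirror):
#
#   extracted_path = download_and_unpack_replaypack(
#       replaypack_download_dir="./data/download",
#       replaypack_unpack_dir="./data/unpack",
#       replaypack_name="ReplaypackName",
#       url="https://example.com/replaypack.zip",
#   )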
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/utils/download_utils.py
| 0.844922 | 0.276349 |
download_utils.py
|
pypi
|
import logging
import os
import zipfile
import math
from concurrent.futures import ProcessPoolExecutor
from typing import List
from tqdm import tqdm
# REVIEW: Check this:
def unpack_chunk(zip_path: str, filenames: List[str], path_to_extract: str):
"""
Helper function for unpacking a chunk of files from an archive
:param zip_path: Specifies the path to the archive file that will be extracted.
:type zip_path: str
    :param filenames: Specifies a list of the filenames which are within the archive
    and will be extracted.
:type filenames: List[str]
:param path_to_extract: Specifies the path to which the files will be extracted to.
:type path_to_extract: str
**Correct Usage Examples:**
    The use of this method is intended to extract a chunk of files from a .zip archive.
    You should set every parameter: zip_path, filenames and path_to_extract.
May help you to work with dataset.
The parameters should be set as in the example below.
    >>> unpack_chunk_object = unpack_chunk(
    ...     zip_path="./directory/archive.zip",
    ...     filenames=["file_0.json", "file_1.json"],
    ...     path_to_extract="./directory/path_to_extract")
>>> assert isinstance(zip_path, str)
>>> assert all(isinstance(filename, str) for filename in filenames)
>>> assert isinstance(path_to_extract, str)
"""
with zipfile.ZipFile(zip_path, "r") as zip_file:
for filename in filenames:
try:
zip_file.extract(filename, path_to_extract)
except zipfile.error as e:
logging.error(
f"zipfile error was raised: {e}",
exc_info=True,
)
# REVIEW: Check this:
def unpack_zipfile(
destination_dir: str, subdir: str, zip_path: str, n_workers: int
) -> str:
"""
Helper function that unpacks the content of .zip archive.
:param destination_dir: Specifies the path where the .zip file will be extracted.
:type destination_dir: str
:param subdir: Specifies the subdirectory where the content will be extracted.
:type subdir: str
:param zip_path: Specifies the path to the zip file that will be extracted.
:type zip_path: str
:param n_workers: Specifies the number of workers that will be used for unpacking the archive.
:type n_workers: int
:return: Returns a path to the extracted content.
:rtype: str
**Correct Usage Examples:**
The use of this method is intended to extract a zipfile.
    You should set every parameter: destination_dir, subdir, zip_path and n_workers.
May help you to work with dataset.
The parameters should be set as in the example below.
>>> unpack_zipfile_object = unpack_zipfile(
... destination_dir="./directory/destination_dir",
... subdir="./directory/subdir",
... zip_path="./directory/zip_path",
... n_workers=1)
>>> assert isinstance(destination_dir, str)
>>> assert isinstance(subdir, str)
>>> assert isinstance(zip_path, str)
>>> assert isinstance(n_workers, int)
>>> assert n_workers >= 1
"""
if n_workers <= 0:
raise Exception("Number of workers cannot be equal or less than zero!")
file_list: List[str] = []
path_to_extract = os.path.join(destination_dir, subdir)
with zipfile.ZipFile(zip_path, "r") as zip_file:
# Checking the existence of the extraction output directory
# If it doesn't exist it will be created:
if not os.path.exists(path_to_extract):
os.makedirs(path_to_extract)
file_list = zip_file.namelist()
chunksize = math.ceil(len(file_list) / n_workers)
with ProcessPoolExecutor(n_workers) as exe:
for index in tqdm(
range(0, len(file_list), chunksize),
desc=f"Extracting {os.path.basename(destination_dir)}: ",
):
filenames = file_list[index : (index + chunksize)]
_ = exe.submit(unpack_chunk, zip_path, filenames, path_to_extract)
return path_to_extract
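# A minimal usage sketch (hedged: "archive.zip" and the directories below are
# placeholder paths):
#
#   extracted_dir = unpack_zipfile(
#       destination_dir="./data/unpack",
#       subdir="ReplaypackName",
#       zip_path="./data/download/archive.zip",
#       n_workers=4,
#   )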
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/utils/zip_utils.py
| 0.809916 | 0.482429 |
zip_utils.py
|
pypi
|
from typing import Callable, List, Tuple
from sc2_datasets.available_replaypacks import SC2EGSET_DATASET_REPLAYPACKS
from sc2_datasets.lightning.datamodules.sc2_datamodule import SC2DataModule
class SC2EGSetDataModule(SC2DataModule):
"""
Defines a LightningDataModule abstraction for the
SC2EGSet: StarCraft II Esport Game-State Dataset
:param download_dir: Specifies the path where the dataset will be downloaded
:type download_dir: str, optional
:param unpack_dir: Specifies the path where the dataset will be unpacked\
into a custom directory structure, defaults to "./data/unpack"
:type unpack_dir: str, optional
:param transform: Specifies the PyTorch transforms to be used\
on the replaypack (dataset),
Deprecated since version v1.5: Will be removed in v1.7.0,\
defaults to None
:type transform: _type_, optional
:param dims: Specifies a tuple describing the shape of your data.\
Extra functionality exposed in size,
Deprecated since version v1.5: Will be removed in v1.7.0,\
defaults to None
:type dims: _type_, optional
    :param batch_size: Specifies how many individual fetched data samples\
    are collated into a batch, defaults to 256
:type batch_size: int, optional
    :param num_workers: Specifies how many sub-processes the data loader\
    instance should use for data loading, defaults to 0
:type num_workers: int, optional
:param unpack_n_workers: Specifies the number of workers\
that will be used for unpacking the archive, defaults to 16
:type unpack_n_workers: int, optional
:param validator: Specifies the validation option for fetched data, defaults to None
:type validator: Callable | None, optional
"""
def __init__(
self,
replaypacks: List[Tuple[str, str]] = SC2EGSET_DATASET_REPLAYPACKS,
download_dir: str = "./data/download",
unpack_dir: str = "./data/unpack",
download: bool = True,
transform=None,
dims=None,
batch_size: int = 256,
num_workers: int = 0,
unpack_n_workers: int = 16,
validator: Callable | None = None,
):
super().__init__(
replaypacks=replaypacks,
download_dir=download_dir,
unpack_dir=unpack_dir,
download=download,
transform=transform,
dims=dims,
batch_size=batch_size,
num_workers=num_workers,
unpack_n_workers=unpack_n_workers,
validator=validator,
)
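# A minimal usage sketch (hedged: constructor arguments only; relies on the
# bundled SC2EGSET_DATASET_REPLAYPACKS default for the replaypack list):
#
#   dm = SC2EGSetDataModule(batch_size=128, unpack_n_workers=4)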
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/lightning/sc2_egset_datamodule.py
| 0.913 | 0.514095 |
sc2_egset_datamodule.py
|
pypi
|
from typing import Callable, Optional
import pytorch_lightning as pl
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from sc2_datasets.torch.datasets.sc2_replaypack_dataset import SC2ReplaypackDataset
class SC2ReplaypackDataModule(pl.LightningDataModule):
"""
Defines a LightningDataModule abstraction for a single StarCraft II replaypack.
:param replaypack_name: Specifies a replaypack name which will be used as a directory name.
:type replaypack_name: str
:param unpack_dir: Specifies the path where the replaypack (dataset)\
will be unpacked into a custom directory structure, defaults to "./data/unpack"
:type unpack_dir: str, optional
:param download_dir: Specifies the path where the replaypack (dataset)\
will be downloaded, defaults to "./data/unpack"
:type download_dir: str, optional
:param url: Specifies the url which will be used to download\
the replaypack (dataset), defaults to ""
:type url: str, optional
:param download: Specifies if the dataset should be downloaded.\
Otherwise the dataset is loaded from the unpack_dir\
and a custom directory structure is assumed, defaults to True
:type download: bool, optional
:param transform: Specifies the PyTorch transforms to be used on the replaypack (dataset),\
Deprecated since version v1.5: Will be removed in v1.7.0, defaults to None
:type transform: _type_, optional
:param dims: Specifies a tuple describing the shape of your data.\
Extra functionality exposed in size,\
Deprecated since version v1.5: Will be removed in v1.7.0, defaults to None
:type dims: _type_, optional
:param unpack_n_workers: Specifies the number of workers\
that will be used for unpacking the archive, defaults to 16
:type unpack_n_workers: int, optional
:param validator: Specifies the validation option for fetched data, defaults to None
:type validator: Callable | None, optional
"""
def __init__(
self,
replaypack_name: str,
unpack_dir: str = "./data/unpack",
download_dir: str = "./data/download",
url: str = "",
download: bool = True,
transform: Callable | None = None,
dims=None,
batch_size: int = 256,
num_workers: int = 0,
unpack_n_workers: int = 16,
validator: Callable | None = None,
):
super().__init__()
# PyTorch fields:
self.transform = transform
self.dims = dims
self.batch_size = batch_size
self.num_workers = num_workers
# Custom fields:
self.replaypack_name = replaypack_name
self.unpack_dir = unpack_dir
self.download_dir = download_dir
self.url = url
self.download = download
self.unpack_n_workers = unpack_n_workers
self.validator = validator
def prepare_data(self) -> None:
# download, split, etc...
# only called on 1 GPU/TPU in distributed
self.dataset = SC2ReplaypackDataset(
replaypack_name=self.replaypack_name,
unpack_dir=self.unpack_dir,
download_dir=self.download_dir,
url=self.url,
download=self.download,
transform=self.transform,
unpack_n_workers=self.unpack_n_workers,
validator=self.validator,
)
def setup(self, stage: Optional[str] = None) -> None:
# make assignments here (val/train/test split)
# called on every process in DDP
total_length = len(self.dataset)
# TODO: These split ratios could be exposed as initialization parameters:
# 16.(6)% of total entries will be used for testing:
test_length = int(total_length / 6)
# 10% of total entries will be used for validation
val_length = int(total_length / 10)
# everything else will be used for training
train_length = total_length - test_length - val_length
self.train_dataset, self.test_dataset, self.val_dataset = random_split(
self.dataset,
[train_length, test_length, val_length],
)
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def val_dataloader(self) -> DataLoader:
return DataLoader(
self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def test_dataloader(self) -> DataLoader:
return DataLoader(
self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def teardown(self, stage: Optional[str] = None) -> None:
# clean up after fit or test
# called on every process in DDP
return super().teardown(stage)
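# --- Usage sketch (illustrative, not part of the module) ---
# How Lightning drives this module's lifecycle; the replaypack name and URL
# below are hypothetical placeholders.
#
# dm = SC2ReplaypackDataModule(
#     replaypack_name="ExampleTournament2022",  # hypothetical
#     url="https://example.com/replaypack.zip",  # hypothetical
# )
# dm.prepare_data()              # download + unpack once (single process)
# dm.setup()                     # train/test/val split on every process
# train_loader = dm.train_dataloader()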
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/lightning/datamodules/sc2_replaypack_datamodule.py
| 0.928571 | 0.515315 |
sc2_replaypack_datamodule.py
|
pypi
|
from typing import Callable, List, Optional, Tuple
import pytorch_lightning as pl
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from sc2_datasets.torch.datasets.sc2_dataset import SC2Dataset
class SC2DataModule(pl.LightningDataModule):
"""
Defines a LightningDataModule abstraction for a StarCraft II dataset.
:param replaypacks: Specifies a list of (name, url) tuples\
 describing the replaypacks (datasets) that will be used
:type replaypacks: List[Tuple[str, str]]
:param download_dir: Specifies the path where the dataset\
 will be downloaded, defaults to "./data/download"
:type download_dir: str, optional
:param unpack_dir: Specifies the path where the dataset will be unpacked\
 into a custom directory structure, defaults to "./data/unpack"
:type unpack_dir: str, optional
:param download: Specifies if the dataset should be downloaded.\
 Otherwise the dataset is loaded from the unpack_dir\
 and a custom directory structure is assumed, defaults to True
:type download: bool, optional
:param transform: Specifies the PyTorch transforms to be used\
on the replaypack (dataset),
Deprecated since version v1.5: Will be removed in v1.7.0,\
defaults to None
:type transform: _type_, optional
:param dims: Specifies a tuple describing the shape of your data.\
Extra functionality exposed in size,
Deprecated since version v1.5: Will be removed in v1.7.0,\
defaults to None
:type dims: _type_, optional
:param batch_size: Specifies the batch size used when collating\
 individual fetched data samples, defaults to 256
:type batch_size: int, optional
:param num_workers: Specifies how many subprocesses the data loader\
 should use for data loading, defaults to 0
:type num_workers: int, optional
:param unpack_n_workers: Specifies the number of workers\
that will be used for unpacking the archive, defaults to 16
:type unpack_n_workers: int, optional
:param validator: Specifies the validation option for fetched data, defaults to None
:type validator: Callable | None, optional
"""
def __init__(
self,
replaypacks: List[Tuple[str, str]],
download_dir: str = "./data/download",
unpack_dir: str = "./data/unpack",
download: bool = True,
transform=None,
dims=None,
batch_size: int = 256,
num_workers: int = 0,
unpack_n_workers: int = 16,
validator: Callable | None = None,
):
super().__init__()
# PyTorch fields:
self.transform = transform
self.dims = dims
self.batch_size = batch_size
self.num_workers = num_workers
# Custom fields:
self.download_dir = download_dir
self.unpack_dir = unpack_dir
self.download = download
self.unpack_n_workers = unpack_n_workers
self.validator = validator
self.replaypacks = replaypacks
def prepare_data(self) -> None:
# download, split, etc...
# only called on 1 GPU/TPU in distributed
self.dataset = SC2Dataset(
names_urls=self.replaypacks,
download=self.download,
download_dir=self.download_dir,
unpack_dir=self.unpack_dir,
transform=self.transform,
unpack_n_workers=self.unpack_n_workers,
)
def setup(self, stage: Optional[str] = None) -> None:
# make assignments here (val/train/test split)
# called on every process in DDP
total_length = len(self.dataset)
# TODO: These split ratios could be exposed as initialization parameters:
# 16.(6)% of total entries will be used for testing:
test_length = int(total_length / 6)
# 10% of total entries will be used for validation
val_length = int(total_length / 10)
# everything else will be used for training
train_length = total_length - test_length - val_length
self.train_dataset, self.test_dataset, self.val_dataset = random_split(
self.dataset,
[train_length, test_length, val_length],
)
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def val_dataloader(self) -> DataLoader:
return DataLoader(
self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def test_dataloader(self) -> DataLoader:
return DataLoader(
self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers
)
def teardown(self, stage: Optional[str] = None) -> None:
# clean up after fit or test
# called on every process in DDP
return super().teardown(stage)
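# --- Split arithmetic (illustrative) ---
# For a dataset of 1200 samples, setup() above yields:
#   test_length  = int(1200 / 6)  = 200  (~16.6%)
#   val_length   = int(1200 / 10) = 120  (10%)
#   train_length = 1200 - 200 - 120 = 880 (the remainder)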
|
/sc2_datasets-1.0.2-py3-none-any.whl/sc2_datasets/lightning/datamodules/sc2_datamodule.py
| 0.936241 | 0.643931 |
sc2_datamodule.py
|
pypi
|
from collections import deque
from typing import Any, Deque, Dict, FrozenSet, Generator, List, Optional, Sequence, Set, Tuple, Union
from .pixel_map import PixelMap
from .player import Player
from .position import Point2, Rect, Size
class Ramp:
def __init__(self, points: Set[Point2], game_info: "GameInfo"):
self._points: Set[Point2] = points
self.__game_info = game_info
# tested by printing actual building locations vs calculated depot positions
self.x_offset = 0.5 # might be errors with the pixelmap?
self.y_offset = -0.5
@property
def _height_map(self):
return self.__game_info.terrain_height
@property
def _placement_grid(self):
return self.__game_info.placement_grid
@property
def size(self) -> int:
return len(self._points)
def height_at(self, p: Point2) -> int:
return self._height_map[p]
@property
def points(self) -> Set[Point2]:
return self._points.copy()
@property
def upper(self) -> Set[Point2]:
""" Returns the upper points of a ramp. """
max_height = max([self.height_at(p) for p in self._points])
return {p for p in self._points if self.height_at(p) == max_height}
@property
def upper2_for_ramp_wall(self) -> Set[Point2]:
""" Returns the 2 upper ramp points of the main base ramp required for the supply depot and barracks placement properties used in this file. """
if len(self.upper) > 5:
# NOTE: this was way too slow on large ramps
return set() # HACK: makes this work for now
# FIXME: please do
upper2 = sorted(list(self.upper), key=lambda x: x.distance_to(self.bottom_center), reverse=True)
while len(upper2) > 2:
upper2.pop()
return set(upper2)
@property
def top_center(self) -> Point2:
pos = Point2(
(sum([p.x for p in self.upper]) / len(self.upper), sum([p.y for p in self.upper]) / len(self.upper))
)
return pos
@property
def lower(self) -> Set[Point2]:
min_height = min([self.height_at(p) for p in self._points])
return {p for p in self._points if self.height_at(p) == min_height}
@property
def bottom_center(self) -> Point2:
pos = Point2(
(sum([p.x for p in self.lower]) / len(self.lower), sum([p.y for p in self.lower]) / len(self.lower))
)
return pos
@property
def barracks_in_middle(self) -> Point2:
""" Barracks position in the middle of the 2 depots """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset))
p2 = points.pop().offset((self.x_offset, self.y_offset))
# Offset from top point to barracks center is (2, 1)
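# so the circle intersection radius is (2 ** 2 + 1 ** 2) ** 0.5 = 5 ** 0.5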
intersects = p1.circle_intersection(p2, 5 ** 0.5)
anyLowerPoint = next(iter(self.lower))
return max(intersects, key=lambda p: p.distance_to(anyLowerPoint))
raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
@property
def depot_in_middle(self) -> Point2:
""" Depot in the middle of the 3 depots """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset)) # still an error with pixelmap?
p2 = points.pop().offset((self.x_offset, self.y_offset))
# Offset from top point to depot center is (1.5, 0.5)
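# so the circle intersection radius is (1.5 ** 2 + 0.5 ** 2) ** 0.5 = 2.5 ** 0.5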
intersects = p1.circle_intersection(p2, 2.5 ** 0.5)
anyLowerPoint = next(iter(self.lower))
return max(intersects, key=lambda p: p.distance_to(anyLowerPoint))
raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
@property
def corner_depots(self) -> Set[Point2]:
""" Finds the 2 depot positions on the outside """
if len(self.upper2_for_ramp_wall) == 2:
points = self.upper2_for_ramp_wall
p1 = points.pop().offset((self.x_offset, self.y_offset)) # still an error with pixelmap?
p2 = points.pop().offset((self.x_offset, self.y_offset))
center = p1.towards(p2, p1.distance_to(p2) / 2)
depotPosition = self.depot_in_middle
# Offset from middle depot to corner depots is (2, 1)
intersects = center.circle_intersection(depotPosition, 5 ** 0.5)
return intersects
raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
@property
def barracks_can_fit_addon(self) -> bool:
""" Test if a barracks can fit an addon at natural ramp """
# https://i.imgur.com/4b2cXHZ.png
if len(self.upper2_for_ramp_wall) == 2:
return self.barracks_in_middle.x + 1 > max(self.corner_depots, key=lambda depot: depot.x).x
raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
@property
def barracks_correct_placement(self) -> Point2:
""" Corrected placement so that an addon can fit """
if len(self.upper2_for_ramp_wall) == 2:
if self.barracks_can_fit_addon:
return self.barracks_in_middle
else:
return self.barracks_in_middle.offset((-2, 0))
raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
class GameInfo:
def __init__(self, proto):
# TODO: this might require an update during the game because placement grid and
# playable grid are greyed out on minerals, start locations and ramps (debris)
# but we do not want to call information in the fog of war
self._proto = proto
self.players: List[Player] = [Player.from_proto(p) for p in self._proto.player_info]
self.map_name: str = self._proto.map_name
self.local_map_path: str = self._proto.local_map_path
self.map_size: Size = Size.from_proto(self._proto.start_raw.map_size)
self.pathing_grid: PixelMap = PixelMap(self._proto.start_raw.pathing_grid)
self.terrain_height: PixelMap = PixelMap(self._proto.start_raw.terrain_height)
self.placement_grid: PixelMap = PixelMap(self._proto.start_raw.placement_grid)
self.playable_area = Rect.from_proto(self._proto.start_raw.playable_area)
self.map_ramps: Optional[List[Ramp]] = None  # Filled later by BotAI._prepare_first_step
self.player_races: Dict[int, "Race"] = {
p.player_id: p.race_actual or p.race_requested for p in self._proto.player_info
}
self.start_locations: List[Point2] = [Point2.from_proto(sl) for sl in self._proto.start_raw.start_locations]
self.player_start_location: Optional[Point2] = None  # Filled later by BotAI._prepare_first_step
@property
def map_center(self) -> Point2:
return self.playable_area.center
def _find_ramps(self) -> List[Ramp]:
"""Calculate (self.pathing_grid - self.placement_grid) (for sets) and then find ramps by comparing heights."""
rampDict = {
Point2((x, y)): self.pathing_grid[(x, y)] == 0 and self.placement_grid[(x, y)] == 0
for x in range(self.pathing_grid.width)
for y in range(self.pathing_grid.height)
}
rampPoints = {p for p in rampDict if rampDict[p]} # filter only points part of ramp
rampGroups = self._find_groups(rampPoints)
return [Ramp(group, self) for group in rampGroups]
def _find_groups(
self, points: Set[Point2], minimum_points_per_group: int = 8, max_distance_between_points: int = 2
) -> List[Set[Point2]]:
""" From a set/list of points, this function will try to group points together """
""" Paint clusters of points in rectangular map using flood fill algorithm. """
NOT_INTERESTED = -2
NOT_COLORED_YET = -1
currentColor: int = NOT_COLORED_YET
picture: List[List[int]] = [
[NOT_INTERESTED for j in range(self.pathing_grid.width)] for i in range(self.pathing_grid.height)
]
def paint(pt: Point2) -> None:
picture[pt.y][pt.x] = currentColor
nearby: Set[Point2] = set()
for dx in range(-max_distance_between_points, max_distance_between_points + 1):
for dy in range(-max_distance_between_points, max_distance_between_points + 1):
if abs(dx) + abs(dy) <= max_distance_between_points:
nearby.add(Point2((dx, dy)))
for point in points:
paint(point)
remaining: Set[Point2] = set(points)
queue: Deque[Point2] = deque()
foundGroups: List[Set[Point2]] = []
while remaining:
currentGroup: Set[Point2] = set()
if not queue:
currentColor += 1
start = remaining.pop()
paint(start)
queue.append(start)
currentGroup.add(start)
while queue:
base: Point2 = queue.popleft()
for offset in nearby:
px, py = base.x + offset.x, base.y + offset.y
if px < 0 or py < 0 or px >= self.pathing_grid.width or py >= self.pathing_grid.height:
continue
if picture[py][px] != NOT_COLORED_YET:
continue
point: Point2 = Point2((px, py))
remaining.remove(point)
paint(point)
queue.append(point)
currentGroup.add(point)
if len(currentGroup) >= minimum_points_per_group:
foundGroups.append(currentGroup)
""" Returns groups of points as list
[{p1, p2, p3}, {p4, p5, p6, p7, p8}]
"""
return foundGroups
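# --- Illustrative sketch (not part of the module) ---
# A self-contained toy version of the grouping idea in _find_groups, using plain
# tuples instead of Point2 and the same Manhattan adjacency test
# (abs(dx) + abs(dy) <= max_dist):
#
# from collections import deque
#
# def group_points(points, max_dist=2):
#     remaining = set(points)
#     groups = []
#     while remaining:
#         queue = deque([remaining.pop()])
#         group = set()
#         while queue:
#             x, y = queue.popleft()
#             group.add((x, y))
#             for dx in range(-max_dist, max_dist + 1):
#                 for dy in range(-max_dist, max_dist + 1):
#                     neighbor = (x + dx, y + dy)
#                     if abs(dx) + abs(dy) <= max_dist and neighbor in remaining:
#                         remaining.remove(neighbor)
#                         queue.append(neighbor)
#         groups.append(group)
#     return groups
#
# group_points({(0, 0), (1, 1), (10, 10)})  # -> two groups: {(0, 0), (1, 1)} and {(10, 10)}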
|
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/game_info.py
| 0.822759 | 0.427755 |
game_info.py
|
pypi
|
import enum
from s2clientprotocol import (
sc2api_pb2 as sc_pb,
raw_pb2 as raw_pb,
data_pb2 as data_pb,
common_pb2 as common_pb,
error_pb2 as error_pb
)
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
""" For the list of enums, see here
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_gametypes.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_action.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_unit.h
https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_data.h
"""
CreateGameError = enum.Enum("CreateGameError", sc_pb.ResponseCreateGame.Error.items())
PlayerType = enum.Enum("PlayerType", sc_pb.PlayerType.items())
Difficulty = enum.Enum("Difficulty", sc_pb.Difficulty.items())
Status = enum.Enum("Status", sc_pb.Status.items())
Result = enum.Enum("Result", sc_pb.Result.items())
Alert = enum.Enum("Alert", sc_pb.Alert.items())
ChatChannel = enum.Enum("ChatChannel", sc_pb.ActionChat.Channel.items())
Race = enum.Enum("Race", common_pb.Race.items())
DisplayType = enum.Enum("DisplayType", raw_pb.DisplayType.items())
Alliance = enum.Enum("Alliance", raw_pb.Alliance.items())
CloakState = enum.Enum("CloakState", raw_pb.CloakState.items())
Attribute = enum.Enum("Attribute", data_pb.Attribute.items())
TargetType = enum.Enum("TargetType", data_pb.Weapon.TargetType.items())
Target = enum.Enum("Target", data_pb.AbilityData.Target.items())
ActionResult = enum.Enum("ActionResult", error_pb.ActionResult.items())
race_worker: Dict[Race, UnitTypeId] = {
Race.Protoss: UnitTypeId.PROBE,
Race.Terran: UnitTypeId.SCV,
Race.Zerg: UnitTypeId.DRONE
}
race_townhalls: Dict[Race, Set[UnitTypeId]] = {
Race.Protoss: {UnitTypeId.NEXUS},
Race.Terran: {UnitTypeId.COMMANDCENTER, UnitTypeId.ORBITALCOMMAND, UnitTypeId.PLANETARYFORTRESS},
Race.Zerg: {UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE}
}
warpgate_abilities: Dict[AbilityId, AbilityId] = {
AbilityId.GATEWAYTRAIN_ZEALOT: AbilityId.WARPGATETRAIN_ZEALOT,
AbilityId.GATEWAYTRAIN_STALKER: AbilityId.WARPGATETRAIN_STALKER,
AbilityId.GATEWAYTRAIN_HIGHTEMPLAR: AbilityId.WARPGATETRAIN_HIGHTEMPLAR,
AbilityId.GATEWAYTRAIN_DARKTEMPLAR: AbilityId.WARPGATETRAIN_DARKTEMPLAR,
AbilityId.GATEWAYTRAIN_SENTRY: AbilityId.WARPGATETRAIN_SENTRY,
AbilityId.TRAIN_ADEPT: AbilityId.TRAINWARP_ADEPT
}
race_gas: Dict[Race, UnitTypeId] = {
Race.Protoss: UnitTypeId.ASSIMILATOR,
Race.Terran: UnitTypeId.REFINERY,
Race.Zerg: UnitTypeId.EXTRACTOR
}
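# --- Illustrative lookups (not part of the module) ---
# The enums above are generated from the protobuf .items() mappings, so members
# are addressable by name; e.g., with standard s2clientprotocol values:
#
# race_worker[Race.Zerg]                        # -> UnitTypeId.DRONE
# UnitTypeId.LAIR in race_townhalls[Race.Zerg]  # -> True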
|
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/data.py
| 0.617628 | 0.21892 |
data.py
|
pypi
|
import random
from .unit import Unit
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
from typing import List, Dict, Set, Tuple, Any, Callable, Optional, Union # mypy type checking
class Units(list):
"""A collection for units. Makes it easy to select units by selectors."""
@classmethod
def from_proto(cls, units, game_data):
return cls((Unit(u, game_data) for u in units), game_data)
def __init__(self, units, game_data):
super().__init__(units)
self.game_data = game_data
def __call__(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def select(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def copy(self):
return self.subgroup(self)
def __or__(self, other: "Units") -> "Units":
if self is None:
return other
if other is None:
return self
tags = {unit.tag for unit in self}
units = self + [unit for unit in other if unit.tag not in tags]
return Units(units, self.game_data)
def __and__(self, other: "Units") -> "Units":
if self is None:
return other
if other is None:
return self
tags = {unit.tag for unit in self}
units = [unit for unit in other if unit.tag in tags]
return Units(units, self.game_data)
def __sub__(self, other: "Units") -> "Units":
if self is None:
return Units([], self.game_data)
if other is None:
return self
tags = {unit.tag for unit in other}
units = [unit for unit in self if unit.tag not in tags]
return Units(units, self.game_data)
def __hash__(self):
# Hash a tuple of tags; a bare generator object would hash by identity.
return hash(tuple(unit.tag for unit in self))
@property
def amount(self) -> int:
return len(self)
@property
def empty(self) -> bool:
return not bool(self)
@property
def exists(self) -> bool:
return bool(self)
def find_by_tag(self, tag) -> Optional[Unit]:
for unit in self:
if unit.tag == tag:
return unit
return None
def by_tag(self, tag):
unit = self.find_by_tag(tag)
if unit is None:
raise KeyError("Unit not found")
return unit
@property
def first(self) -> Unit:
assert self
return self[0]
def take(self, n: int, require_all: bool = True) -> "Units":
assert (not require_all) or len(self) >= n
return self[:n]
@property
def random(self) -> Unit:
assert self.exists
return random.choice(self)
def random_or(self, other: Any) -> Unit:
if self.exists:
return random.choice(self)
else:
return other
def random_group_of(self, n):
# TODO allow n > amount with n = min(n,amount)?
assert 0 <= n <= self.amount
if n == 0:
return self.subgroup([])
elif self.amount == n:
return self
else:
return self.subgroup(random.sample(self, n))
def in_attack_range_of(self, unit: Unit, bonus_distance: Union[int, float] = 0) -> "Units":
""" Filters units that are in attack range of the unit in parameter """
return self.filter(lambda x: unit.target_in_range(x, bonus_distance=bonus_distance))
def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]:
""" Returns the distance between the closest unit from this group to the target unit """
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.distance_to_closest(
[u.position for u in self]
) # Note: list comprehension creation is 0-5% faster than set comprehension
def furthest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]:
""" Returns the distance between the furthest unit from this group to the target unit """
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.distance_to_furthest([u.position for u in self])
def closest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.closest(self)
def furthest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self.exists
if isinstance(position, Unit):
position = position.position
return position.furthest(self)
def closer_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> "Units":
if isinstance(position, Unit):
position = position.position
distance_squared = distance ** 2
return self.filter(lambda unit: unit.position._distance_squared(position.to2) < distance_squared)
def further_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> "Units":
if isinstance(position, Unit):
position = position.position
distance_squared = distance ** 2
return self.filter(lambda unit: unit.position._distance_squared(position.to2) > distance_squared)
def subgroup(self, units):
return Units(list(units), self.game_data)
def filter(self, pred: Callable) -> "Units":
return self.subgroup(filter(pred, self))
def sorted(self, keyfn: Callable, reverse: bool = False) -> "Units":
if len(self) in {0, 1}:
return self
return self.subgroup(sorted(self, key=keyfn, reverse=reverse))
def sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> "Units":
""" This function should be a bit faster than using units.sorted(keyfn=lambda u: u.distance_to(position)) """
if len(self) in [0, 1]:
return self
position = position.position
return self.sorted(keyfn=lambda unit: unit.position._distance_squared(position), reverse=reverse)
def tags_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> "Units":
""" Filters all units that have their tags in the 'other' set/list/dict """
# example: self.units(QUEEN).tags_in(self.queen_tags_assigned_to_do_injects)
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.tag in other)
def tags_not_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> "Units":
""" Filters all units that have their tags not in the 'other' set/list/dict """
# example: self.units(QUEEN).tags_not_in(self.queen_tags_assigned_to_do_injects)
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.tag not in other)
def of_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Filters all units that are of a specific type """
# example: self.units.of_type([ZERGLING, ROACH, HYDRALISK, BROODLORD])
if isinstance(other, UnitTypeId):
other = {other}
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id in other)
def exclude_type(
self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]
) -> "Units":
""" Filters all units that are not of a specific type """
# example: self.known_enemy_units.exclude_type([OVERLORD])
if isinstance(other, UnitTypeId):
other = {other}
if isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id not in other)
def same_tech(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Usage:
'self.units.same_tech(UnitTypeId.COMMANDCENTER)' or 'self.units.same_tech(UnitTypeId.ORBITALCOMMAND)'
returns all CommandCenter, CommandCenterFlying, OrbitalCommand, OrbitalCommandFlying, PlanetaryFortress
This also works with a set/list/dict parameter, e.g. 'self.units.same_tech({UnitTypeId.COMMANDCENTER, UnitTypeId.SUPPLYDEPOT})'
Untested: This should return the equivalents for Hatchery, WarpPrism, Observer, Overseer, SupplyDepot and others
"""
if isinstance(other, UnitTypeId):
other = {other}
tech_alias_types = set(other)
for unitType in other:
tech_alias = self.game_data.units[unitType.value].tech_alias
if tech_alias:
for same in tech_alias:
tech_alias_types.add(same)
return self.filter(
lambda unit: unit.type_id in tech_alias_types
or unit._type_data.tech_alias is not None
and any(same in tech_alias_types for same in unit._type_data.tech_alias)
)
def same_unit(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> "Units":
""" Usage:
'self.units.same_tech(UnitTypeId.COMMANDCENTER)'
returns CommandCenter and CommandCenterFlying,
'self.units.same_tech(UnitTypeId.ORBITALCOMMAND)'
returns OrbitalCommand and OrbitalCommandFlying
This also works with a set/list/dict parameter, e.g. 'self.units.same_tech({UnitTypeId.COMMANDCENTER, UnitTypeId.SUPPLYDEPOT})'
Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and others
"""
if isinstance(other, UnitTypeId):
other = {other}
unit_alias_types = set(other)
for unitType in other:
unit_alias = self.game_data.units[unitType.value].unit_alias
if unit_alias:
unit_alias_types.add(unit_alias)
return self.filter(
lambda unit: unit.type_id in unit_alias_types
or unit._type_data.unit_alias is not None
and unit._type_data.unit_alias in unit_alias_types
)
@property
def center(self) -> Point2:
""" Returns the central point of all units in this list """
assert self
pos = Point2(
(
sum([unit.position.x for unit in self]) / self.amount,
sum([unit.position.y for unit in self]) / self.amount,
)
)
return pos
@property
def selected(self) -> "Units":
return self.filter(lambda unit: unit.is_selected)
@property
def tags(self) -> Set[int]:
return {unit.tag for unit in self}
@property
def ready(self) -> "Units":
return self.filter(lambda unit: unit.is_ready)
@property
def not_ready(self) -> "Units":
return self.filter(lambda unit: not unit.is_ready)
@property
def noqueue(self) -> "Units":
return self.filter(lambda unit: unit.noqueue)
@property
def idle(self) -> "Units":
return self.filter(lambda unit: unit.is_idle)
@property
def owned(self) -> "Units":
return self.filter(lambda unit: unit.is_mine)
@property
def enemy(self) -> "Units":
return self.filter(lambda unit: unit.is_enemy)
@property
def flying(self) -> "Units":
return self.filter(lambda unit: unit.is_flying)
@property
def not_flying(self) -> "Units":
return self.filter(lambda unit: not unit.is_flying)
@property
def structure(self) -> "Units":
return self.filter(lambda unit: unit.is_structure)
@property
def not_structure(self) -> "Units":
return self.filter(lambda unit: not unit.is_structure)
@property
def gathering(self) -> "Units":
return self.filter(lambda unit: unit.is_gathering)
@property
def returning(self) -> "Units":
return self.filter(lambda unit: unit.is_returning)
@property
def collecting(self) -> "Units":
return self.filter(lambda unit: unit.is_collecting)
@property
def visible(self) -> "Units":
return self.filter(lambda unit: unit.is_visible)
@property
def mineral_field(self) -> "Units":
return self.filter(lambda unit: unit.is_mineral_field)
@property
def vespene_geyser(self) -> "Units":
return self.filter(lambda unit: unit.is_vespene_geyser)
@property
def prefer_idle(self) -> "Units":
return self.sorted(lambda unit: unit.is_idle, reverse=True)
def prefer_close_to(self, p: Union[Unit, Point2, Point3]) -> "Units":
# TODO redundant?
return self.sorted_by_distance_to(p)
class UnitSelection(Units):
def __init__(self, parent, unit_type_id=None):
assert unit_type_id is None or isinstance(unit_type_id, (UnitTypeId, set))
if isinstance(unit_type_id, set):
assert all(isinstance(t, UnitTypeId) for t in unit_type_id)
self.unit_type_id = unit_type_id
super().__init__([u for u in parent if self.matches(u)], parent.game_data)
def matches(self, unit):
if self.unit_type_id is None:
# empty selector matches everything
return True
elif isinstance(self.unit_type_id, set):
return unit.type_id in self.unit_type_id
else:
return self.unit_type_id == unit.type_id
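# --- Usage sketch (illustrative, not part of the module) ---
# The selectors above chain naturally. Assuming 'units' is a Units collection
# (e.g. BotAI's self.units) and 'target' is a Point2:
#
# idle_marines = units(UnitTypeId.MARINE).idle
# nearby = units.of_type({UnitTypeId.MARINE, UnitTypeId.MARAUDER}).closer_than(15, target)
# closest = nearby.closest_to(target) if nearby.exists else None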
|
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/units.py
| 0.851429 | 0.448245 |
units.py
|
pypi
|
from .data import PlayerType, Race, Difficulty
from .bot_ai import BotAI
class AbstractPlayer:
def __init__(self, p_type, race=None, name=None, difficulty=None):
assert isinstance(p_type, PlayerType)
assert name is None or isinstance(name, str)
self.name = name
if p_type == PlayerType.Computer:
assert isinstance(difficulty, Difficulty)
elif p_type == PlayerType.Observer:
assert race is None
assert difficulty is None
else:
assert isinstance(race, Race)
assert difficulty is None
self.type = p_type
if race is not None:
self.race = race
if p_type == PlayerType.Computer:
self.difficulty = difficulty
class Human(AbstractPlayer):
def __init__(self, race, name=None):
super().__init__(PlayerType.Participant, race, name=name)
def __str__(self):
if self.name is not None:
return f"Human({self.race}, name={self.name !r})"
else:
return f"Human({self.race})"
class Bot(AbstractPlayer):
def __init__(self, race, ai, name=None):
"""
AI can be None if this player object is just used to inform the
server about player types.
"""
assert isinstance(ai, BotAI) or ai is None
super().__init__(PlayerType.Participant, race, name=name)
self.ai = ai
def __str__(self):
if self.name is not None:
return f"Bot({self.race}, {self.ai}, name={self.name !r})"
else:
return f"Bot({self.race}, {self.ai})"
class Computer(AbstractPlayer):
def __init__(self, race, difficulty=Difficulty.Easy):
super().__init__(PlayerType.Computer, race, difficulty=difficulty)
def __str__(self):
return f"Computer({self.race}, {self.difficulty})"
class Observer(AbstractPlayer):
def __init__(self):
super().__init__(PlayerType.Observer)
def __str__(self):
return f"Observer()"
class Player(AbstractPlayer):
@classmethod
def from_proto(cls, proto):
if PlayerType(proto.type) == PlayerType.Observer:
return cls(proto.player_id, PlayerType(proto.type), None, None, None)
return cls(
proto.player_id,
PlayerType(proto.type),
Race(proto.race_requested),
Difficulty(proto.difficulty) if proto.HasField("difficulty") else None,
Race(proto.race_actual) if proto.HasField("race_actual") else None,
proto.player_name if proto.HasField("player_name") else None,
)
def __init__(self, player_id, type, requested_race, difficulty=None, actual_race=None, name=None):
super().__init__(type, requested_race, difficulty=difficulty, name=name)
self.id: int = player_id
self.actual_race: Race = actual_race
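# --- Usage sketch (illustrative, not part of the module) ---
# Typical player setup for a match; 'MyBot' is a hypothetical BotAI subclass.
#
# players = [
#     Bot(Race.Zerg, MyBot()),
#     Computer(Race.Terran, Difficulty.Hard),
# ]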
|
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/player.py
| 0.717804 | 0.332351 |
player.py
|
pypi
|
from bisect import bisect_left
from functools import lru_cache, reduce
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
from .data import Attribute, Race
from .unit_command import UnitCommand
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
from .constants import ZERGLING
FREE_MORPH_ABILITY_CATEGORIES = [
"Lower", "Raise", # SUPPLYDEPOT
"Land", "Lift", # Flying buildings
]
def split_camel_case(text) -> list:
"""Splits words from CamelCase text."""
return list(reduce(
lambda a, b: (a + [b] if b.isupper() else a[:-1] + [a[-1] + b]),
text,
[]
))
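# Example (illustrative): split_camel_case("BarracksTechLabResearch")
# -> ['Barracks', 'Tech', 'Lab', 'Research']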
class GameData:
def __init__(self, data):
ids = set(a.value for a in AbilityId if a.value != 0)
self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities if a.ability_id in ids}
self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
self.unit_types: Dict[int, UnitTypeId] = {}
@lru_cache(maxsize=256)
def calculate_ability_cost(self, ability) -> "Cost":
if isinstance(ability, AbilityId):
ability = self.abilities[ability.value]
elif isinstance(ability, UnitCommand):
ability = self.abilities[ability.ability.value]
assert isinstance(ability, AbilityData), f"C: {ability}"
for unit in self.units.values():
if unit.creation_ability is None:
continue
if not AbilityData.id_exists(unit.creation_ability.id.value):
continue
if unit.creation_ability.is_free_morph:
continue
if unit.creation_ability == ability:
if unit.id == ZERGLING:
# HARD CODED: zerglings are generated in pairs
return Cost(
unit.cost.minerals * 2,
unit.cost.vespene * 2,
unit.cost.time
)
# Correction for morphing units, e.g. orbital would return 550/0 instead of actual 150/0
morph_cost = unit.morph_cost
if morph_cost: # can be None
return morph_cost
# Correction for zerg structures without morph: Extractor would return 75 instead of actual 25
return unit.cost_zerg_corrected
for upgrade in self.upgrades.values():
if upgrade.research_ability == ability:
return upgrade.cost
return Cost(0, 0)
class AbilityData:
ability_ids: List[int] = [] # sorted list
for ability_id in AbilityId: # 1000 items Enum is slow
ability_ids.append(ability_id.value)
ability_ids.remove(0)
ability_ids.sort()
@classmethod
def id_exists(cls, ability_id):
assert isinstance(ability_id, int), f"Wrong type: {ability_id} is not int"
if ability_id == 0:
return False
i = bisect_left(cls.ability_ids, ability_id) # quick binary search
return i != len(cls.ability_ids) and cls.ability_ids[i] == ability_id
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
assert self.id != 0
def __repr__(self) -> str:
return f"AbilityData(name={self._proto.button_name})"
@property
def id(self) -> AbilityId:
if self._proto.remaps_to_ability_id:
return AbilityId(self._proto.remaps_to_ability_id)
return AbilityId(self._proto.ability_id)
@property
def link_name(self) -> str:
""" For Stimpack this returns 'BarracksTechLabResearch' """
return self._proto.link_name
@property
def button_name(self) -> str:
""" For Stimpack this returns 'Stimpack' """
return self._proto.button_name
@property
def friendly_name(self) -> str:
""" For Stimpack this returns 'Research Stimpack' """
return self._proto.friendly_name
@property
def is_free_morph(self) -> bool:
parts = split_camel_case(self._proto.link_name)
for p in parts:
if p in FREE_MORPH_ABILITY_CATEGORIES:
return True
return False
@property
def cost(self) -> "Cost":
return self._game_data.calculate_ability_cost(self.id)
class UnitTypeData:
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
def __repr__(self) -> str:
return f"UnitTypeData(name={self.name})"
@property
def id(self) -> UnitTypeId:
return UnitTypeId(self._proto.unit_id)
@property
def name(self) -> str:
return self._proto.name
@property
def creation_ability(self) -> AbilityData:
if self._proto.ability_id == 0:
return None
if self._proto.ability_id not in self._game_data.abilities:
return None
return self._game_data.abilities[self._proto.ability_id]
@property
def attributes(self) -> List[Attribute]:
return self._proto.attributes
def has_attribute(self, attr) -> bool:
assert isinstance(attr, Attribute)
return attr in self.attributes
@property
def has_minerals(self) -> bool:
return self._proto.has_minerals
@property
def has_vespene(self) -> bool:
return self._proto.has_vespene
@property
def cargo_size(self) -> int:
""" How much cargo this unit uses up in cargo_space """
return self._proto.cargo_size
@property
def tech_requirement(self) -> Optional[UnitTypeId]:
""" Tech-building requirement of buildings - may work for units but unreliably """
if self._proto.tech_requirement == 0:
return None
if self._proto.tech_requirement not in self._game_data.units:
return None
return UnitTypeId(self._proto.tech_requirement)
@property
def tech_alias(self) -> Optional[List[UnitTypeId]]:
""" Building tech equality, e.g. OrbitalCommand is the same as CommandCenter """
""" Building tech equality, e.g. Hive is the same as Lair and Hatchery """
return_list = []
for tech_alias in self._proto.tech_alias:
if tech_alias in self._game_data.units:
return_list.append(UnitTypeId(tech_alias))
""" For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair] """
""" For SCV, this returns None """
if return_list:
return return_list
return None
@property
def unit_alias(self) -> Optional[UnitTypeId]:
""" Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand """
if self._proto.unit_alias == 0:
return None
if self._proto.unit_alias not in self._game_data.units:
return None
""" For flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand """
return UnitTypeId(self._proto.unit_alias)
@property
def race(self) -> Race:
return Race(self._proto.race)
@property
def cost(self) -> "Cost":
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost,
self._proto.build_time
)
@property
def cost_zerg_corrected(self) -> "Cost":
""" This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
# a = self._game_data.units(UnitTypeId.ZERGLING)
# print(a)
# print(vars(a))
return Cost(
self._proto.mineral_cost - 50,
self._proto.vespene_cost,
self._proto.build_time
)
else:
return self.cost
@property
def morph_cost(self) -> Optional["Cost"]:
""" This returns 150 minerals for OrbitalCommand instead of 550 """
# Fix for BARRACKSREACTOR which has tech alias [REACTOR] which has (0, 0) cost
if self.tech_alias is None or self.tech_alias[0] in {UnitTypeId.TECHLAB, UnitTypeId.REACTOR}:
return None
# Morphing a HIVE would have HATCHERY and LAIR in the tech alias - now subtract HIVE cost from LAIR cost instead of from HATCHERY cost
tech_alias_cost_minerals = max([self._game_data.units[tech_alias.value].cost.minerals for tech_alias in self.tech_alias])
tech_alias_cost_vespene = max([self._game_data.units[tech_alias.value].cost.vespene for tech_alias in self.tech_alias])
return Cost(
self._proto.mineral_cost - tech_alias_cost_minerals,
self._proto.vespene_cost - tech_alias_cost_vespene,
self._proto.build_time
)
class UpgradeData:
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
def __repr__(self):
return f"UpgradeData({self.name} - research ability: {self.research_ability}, {self.cost})"
@property
def name(self) -> str:
return self._proto.name
@property
def research_ability(self) -> Optional[AbilityData]:
if self._proto.ability_id == 0:
return None
if self._proto.ability_id not in self._game_data.abilities:
return None
return self._game_data.abilities[self._proto.ability_id]
@property
def cost(self) -> "Cost":
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost,
self._proto.research_time
)
class Cost:
def __init__(self, minerals, vespene, time=None):
self.minerals = minerals
self.vespene = vespene
self.time = time
def __repr__(self) -> str:
return f"Cost({self.minerals}, {self.vespene})"
def __eq__(self, other) -> bool:
return self.minerals == other.minerals and self.vespene == other.vespene
def __ne__(self, other) -> bool:
return self.minerals != other.minerals or self.vespene != other.vespene
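# --- Morph cost arithmetic (illustrative) ---
# morph_cost subtracts the most expensive tech alias from the raw proto cost.
# E.g. OrbitalCommand lists 550 minerals in the proto data, and its tech alias
# CommandCenter costs 400, so morph_cost yields 550 - 400 = 150 minerals,
# matching the in-game morph price.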
|
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/game_data.py
| 0.895981 | 0.450239 |
game_data.py
|
pypi
|
from typing import Any, Dict, List, Optional, Set, Tuple, Union # mypy type checking
from .ids.buff_id import BuffId
from .cache import property_mutable_cache, property_immutable_cache
from . import unit_command
from .data import Alliance, Attribute, CloakState, DisplayType, Race, TargetType, warpgate_abilities
from .game_data import GameData
from .ids.ability_id import AbilityId
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
class PassengerUnit:
def __init__(self, proto_data, game_data):
assert isinstance(game_data, GameData)
self._proto = proto_data
self._game_data = game_data
self.cache = {}
def __repr__(self):
""" Will return string of this form: Unit(name='SCV', tag=4396941328) """
return f"{self.__class__.__name__}(name={self.name !r}, tag={self.tag})"
@property
def type_id(self) -> UnitTypeId:
""" UnitTypeId found in sc2/ids/unit_typeid
Caches all type_ids of the same unit type"""
unit_type = self._proto.unit_type
if unit_type not in self._game_data.unit_types:
self._game_data.unit_types[unit_type] = UnitTypeId(unit_type)
return self._game_data.unit_types[unit_type]
@property_immutable_cache
def _type_data(self) -> "UnitTypeData":
return self._game_data.units[self._proto.unit_type]
@property_immutable_cache
def name(self) -> str:
return self._type_data.name
@property_immutable_cache
def race(self) -> Race:
return Race(self._type_data._proto.race)
@property_immutable_cache
def tag(self) -> int:
return self._proto.tag
@property_immutable_cache
def is_structure(self) -> bool:
return Attribute.Structure.value in self._type_data.attributes
@property_immutable_cache
def is_light(self) -> bool:
return Attribute.Light.value in self._type_data.attributes
@property_immutable_cache
def is_armored(self) -> bool:
return Attribute.Armored.value in self._type_data.attributes
@property_immutable_cache
def is_biological(self) -> bool:
return Attribute.Biological.value in self._type_data.attributes
@property_immutable_cache
def is_mechanical(self) -> bool:
return Attribute.Mechanical.value in self._type_data.attributes
@property_immutable_cache
def is_robotic(self) -> bool:
return Attribute.Robotic.value in self._type_data.attributes
@property_immutable_cache
def is_massive(self) -> bool:
return Attribute.Massive.value in self._type_data.attributes
@property_immutable_cache
def is_psionic(self) -> bool:
return Attribute.Psionic.value in self._type_data.attributes
@property_immutable_cache
def cargo_size(self) -> Union[float, int]:
""" How much cargo this unit uses up in cargo_space """
return self._type_data.cargo_size
@property_immutable_cache
def _weapons(self):
if hasattr(self._type_data._proto, "weapons"):
return self._type_data._proto.weapons
return False
@property_immutable_cache
def can_attack(self) -> bool:
""" Can attack at all"""
return bool(self._weapons)
@property_immutable_cache
def can_attack_ground(self) -> bool:
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}),
None,
)
return weapon is not None
return False
@property_immutable_cache
def ground_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}),
None,
)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property_immutable_cache
def ground_range(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Ground.value, TargetType.Any.value}),
None,
)
if weapon:
return weapon.range
return 0
@property_immutable_cache
def can_attack_air(self) -> bool:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
return weapon is not None
return False
@property_immutable_cache
def air_dps(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
if weapon:
return (weapon.damage * weapon.attacks) / weapon.speed
return 0
@property_immutable_cache
def air_range(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
if weapon:
return weapon.range
return 0
@property_immutable_cache
def bonus_damage(self):
""" Returns a tuple of form 'bonus damage, armor type' if unit does bonus damage against armor type
Light = 1; Armored = 2; Biological = 3; Mechanical = 4; Robotic = 5; Psionic = 6; Massive = 7;
Structure = 8; Hover = 9; Heroic = 10; Summoned = 11 """
# TODO Consider unit with ability attacks like Oracle, Thor, Baneling
if self._weapons:
for weapon in self._weapons:
if weapon.damage_bonus:
b = weapon.damage_bonus[0]
return b.bonus, b.attribute
@property_immutable_cache
def armor(self) -> Union[int, float]:
""" Does not include upgrades """
return self._type_data._proto.armor
@property_immutable_cache
def sight_range(self) -> Union[int, float]:
return self._type_data._proto.sight_range
@property_immutable_cache
def movement_speed(self) -> Union[int, float]:
# TODO INCLUDE BUFFS AND DEBUFFS
return self._type_data._proto.movement_speed
@property_immutable_cache
def health(self) -> Union[int, float]:
""" Does not include shields """
return self._proto.health
@property_immutable_cache
def health_max(self) -> Union[int, float]:
""" Does not include shields """
return self._proto.health_max
@property_immutable_cache
def health_percentage(self) -> Union[int, float]:
""" Does not include shields """
if self._proto.health_max == 0:
return 0
return self._proto.health / self._proto.health_max
@property_immutable_cache
def shield(self) -> Union[int, float]:
return self._proto.shield
@property_immutable_cache
def shield_max(self) -> Union[int, float]:
return self._proto.shield_max
@property_immutable_cache
def shield_percentage(self) -> Union[int, float]:
if self._proto.shield_max == 0:
return 0
return self._proto.shield / self._proto.shield_max
@property_immutable_cache
def energy(self) -> Union[int, float]:
return self._proto.energy
@property_immutable_cache
def energy_max(self) -> Union[int, float]:
return self._proto.energy_max
@property_immutable_cache
def energy_percentage(self) -> Union[int, float]:
if self._proto.energy_max == 0:
return 0
return self._proto.energy / self._proto.energy_max
class Unit(PassengerUnit):
@property_immutable_cache
def is_snapshot(self) -> bool:
return self._proto.display_type == DisplayType.Snapshot.value
@property_immutable_cache
def is_visible(self) -> bool:
return self._proto.display_type == DisplayType.Visible.value
@property_immutable_cache
def alliance(self) -> Alliance:
return self._proto.alliance
@property_immutable_cache
def is_mine(self) -> bool:
return self._proto.alliance == Alliance.Self.value
@property_immutable_cache
def is_enemy(self) -> bool:
return self._proto.alliance == Alliance.Enemy.value
@property_immutable_cache
def owner_id(self) -> int:
return self._proto.owner
@property_immutable_cache
def position(self) -> Point2:
return Point2((self._proto.pos.x, self._proto.pos.y))
@property_immutable_cache
def position3d(self) -> Point3:
"""3d position of the unit."""
return Point3.from_proto(self._proto.pos)
def distance_to(self, p: Union["Unit", Point2, Point3], bot: "BotAI" = None) -> Union[int, float]:
""" Using the 2d distance between self and p. To calculate the 3d distance,
use unit.position3d.distance_to(p) """
if bot and isinstance(p, Unit):
index = bot.distances_tag_dict
return (bot.unit_distances_dict[index[self.tag]][index[p.tag]]) ** 0.5
return self.position.distance_to_point2(p.position)
@property_immutable_cache
def facing(self) -> Union[int, float]:
""" Returns float in range [0,2p). 0 means in direction of x axis."""
return self._proto.facing
@property_immutable_cache
def radius(self) -> Union[int, float]:
""" Half of unit size. See https://liquipedia.net/starcraft2/Unit_Statistics_(Legacy_of_the_Void) """
return self._proto.radius
@property_immutable_cache
def detect_range(self) -> Union[int, float]:
return self._proto.detect_range
@property_immutable_cache
def radar_range(self) -> Union[int, float]:
return self._proto.radar_range
@property_immutable_cache
def build_progress(self) -> Union[int, float]:
""" Returns completion in range [0,1]."""
return self._proto.build_progress
@property_immutable_cache
def is_ready(self) -> bool:
return self.build_progress == 1
@property_immutable_cache
def cloak(self) -> CloakState:
""" Returns cloak state.
See https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_unit.h#L95 """
return self._proto.cloak
@property_immutable_cache
def is_cloaked(self) -> bool:
return self._proto.cloak in {CloakState.Cloaked.value, CloakState.CloakedDetected.value}
@property_immutable_cache
def is_blip(self) -> bool:
""" Detected by sensor tower. """
return self._proto.is_blip
@property_immutable_cache
def is_powered(self) -> bool:
""" Is powered by a pylon nearby. """
return self._proto.is_powered
@property_immutable_cache
def is_burrowed(self) -> bool:
return self._proto.is_burrowed
@property_immutable_cache
def is_flying(self) -> bool:
return self._proto.is_flying
@property_immutable_cache
def is_psionic(self) -> bool:
return Attribute.Psionic.value in self._type_data.attributes
@property_immutable_cache
def is_mineral_field(self) -> bool:
return self._type_data.has_minerals
@property_immutable_cache
def is_vespene_geyser(self) -> bool:
return self._type_data.has_vespene
@property
def tech_alias(self) -> Optional[List[UnitTypeId]]:
""" Building tech equality, e.g. OrbitalCommand is the same as CommandCenter
For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair]
For SCV, this returns None """
return self._type_data.tech_alias
@property_immutable_cache
def unit_alias(self) -> Optional[UnitTypeId]:
""" Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand
For flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand
For SCV, this returns None """
return self._type_data.unit_alias
@property_immutable_cache
def mineral_contents(self) -> int:
""" How many minerals a mineral field has left to mine """
return self._proto.mineral_contents
@property_immutable_cache
def vespene_contents(self) -> int:
""" How much gas is remaining in a geyser """
return self._proto.vespene_contents
@property_immutable_cache
def has_vespene(self) -> bool:
""" Checks if a geyser has any gas remaining (can't build extractors on empty geysers), useful for lategame """
return bool(self._proto.vespene_contents)
@property_immutable_cache
def weapon_cooldown(self) -> Union[int, float]:
""" Returns some time (more than game loops) until the unit can fire again,
returns -1 for units that can't attack.
Usage:
if unit.weapon_cooldown == 0:
await self.do(unit.attack(target))
elif unit.weapon_cooldown < 0:
await self.do(unit.move(closest_allied_unit_because_cant_attack))
else:
await self.do(unit.move(retreatPosition))
"""
if self.can_attack:
return self._proto.weapon_cooldown
return -1
@property_immutable_cache
def has_cargo(self) -> bool:
""" If this unit has units loaded """
return bool(self._proto.cargo_space_taken)
@property_immutable_cache
def cargo_used(self) -> Union[float, int]:
""" How much cargo space is used (some units take up more than 1 space) """
return self._proto.cargo_space_taken
@property_immutable_cache
def cargo_max(self) -> Union[float, int]:
""" How much cargo space is totally available - CC: 5, Bunker: 4, Medivac: 8
and Bunker can only load infantry, CC only SCVs """
return self._proto.cargo_space_max
@property_mutable_cache
def passengers(self) -> Set["PassengerUnit"]:
""" Units inside a Bunker, CommandCenter, Nydus, Medivac, WarpPrism, Overlord """
return {PassengerUnit(unit, self._game_data) for unit in self._proto.passengers}
@property_mutable_cache
def passengers_tags(self) -> Set[int]:
return {unit.tag for unit in self._proto.passengers}
def target_in_range(self, target: "Unit", bonus_distance: Union[int, float] = 0) -> bool:
""" Includes the target's radius when calculating distance to target """
if self.can_attack_ground and not target.is_flying:
unit_attack_range = self.ground_range
elif self.can_attack_air and (target.is_flying or target.type_id == UnitTypeId.COLOSSUS):
unit_attack_range = self.air_range
else:
unit_attack_range = -1
return (
self.position._distance_squared(target.position)
<= (self.radius + target.radius + unit_attack_range - bonus_distance) ** 2
)
@property_immutable_cache
def is_carrying_minerals(self) -> bool:
""" Checks if a worker or MULE is carrying (gold-)minerals. """
return any(
buff.value in self._proto.buff_ids
for buff in {BuffId.CARRYMINERALFIELDMINERALS, BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS}
)
@property_immutable_cache
def is_carrying_vespene(self) -> bool:
""" Checks if a worker is carrying vespene. """
return any(
buff.value in self._proto.buff_ids
for buff in {
BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS,
BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS,
BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG,
}
)
@property_immutable_cache
def is_selected(self) -> bool:
return self._proto.is_selected
@property_mutable_cache
def orders(self) -> List["UnitOrder"]:
return [UnitOrder.from_proto(o, self._game_data) for o in self._proto.orders]
@property_immutable_cache
def noqueue(self) -> bool:
return not self.orders
@property_immutable_cache
def is_moving(self) -> bool:
return self.orders and self.orders[0].ability.id is AbilityId.MOVE
@property_immutable_cache
def is_attacking(self) -> bool:
return self.orders and self.orders[0].ability.id in {
AbilityId.ATTACK,
AbilityId.ATTACK_ATTACK,
AbilityId.ATTACK_ATTACKTOWARDS,
AbilityId.ATTACK_ATTACKBARRAGE,
AbilityId.SCAN_MOVE,
}
@property_immutable_cache
def is_patrolling(self) -> bool:
""" Checks if a unit is patrolling. """
return self.orders and self.orders[0].ability.id is AbilityId.PATROL
@property_immutable_cache
def is_gathering(self) -> bool:
""" Checks if a unit is on its way to a mineral field / vespene geyser to mine. """
return self.orders and self.orders[0].ability.id is AbilityId.HARVEST_GATHER
@property_immutable_cache
def is_returning(self) -> bool:
""" Checks if a unit is returning from mineral field / vespene geyser to deliver resources to townhall. """
return self.orders and self.orders[0].ability.id is AbilityId.HARVEST_RETURN
@property_immutable_cache
def is_collecting(self) -> bool:
""" Combines the two properties above. """
return self.orders and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER, AbilityId.HARVEST_RETURN}
@property_immutable_cache
def is_constructing_scv(self) -> bool:
""" Checks if the unit is an SCV that is currently building. """
return self.orders and self.orders[0].ability.id in {
AbilityId.TERRANBUILD_ARMORY,
AbilityId.TERRANBUILD_BARRACKS,
AbilityId.TERRANBUILD_BUNKER,
AbilityId.TERRANBUILD_COMMANDCENTER,
AbilityId.TERRANBUILD_ENGINEERINGBAY,
AbilityId.TERRANBUILD_FACTORY,
AbilityId.TERRANBUILD_FUSIONCORE,
AbilityId.TERRANBUILD_GHOSTACADEMY,
AbilityId.TERRANBUILD_MISSILETURRET,
AbilityId.TERRANBUILD_REFINERY,
AbilityId.TERRANBUILD_SENSORTOWER,
AbilityId.TERRANBUILD_STARPORT,
AbilityId.TERRANBUILD_SUPPLYDEPOT,
}
@property_immutable_cache
def is_repairing(self) -> bool:
return self.orders and self.orders[0].ability.id in {
AbilityId.EFFECT_REPAIR,
AbilityId.EFFECT_REPAIR_MULE,
AbilityId.EFFECT_REPAIR_SCV,
}
@property_immutable_cache
def order_target(self) -> Optional[Union[int, Point2]]:
""" Returns the target tag (if it is a Unit) or Point2 (if it is a Position)
from the first order, returns None if the unit is idle """
if self.orders:
if isinstance(self.orders[0].target, int):
return self.orders[0].target
else:
return Point2.from_proto(self.orders[0].target)
return None
@property_immutable_cache
def is_idle(self) -> bool:
return not self.orders
@property_immutable_cache
def add_on_tag(self) -> int:
return self._proto.add_on_tag
@property_immutable_cache
def add_on_land_position(self) -> Point2:
""" If unit is addon (techlab or reactor), returns the position
where a terran building has to land to connect to addon """
return self.position.offset(Point2((-2.5, 0.5)))
@property_immutable_cache
def has_add_on(self) -> bool:
# A non-zero add_on_tag means an addon is attached.
return bool(self.add_on_tag)
@property_immutable_cache
def assigned_harvesters(self) -> int:
""" Number of workers currently gathering resources at a geyser or mining base."""
return self._proto.assigned_harvesters
@property_immutable_cache
def ideal_harvesters(self) -> int:
""" Returns 3 for geysers, 2*n for n mineral patches on that base."""
return self._proto.ideal_harvesters
@property_immutable_cache
def surplus_harvesters(self) -> int:
""" Returns a positive number if it has too many harvesters mining,
a negative number if it has too few mining """
return self._proto.assigned_harvesters - self._proto.ideal_harvesters
def train(self, unit, *args, **kwargs):
return self(self._game_data.units[unit.value].creation_ability.id, *args, **kwargs)
def build(self, unit, *args, **kwargs):
return self(self._game_data.units[unit.value].creation_ability.id, *args, **kwargs)
def research(self, upgrade, *args, **kwargs):
""" Requires UpgradeId to be passed instead of AbilityId """
return self(self._game_data.upgrades[upgrade.value].research_ability.id, *args, **kwargs)
def has_buff(self, buff):
assert isinstance(buff, BuffId)
return buff.value in self._proto.buff_ids
def warp_in(self, unit, placement, *args, **kwargs):
normal_creation_ability = self._game_data.units[unit.value].creation_ability.id
return self(warpgate_abilities[normal_creation_ability], placement, *args, **kwargs)
def attack(self, *args, **kwargs):
""" Target can be a Unit or Point2 """
return self(AbilityId.ATTACK, *args, **kwargs)
def gather(self, *args, **kwargs):
""" Target can be a mineral patch or geyser """
return self(AbilityId.HARVEST_GATHER, *args, **kwargs)
def return_resource(self, *args, **kwargs):
""" Does not need a target """
return self(AbilityId.HARVEST_RETURN, *args, **kwargs)
def move(self, *args, **kwargs):
""" Target can be a Unit (to follow that unit) or Point2 """
return self(AbilityId.MOVE, *args, **kwargs)
def scan_move(self, *args, **kwargs):
""" TODO: What does this actually do? """
return self(AbilityId.SCAN_MOVE, *args, **kwargs)
def hold_position(self, *args, **kwargs):
return self(AbilityId.HOLDPOSITION, *args, **kwargs)
def stop(self, *args, **kwargs):
return self(AbilityId.STOP, *args, **kwargs)
def patrol(self, *args, **kwargs):
return self(AbilityId.PATROL, *args, **kwargs)
def repair(self, *args, **kwargs):
return self(AbilityId.EFFECT_REPAIR, *args, **kwargs)
def __hash__(self):
return hash(self.tag)
def __call__(self, ability, *args, **kwargs):
return unit_command.UnitCommand(ability, self, *args, **kwargs)
class UnitOrder:
@classmethod
def from_proto(cls, proto, game_data):
return cls(
game_data.abilities[proto.ability_id],
(proto.target_world_space_pos if proto.HasField("target_world_space_pos") else proto.target_unit_tag),
proto.progress,
)
def __init__(self, ability, target, progress=None):
self.ability = ability
self.target = target
self.progress = progress
def __repr__(self):
return f"UnitOrder({self.ability}, {self.target}, {self.progress})"
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/unit.py (pypi)
from typing import Callable, Set, FrozenSet, List
from .position import Point2
class PixelMap:
def __init__(self, proto):
self._proto = proto
assert self.bits_per_pixel % 8 == 0, "Unsupported pixel density"
assert self.width * self.height * self.bits_per_pixel / 8 == len(self._proto.data)
self.data = bytearray(self._proto.data)
@property
def width(self):
return self._proto.size.x
@property
def height(self):
return self._proto.size.y
@property
def bits_per_pixel(self):
return self._proto.bits_per_pixel
@property
def bytes_per_pixel(self):
return self._proto.bits_per_pixel // 8
def __getitem__(self, pos):
x, y = pos
assert 0 <= x < self.width, f"x is {x}, self.width is {self.width}"
assert 0 <= y < self.height, f"y is {y}, self.height is {self.height}"
# note: for y > 0 the index is negative, so the row is read from the end of the raw buffer
index = -self.width * y + x
# print(f"INDEX IS {index} FOR {pos}")
start = index * self.bytes_per_pixel
data = self.data[start : start + self.bytes_per_pixel]
return int.from_bytes(data, byteorder="little", signed=False)
def __setitem__(self, pos, val):
""" Example usage: self._game_info.pathing_grid[Point2((20, 20))] = [255] """
x, y = pos
assert 0 <= x < self.width, f"x is {x}, self.width is {self.width}"
assert 0 <= y < self.height, f"y is {y}, self.height is {self.height}"
index = -self.width * y + x
start = index * self.bytes_per_pixel
self.data[start : start + self.bytes_per_pixel] = val
def is_set(self, p):
return self[p] != 0
def is_empty(self, p):
return not self.is_set(p)
def invert(self):
raise NotImplementedError
def flood_fill(self, start_point: Point2, pred: Callable[[int], bool]) -> Set[Point2]:
nodes: Set[Point2] = set()
queue: List[Point2] = [start_point]
while queue:
x, y = queue.pop()
if not (0 <= x < self.width and 0 <= y < self.height):
continue
if Point2((x, y)) in nodes:
continue
if pred(self[x, y]):
nodes.add(Point2((x, y)))
for a in [-1, 0, 1]:
for b in [-1, 0, 1]:
if not (a == 0 and b == 0):
queue.append(Point2((x + a, y + b)))
return nodes
def flood_fill_all(self, pred: Callable[[int], bool]) -> Set[FrozenSet[Point2]]:
groups: Set[FrozenSet[Point2]] = set()
for x in range(self.width):
for y in range(self.height):
if any((x, y) in g for g in groups):
continue
if pred(self[x, y]):
groups.add(frozenset(self.flood_fill(Point2((x, y)), pred)))
return groups
def print(self, wide=False):
for y in range(self.height):
for x in range(self.width):
print("#" if self.is_set((x, y)) else " ", end=(" " if wide else ""))
print("")
def save_image(self, filename):
data = [(0, 0, self[x, y]) for y in range(self.height) for x in range(self.width)]
from PIL import Image
im = Image.new("RGB", (self.width, self.height))
im.putdata(data)
im.save(filename)
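# Hedged usage sketch (not from the original file): PixelMap only needs a
# proto-like object exposing size.x, size.y, bits_per_pixel and raw data, so a
# SimpleNamespace stand-in is enough to demonstrate indexing and flood_fill.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_proto = SimpleNamespace(
        size=SimpleNamespace(x=4, y=4),
        bits_per_pixel=8,
        data=bytes([1, 1, 0, 0,
                    1, 0, 0, 0,
                    0, 0, 1, 1,
                    0, 0, 1, 1]),
    )
    pm = PixelMap(fake_proto)
    # all non-zero pixels connected to (0, 0), including diagonal neighbours
    region = pm.flood_fill(Point2((0, 0)), pred=lambda v: v != 0)
    pm.print(wide=True)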
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/pixel_map.py (pypi)
import logging
from select import select
from gym.spaces import Space
from .bot_ai import BotAI
logger = logging.getLogger(__name__)
class Sc2Bot(BotAI):
def __init__(self, runner_pipe, initializer, observer, actuator):
'''
Sc2 bot launched in another thread by Sc2Env.
params:
- initializer: fn
Called on start, must set BotAI.action_space and
BotAI.observation_space.
The defined spaces must inherit from gym.spaces.space.Space.
example:
def initializer(bot):
bot.action_space = gym.spaces.Box(5,10)
bot.observation_space = gym.spaces.Box(3,11)
- observer: async fn
Called on step, takes a python-sc2 bot instance, must return
an observation and a reward.
An observation is a numpy array matching the observation space
and a reward is a number.
- actuator: async fn
Called on step, takes a python-sc2 bot instance and an action.
An action is a numpy array matching the action space.
Used to run actions on python-sc2.
'''
self.runner_pipe = runner_pipe
self.initializer = initializer
self.observer = observer
self.actuator = actuator
def on_start(self):
self.initializer(self)
assert hasattr(self, 'action_space') and hasattr(self, 'observation_space')
assert isinstance(self.action_space, Space) and isinstance(self.observation_space, Space)
self.runner_pipe.send([self.action_space, self.observation_space])
async def on_step(self, iteration: int):
self.runner_pipe.send(await self.observer(self))
logger.debug('waiting Sc2Env action')
select([self.runner_pipe], [], [], 10)
action = self.runner_pipe.recv()
if action is None:
raise Exception('End of training')
await self.actuator(self, action)
def on_end(self, result):
self.runner_pipe.send(None)
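# Hedged sketch (not from the original file): minimal callbacks matching the
# contract documented in __init__ above. The spaces, shapes and values are
# placeholders; a real observer/actuator would read and drive the bot state.
import gym
import numpy as np

def example_initializer(bot):
    bot.action_space = gym.spaces.Discrete(4)
    bot.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(3,))

async def example_observer(bot):
    observation = np.zeros(3, dtype=np.float32)  # must match observation_space
    reward = 0.0
    return observation, reward

async def example_actuator(bot, action):
    # translate `action` (e.g. an int from Discrete(4)) into python-sc2 orders
    pass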
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/Sc2Bot.py (pypi)
import datetime
from s2clientprotocol import (
score_pb2 as score_pb,
)
from sc2.position import Point2
class Renderer(object):
def __init__(self, client, map_size, minimap_size) -> None:
self._client = client
self._window = None
self._map_size = map_size
self._map_image = None
self._minimap_size = minimap_size
self._minimap_image = None
self._mouse_x, self._mouse_y = None, None
self._text_supply = None
self._text_vespene = None
self._text_minerals = None
self._text_score = None
self._text_time = None
async def render(self, observation):
render_data = observation.observation.render_data
map_size = render_data.map.size
map_data = render_data.map.data
minimap_size = render_data.minimap.size
minimap_data = render_data.minimap.data
map_width, map_height = map_size.x, map_size.y
map_pitch = -map_width * 3
minimap_width, minimap_height = minimap_size.x, minimap_size.y
minimap_pitch = -minimap_width * 3
if not self._window:
from pyglet.window import Window
from pyglet.image import ImageData
from pyglet.text import Label
self._window = Window(width=map_width, height=map_height)
self._window.on_mouse_press = self._on_mouse_press
self._window.on_mouse_release = self._on_mouse_release
self._window.on_mouse_drag = self._on_mouse_drag
self._map_image = ImageData(map_width, map_height, 'RGB', map_data, map_pitch)
self._minimap_image = ImageData(minimap_width, minimap_height, 'RGB', minimap_data,
minimap_pitch)
self._text_supply = Label(
'', font_name='Arial', font_size=16, anchor_x='right', anchor_y='top',
x=self._map_size[0] - 10, y=self._map_size[1] - 10, color=(200, 200, 200, 255)
)
self._text_vespene = Label(
'', font_name='Arial', font_size=16, anchor_x='right', anchor_y='top',
x=self._map_size[0] - 130, y=self._map_size[1] - 10, color=(28, 160, 16, 255)
)
self._text_minerals = Label(
'', font_name='Arial', font_size=16, anchor_x='right', anchor_y='top',
x=self._map_size[0] - 200, y=self._map_size[1] - 10, color=(68, 140, 255, 255)
)
self._text_score = Label(
'', font_name='Arial', font_size=16, anchor_x='left', anchor_y='top',
x=10, y=self._map_size[1] - 10, color=(219, 30, 30, 255)
)
self._text_time = Label(
'', font_name='Arial', font_size=16, anchor_x='right', anchor_y='bottom',
x=self._minimap_size[0] - 10, y=self._minimap_size[1] + 10, color=(255, 255, 255, 255)
)
else:
self._map_image.set_data('RGB', map_pitch, map_data)
self._minimap_image.set_data('RGB', minimap_pitch, minimap_data)
self._text_time.text = str(datetime.timedelta(seconds=(observation.observation.game_loop * 0.725) // 16))
if observation.observation.HasField('player_common'):
self._text_supply.text = "{} / {}".format(observation.observation.player_common.food_used,
observation.observation.player_common.food_cap)
self._text_vespene.text = str(observation.observation.player_common.vespene)
self._text_minerals.text = str(observation.observation.player_common.minerals)
if observation.observation.HasField('score'):
self._text_score.text = "{} score: {}".format(
score_pb._SCORE_SCORETYPE.values_by_number[observation.observation.score.score_type].name,
observation.observation.score.score
)
await self._update_window()
if self._client.in_game and (not observation.player_result) and self._mouse_x and self._mouse_y:
await self._client.move_camera_spatial(Point2((self._mouse_x, self._minimap_size[0] - self._mouse_y)))
self._mouse_x, self._mouse_y = None, None
async def _update_window(self):
self._window.switch_to()
self._window.dispatch_events()
self._window.clear()
self._map_image.blit(0, 0)
self._minimap_image.blit(0, 0)
self._text_time.draw()
self._text_score.draw()
self._text_minerals.draw()
self._text_vespene.draw()
self._text_supply.draw()
self._window.flip()
def _on_mouse_press(self, x, y, button, modifiers):
if button != 1: # 1: mouse.LEFT
return
if x > self._minimap_size[0] or y > self._minimap_size[1]:
return
self._mouse_x, self._mouse_y = x, y
def _on_mouse_release(self, x, y, button, modifiers):
if button != 1: # 1: mouse.LEFT
return
if x > self._minimap_size[0] or y > self._minimap_size[1]:
return
self._mouse_x, self._mouse_y = x, y
def _on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if not buttons & 1: # 1: mouse.LEFT
return
if x > self._minimap_size[0] or y > self._minimap_size[1]:
return
self._mouse_x, self._mouse_y = x, y
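# Hedged usage sketch (not from the original file): the renderer is driven with
# raw API observations; `client` and `observation` would come from the
# python-sc2 game loop, and the sizes below are illustrative placeholders.
async def _example_render_once(client, observation):
    renderer = Renderer(client, map_size=(800, 600), minimap_size=(200, 150))
    await renderer.render(observation)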
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/renderer.py (pypi)
class ScoreDetails:
""" Accessable in self.state.score during step function
For more information, see https://github.com/Blizzard/s2client-proto/blob/master/s2clientprotocol/score.proto
"""
def __init__(self, proto):
self._data = proto
self._proto = proto.score_details
@property
def score_type(self):
return self._data.score_type
@property
def score(self):
return self._data.score
@property
def idle_production_time(self):
return self._proto.idle_production_time
@property
def idle_worker_time(self):
return self._proto.idle_worker_time
@property
def total_value_units(self):
return self._proto.total_value_units
@property
def total_value_structures(self):
return self._proto.total_value_structures
@property
def killed_value_units(self):
return self._proto.killed_value_units
@property
def killed_value_structures(self):
return self._proto.killed_value_structures
@property
def collected_minerals(self):
return self._proto.collected_minerals
@property
def collected_vespene(self):
return self._proto.collected_vespene
@property
def collection_rate_minerals(self):
return self._proto.collection_rate_minerals
@property
def collection_rate_vespene(self):
return self._proto.collection_rate_vespene
@property
def spent_minerals(self):
return self._proto.spent_minerals
@property
def spent_vespene(self):
return self._proto.spent_vespene
@property
def food_used_none(self):
return self._proto.food_used.none
@property
def food_used_army(self):
return self._proto.food_used.army
@property
def food_used_economy(self):
return self._proto.food_used.economy
@property
def food_used_technology(self):
return self._proto.food_used.technology
@property
def food_used_upgrade(self):
return self._proto.food_used.upgrade
@property
def killed_minerals_none(self):
return self._proto.killed_minerals.none
@property
def killed_minerals_army(self):
return self._proto.killed_minerals.army
@property
def killed_minerals_economy(self):
return self._proto.killed_minerals.economy
@property
def killed_minerals_technology(self):
return self._proto.killed_minerals.technology
@property
def killed_minerals_upgrade(self):
return self._proto.killed_minerals.upgrade
@property
def killed_vespene_none(self):
return self._proto.killed_vespene.none
@property
def killed_vespene_army(self):
return self._proto.killed_vespene.army
@property
def killed_vespene_economy(self):
return self._proto.killed_vespene.economy
@property
def killed_vespene_technology(self):
return self._proto.killed_vespene.technology
@property
def killed_vespene_upgrade(self):
return self._proto.killed_vespene.upgrade
@property
def lost_minerals_none(self):
return self._proto.lost_minerals.none
@property
def lost_minerals_army(self):
return self._proto.lost_minerals.army
@property
def lost_minerals_economy(self):
return self._proto.lost_minerals.economy
@property
def lost_minerals_technology(self):
return self._proto.lost_minerals.technology
@property
def lost_minerals_upgrade(self):
return self._proto.lost_minerals.upgrade
@property
def lost_vespene_none(self):
return self._proto.lost_vespene.none
@property
def lost_vespene_army(self):
return self._proto.lost_vespene.army
@property
def lost_vespene_economy(self):
return self._proto.lost_vespene.economy
@property
def lost_vespene_technology(self):
return self._proto.lost_vespene.technology
@property
def lost_vespene_upgrade(self):
return self._proto.lost_vespene.upgrade
@property
def friendly_fire_minerals_none(self):
return self._proto.friendly_fire_minerals.none
@property
def friendly_fire_minerals_army(self):
return self._proto.friendly_fire_minerals.army
@property
def friendly_fire_minerals_economy(self):
return self._proto.friendly_fire_minerals.economy
@property
def friendly_fire_minerals_technology(self):
return self._proto.friendly_fire_minerals.technology
@property
def friendly_fire_minerals_upgrade(self):
return self._proto.friendly_fire_minerals.upgrade
@property
def friendly_fire_vespene_none(self):
return self._proto.friendly_fire_vespene.none
@property
def friendly_fire_vespene_army(self):
return self._proto.friendly_fire_vespene.army
@property
def friendly_fire_vespene_economy(self):
return self._proto.friendly_fire_vespene.economy
@property
def friendly_fire_vespene_technology(self):
return self._proto.friendly_fire_vespene.technology
@property
def friendly_fire_vespene_upgrade(self):
return self._proto.friendly_fire_vespene.upgrade
@property
def used_minerals_none(self):
return self._proto.used_minerals.none
@property
def used_minerals_army(self):
return self._proto.used_minerals.army
@property
def used_minerals_economy(self):
return self._proto.used_minerals.economy
@property
def used_minerals_technology(self):
return self._proto.used_minerals.technology
@property
def used_minerals_upgrade(self):
return self._proto.used_minerals.upgrade
@property
def used_vespene_none(self):
return self._proto.used_vespene.none
@property
def used_vespene_army(self):
return self._proto.used_vespene.army
@property
def used_vespene_economy(self):
return self._proto.used_vespene.economy
@property
def used_vespene_technology(self):
return self._proto.used_vespene.technology
@property
def used_vespene_upgrade(self):
return self._proto.used_vespene.upgrade
@property
def total_used_minerals_none(self):
return self._proto.total_used_minerals.none
@property
def total_used_minerals_army(self):
return self._proto.total_used_minerals.army
@property
def total_used_minerals_economy(self):
return self._proto.total_used_minerals.economy
@property
def total_used_minerals_technology(self):
return self._proto.total_used_minerals.technology
@property
def total_used_minerals_upgrade(self):
return self._proto.total_used_minerals.upgrade
@property
def total_used_vespene_none(self):
return self._proto.total_used_vespene.none
@property
def total_used_vespene_army(self):
return self._proto.total_used_vespene.army
@property
def total_used_vespene_economy(self):
return self._proto.total_used_vespene.economy
@property
def total_used_vespene_technology(self):
return self._proto.total_used_vespene.technology
@property
def total_used_vespene_upgrade(self):
return self._proto.total_used_vespene.upgrade
@property
def total_damage_dealt_life(self):
return self._proto.total_damage_dealt.life
@property
def total_damage_dealt_shields(self):
return self._proto.total_damage_dealt.shields
@property
def total_damage_dealt_energy(self):
return self._proto.total_damage_dealt.energy
@property
def total_damage_taken_life(self):
return self._proto.total_damage_taken.life
@property
def total_damage_taken_shields(self):
return self._proto.total_damage_taken.shields
@property
def total_damage_taken_energy(self):
return self._proto.total_damage_taken.energy
@property
def total_healed_life(self):
return self._proto.total_healed.life
@property
def total_healed_shields(self):
return self._proto.total_healed.shields
@property
def total_healed_energy(self):
return self._proto.total_healed.energy
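# Hedged usage sketch (not from the original file): reading a few ScoreDetails
# fields from self.state.score inside a python-sc2 BotAI.on_step.
async def _example_log_score(self, iteration):
    score = self.state.score
    if iteration % 100 == 0:
        print(score.score, score.collection_rate_minerals, score.killed_value_units)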
/sc2-env-0.11.1.2.tar.gz/sc2-env-0.11.1.2/sc2Env/score.py (pypi)
import sc2
from sc2 import Race
from sc2.player import Bot
from sc2.units import Units
from sc2.unit import Unit
from sc2.position import Point2, Point3
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sc2.ids.buff_id import BuffId
from sc2.ids.ability_id import AbilityId
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
"""
To play an arcade map, you need to download the map first.
Open the StarCraft2 Map Editor through the Battle.net launcher, in the top left go to
File -> Open -> (Tab) Blizzard -> Log in, then with "Source: Map/Mod Name" search for your desired map, in this example the "Marine Split Challenge-LOTV" map created by printf.
Hit "Ok" and confirm the download. Now that the map is opened, go to "File -> Save as" to store it on your hard drive.
Now load the arcade map by entering your map name below in
sc2.maps.get("YOURMAPNAME") without the .SC2Map extension
Map info:
You start with 30 marines, level N has 15+N speed banelings on creep
Type in game "sling" to activate zergling+baneling combo
Type in game "stim" to activate stimpack
Improvements that could be made:
- Make marines constantly run if they have a ling/bane very close to them
- Split marines before engaging
"""
class MarineSplitChallenge(sc2.BotAI):
async def on_step(self, iteration):
if iteration == 0:
await self.on_first_iteration()
actions = []
# do marine micro vs zerglings
for unit in self.units(UnitTypeId.MARINE):
if self.known_enemy_units:
# attack (or move towards) zerglings / banelings
if unit.weapon_cooldown <= self._client.game_step / 2:
enemies_in_range = self.known_enemy_units.filter(lambda u: unit.target_in_range(u))
# attack lowest hp enemy if any enemy is in range
if enemies_in_range:
# Use stimpack
if self.already_pending_upgrade(UpgradeId.STIMPACK) == 1 and not unit.has_buff(BuffId.STIMPACK) and unit.health > 10:
actions.append(unit(AbilityId.EFFECT_STIM))
# attack baneling first
filtered_enemies_in_range = enemies_in_range.of_type(UnitTypeId.BANELING)
if not filtered_enemies_in_range:
filtered_enemies_in_range = enemies_in_range.of_type(UnitTypeId.ZERGLING)
# attack lowest hp unit
lowest_hp_enemy_in_range = min(filtered_enemies_in_range, key=lambda u: u.health)
actions.append(unit.attack(lowest_hp_enemy_in_range))
# no enemy is in attack-range, so give attack command to closest instead
else:
closest_enemy = self.known_enemy_units.closest_to(unit)
actions.append(unit.attack(closest_enemy))
# move away from zergling / banelings
else:
stutter_step_positions = self.position_around_unit(unit, distance=4)
# filter in pathing grid
stutter_step_positions = {p for p in stutter_step_positions if self.in_pathing_grid(p)}
# find position furthest away from enemies and closest to unit
enemies_in_range = self.known_enemy_units.filter(lambda u: unit.target_in_range(u, -0.5))
if stutter_step_positions and enemies_in_range:
retreat_position = max(stutter_step_positions, key=lambda x: x.distance_to(enemies_in_range.center) - x.distance_to(unit))
actions.append(unit.move(retreat_position))
else:
print("No retreat positions detected for unit {} at {}.".format(unit, unit.position.rounded))
await self.do_actions(actions)
async def on_first_iteration(self):
await self.chat_send("Edit this message for automatic chat commands.")
self._client.game_step = 4 # do actions every X frames instead of every 8th
def position_around_unit(self, pos: Union[Unit, Point2, Point3], distance: int=1, step_size: int=1, exclude_out_of_bounds: bool=True):
pos = pos.position.to2.rounded
positions = {pos.offset(Point2((x, y)))
for x in range(-distance, distance+1, step_size)
for y in range(-distance, distance+1, step_size)
if (x, y) != (0, 0)}
# filter positions outside map size
if exclude_out_of_bounds:
positions = {p for p in positions if 0 <= p[0] < self._game_info.pathing_grid.width and 0 <= p[1] < self._game_info.pathing_grid.height}
return positions
def main():
sc2.run_game(sc2.maps.get("Marine Split Challenge"), [
Bot(Race.Terran, MarineSplitChallenge()),
], realtime=False, save_replay_as="Example.SC2Replay")
if __name__ == '__main__':
main()
/sc2-0.11.2.tar.gz/sc2-0.11.2/examples/arcade_bot.py (pypi)
from functools import reduce
from operator import or_
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from sc2.data import race_townhalls
import enum
class Hydralisk(sc2.BotAI):
def select_target(self):
if self.known_enemy_structures.exists:
return random.choice(self.known_enemy_structures).position
return self.enemy_start_locations[0]
async def on_step(self, iteration):
larvae = self.units(LARVA)
forces = self.units(ZERGLING) | self.units(HYDRALISK)
if self.units(HYDRALISK).amount > 10 and iteration % 50 == 0:
for unit in forces.idle:
await self.do(unit.attack(self.select_target()))
if self.supply_left < 2:
if self.can_afford(OVERLORD) and larvae.exists:
await self.do(larvae.random.train(OVERLORD))
return
if self.units(HYDRALISKDEN).ready.exists:
if self.can_afford(HYDRALISK) and larvae.exists:
await self.do(larvae.random.train(HYDRALISK))
return
if not self.townhalls.exists:
for unit in self.units(DRONE) | self.units(QUEEN) | forces:
await self.do(unit.attack(self.enemy_start_locations[0]))
return
else:
hq = self.townhalls.first
for queen in self.units(QUEEN).idle:
abilities = await self.get_available_abilities(queen)
if AbilityId.EFFECT_INJECTLARVA in abilities:
await self.do(queen(EFFECT_INJECTLARVA, hq))
if not (self.units(SPAWNINGPOOL).exists or self.already_pending(SPAWNINGPOOL)):
if self.can_afford(SPAWNINGPOOL):
await self.build(SPAWNINGPOOL, near=hq)
if self.units(SPAWNINGPOOL).ready.exists:
if not self.units(LAIR).exists and hq.is_idle:
if self.can_afford(LAIR):
await self.do(hq.build(LAIR))
if self.units(LAIR).ready.exists:
if not (self.units(HYDRALISKDEN).exists or self.already_pending(HYDRALISKDEN)):
if self.can_afford(HYDRALISKDEN):
await self.build(HYDRALISKDEN, near=hq)
if self.units(EXTRACTOR).amount < 2 and not self.already_pending(EXTRACTOR):
if self.can_afford(EXTRACTOR):
drone = self.workers.random
target = self.state.vespene_geyser.closest_to(drone.position)
err = await self.do(drone.build(EXTRACTOR, target))
if hq.assigned_harvesters < hq.ideal_harvesters:
if self.can_afford(DRONE) and larvae.exists:
larva = larvae.random
await self.do(larva.train(DRONE))
return
for a in self.units(EXTRACTOR):
if a.assigned_harvesters < a.ideal_harvesters:
w = self.workers.closer_than(20, a)
if w.exists:
await self.do(w.random.gather(a))
if self.units(SPAWNINGPOOL).ready.exists:
if not self.units(QUEEN).exists and hq.is_ready and hq.is_idle:
if self.can_afford(QUEEN):
await self.do(hq.train(QUEEN))
if self.units(ZERGLING).amount < 20 and self.minerals > 1000:
if larvae.exists and self.can_afford(ZERGLING):
await self.do(larvae.random.train(ZERGLING))
def main():
sc2.run_game(sc2.maps.get("(2)CatalystLE"), [
Bot(Race.Zerg, Hydralisk()),
Computer(Race.Terran, Difficulty.Medium)
], realtime=False, save_replay_as="ZvT.SC2Replay")
if __name__ == '__main__':
main()
/sc2-0.11.2.tar.gz/sc2-0.11.2/examples/zerg/hydralisk_push.py (pypi)
from functools import reduce
from operator import or_
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from sc2.data import race_townhalls
import enum
class BroodlordBot(sc2.BotAI):
def select_target(self):
if self.known_enemy_structures.exists:
return random.choice(self.known_enemy_structures).position
return self.enemy_start_locations[0]
async def on_step(self, iteration):
larvae = self.units(LARVA)
forces = self.units(ZERGLING) | self.units(CORRUPTOR) | self.units(BROODLORD)
if self.units(BROODLORD).amount > 2 and iteration % 50 == 0:
for unit in forces:
await self.do(unit.attack(self.select_target()))
if self.supply_left < 2:
if self.can_afford(OVERLORD) and larvae.exists:
await self.do(larvae.random.train(OVERLORD))
return
if self.units(GREATERSPIRE).ready.exists:
corruptors = self.units(CORRUPTOR)
# build half-and-half corruptors and broodlords
if corruptors.exists and corruptors.amount > self.units(BROODLORD).amount:
if self.can_afford(BROODLORD):
await self.do(corruptors.random.train(BROODLORD))
elif self.can_afford(CORRUPTOR) and larvae.exists:
await self.do(larvae.random.train(CORRUPTOR))
return
if not self.townhalls.exists:
for unit in self.units(DRONE) | self.units(QUEEN) | forces:
await self.do(unit.attack(self.enemy_start_locations[0]))
return
else:
hq = self.townhalls.first
for queen in self.units(QUEEN).idle:
abilities = await self.get_available_abilities(queen)
if AbilityId.EFFECT_INJECTLARVA in abilities:
await self.do(queen(EFFECT_INJECTLARVA, hq))
if not (self.units(SPAWNINGPOOL).exists or self.already_pending(SPAWNINGPOOL)):
if self.can_afford(SPAWNINGPOOL):
await self.build(SPAWNINGPOOL, near=hq)
if self.units(SPAWNINGPOOL).ready.exists:
if not self.units(LAIR).exists and not self.units(HIVE).exists and hq.is_idle:
if self.can_afford(LAIR):
await self.do(hq.build(LAIR))
if self.units(LAIR).ready.exists:
if not (self.units(INFESTATIONPIT).exists or self.already_pending(INFESTATIONPIT)):
if self.can_afford(INFESTATIONPIT):
await self.build(INFESTATIONPIT, near=hq)
if not (self.units(SPIRE).exists or self.already_pending(SPIRE)):
if self.can_afford(SPIRE):
await self.build(SPIRE, near=hq)
if self.units(INFESTATIONPIT).ready.exists and not self.units(HIVE).exists and hq.is_idle:
if self.can_afford(HIVE):
await self.do(hq.build(HIVE))
if self.units(HIVE).ready.exists:
spires = self.units(SPIRE).ready
if spires.exists:
spire = spires.random
if self.can_afford(GREATERSPIRE) and spire.is_idle:
await self.do(spire.build(GREATERSPIRE))
if self.units(EXTRACTOR).amount < 2 and not self.already_pending(EXTRACTOR):
if self.can_afford(EXTRACTOR):
drone = self.workers.random
target = self.state.vespene_geyser.closest_to(drone.position)
err = await self.do(drone.build(EXTRACTOR, target))
if hq.assigned_harvesters < hq.ideal_harvesters:
if self.can_afford(DRONE) and larvae.exists:
larva = larvae.random
await self.do(larva.train(DRONE))
return
for a in self.units(EXTRACTOR):
if a.assigned_harvesters < a.ideal_harvesters:
w = self.workers.closer_than(20, a)
if w.exists:
await self.do(w.random.gather(a))
if self.units(SPAWNINGPOOL).ready.exists:
if not self.units(QUEEN).exists and hq.is_ready and hq.is_idle:
if self.can_afford(QUEEN):
await self.do(hq.train(QUEEN))
if self.units(ZERGLING).amount < 40 and self.minerals > 1000:
if larvae.exists and self.can_afford(ZERGLING):
await self.do(larvae.random.train(ZERGLING))
def main():
sc2.run_game(sc2.maps.get("(2)CatalystLE"), [
Bot(Race.Zerg, BroodlordBot()),
Computer(Race.Terran, Difficulty.Medium)
], realtime=False, save_replay_as="ZvT.SC2Replay")
if __name__ == '__main__':
main()
/sc2-0.11.2.tar.gz/sc2-0.11.2/examples/zerg/onebase_broodlord.py (pypi)
[](https://pypi.org/project/sc2gameLobby/)
[](https://travis-ci.org/ttinies/sc2gameLobby)
[](https://coveralls.io/github/ttinies/sc2gameLobby?branch=master)

# Play Starcraft 2 on a ladder as a human or AI against other humans or AIs
## About
The objective of this repository is to enable casual Starcraft 2 players, AI developers and proficient coders to all create
a player that competes against others in a Starcraft 2 match. The strategy is to create an increasingly user-friendly
interface so that most anybody can readily set up matches and play against others.
#### Rationale: Why Create this Repository?
There is an existing ladder for AI-only developers, [sc2ai.net](https://sc2ai.net/). While that project is under active
development as of July 6, 2018, its roadmap doesn't support several critical features, which impedes developers' efforts
(such as ours) to create AI that is publicly visible. Here are several features this ladder supports that
sc2ai.net may not:
* Play on your own machine against others on their own machines. You're no longer limited by some other person's
machine who is sharing system resources with other players in the game.
* Support AI vs AI, AI vs human and human vs human play.
* AI developers aren't required to share their source code or any executable.
* Fast, user-friendly setup that non-programmers or programmers with lower proficiency in a specific language can set
up. No need to hunt + edit files by hand.
#### Brief Functional Overview
This sc2gameLobby package's primary functions are as follows:
1. Issue match requests such that other available players or static bots can be matched against you. When being matched
against opponents, if no valid opponents are available for a match with you in the queue, you are automatically matched
instead against other publicly available bot algorithms or Blizzard's built-in bots depending on your estimated
proficiency.
2. Launch a Starcraft 2 client that will automatically manage the match along with other Starcraft 2 clients (as
needed). The match is played until it's finished.
3. Each player using sc2gameLobby reports results back to the ladder. The ladder verifies each player's reported results
against its own intimate knowledge of the match to accurately determine the proper match result and update the ladder
accordingly.
Communication with the ladder server occurs via TCP connection with a django server available on the internet. It is
possible to communicate with [alternative ladders](https://github.com/ttinies/sc2ladderMgmt), but we've established this
server for normal sc2gameLobby gameplay.
## Installation
#### System Requirements
* sc2gameLobby is proven on both Linux (using wine) and Windows. While untested on OSX, because OSX is also widely
tested using pysc2, sc2gameLobby is also expected to work without issue. NOTE: Linux is not a platform officially
supported by Blizzard for Starcraft 2, but it does work, both using headless and regular setups using full graphics.
* As a human, your own system must be able to support a Starcraft 2 client application (the window that actually plays
the game). Reference standard [Starcraft 2 game requirements](https://us.battle.net/support/en/article/27575) for details.
* As an AI, your system must meet the requirements for human play, plus any other resources your AI may require. If
your AI architecture can run, it is allowed on this ladder.
#### Instructions
1. Install Starcraft 2 normally. **IMPORTANT** If you use an install destination path other than the default, ensure
the environment variable `SC2PATH` is set with the path to your customized installation path.
2. Install any(?) version of [python](https://www.python.org/downloads/) that is compatible with your system.
3. Use conda **or** pip via instructions below to install sc2gameLobby.
> NOTE: you can also install this package via other means, but you may have to manage your environment to ensure all
dependencies are available on your system. If you're not familiar with these utilities, follow the installation
instructions provided by their authors available on the internet.
##### Conda
From a command line, enter a standard [conda](https://conda.io/docs/user-guide/index.html) install command that's
compatible with your system setup. Be sure you're targeting the intended environment!
> `EXAMPLE: conda install sc2gameLobby -n <your development environment name>`
##### Pip
From a command line, enter a standard [pip](http://pip.pypa.io/en/stable/user_guide/) install command that's compatible
with your system setup.
> `EXAMPLE: pip install sc2gameLobby`
#### Dependencies
This sc2gameLobby package is intended to run with python version >= 3.5. Package dependencies are defined in
[requirements.txt](https://github.com/ttinies/sc2gameLobby/blob/master/requirements.txt).
Dependencies are installed automatically when using the installation methods above.
#### Verification of Valid Installation
If your setup is fully successful, the following test commands should work as follows:
test command: `python -m sc2gameLobby --help`
```
usage: __main__.py [-h] [--nogui] [--search PLAYERS] [--history] [-l] [-p]
...
PURPOSE: front-end interface to easily and reliably match against opponents
and run Starcraft2 opponents.
...
version: 1.0.0
```
test command: `python -m sc2gameLobby --versions`
```
...
4.4.0
base-version: 65895
data-hash: BF41339C22AE2EDEBEEADC8C75028F7D
fixed-hash:
label: 4.4.0
replay-hash:
version: 65895
```
test command: `python -m sc2gameLobby --search mapexplorer`
```
<PlayerRecord mapexplorer ai>
type : <PlayerDesigns ai>
difficulty : None
initCmd : sc2ai.agents.newExplorer
rating : 0
created : 2018-05-28
```
#### Troubleshooting
```
ERROR: A connection could not be made. <Ladder versentiedge> may not be available or you may not be connected to the internet.
```
This means that the ladder server instance you are attempting to communicate with could not be reached. It may not be
online or your connection to the internet may be compromised.
**<reserved for additional issues if/when such are reported>**
## Recommended Usage
Great, now you're set to rock ladder matches versus humans and AI opponents! Refer to [python](https://github.com/ttinies/sc2gameLobby/blob/master/USAGE_PYTHON.md)-specific or [non python](https://github.com/ttinies/sc2gameLobby/blob/master/USAGE_NON_PYTHON.md)-specific usage documents. Good luck!
## Further Development and Augmentation
#### Add New Features to the Code?
This is an open-use repository. Feel free to fork and issue pull requests. Feature enhancements, especially for
to-be-developed features, and bug fixes are especially appreciated.
###### Anticipated Useful, To-Be-Developed Features
* User-friendly GUI front end that abstracts the command-line utilities.
* Web browser integration to perform match requests.
* Publicly available web page statistics from data mining match results.
* Additional game modes beyond 1v1.
/sc2gameLobby-1.1.11.tar.gz/sc2gameLobby-1.1.11/README.md (pypi)
[](https://pypi.org/project/sc2maptool/)
[](https://travis-ci.org/ttinies/sc2gameMapRepo)
[](https://coveralls.io/github/ttinies/sc2gameMapRepo?branch=master)

# Starcraft2 Maps with Simple, Universal Retrieval
---
## About
The objective of this repository is to consolidate known Starcraft2 (SC2) maps
for use by developers creating bots, AI agents or some other custom code
project.
#### Simple. Effective. Useful.
The implementation of this code base is intended to not only consolidate SC2
maps into a single location, but also provide a simple interface to reliably
access and use basic map information. This includes selecting a map for
play (optionally specifying user-defined criteria), easily identify the .SC2Map
file absolute path, identify automatically generated tags that describe the
map(s) and identify any collection of maps using keywords to describe the map.
The intent is to provide a minimal (*simple!*) interface so that newcomers
can easily use developed functionality. This small project, independent of
other code, should prove more reliable to retrieve desired map information
(*effective!*). By being simple and effective, hopefully this repository proves
helpful to new and existing SC2 AI developers (*useful!*).
#### Rationale: Why Create this Repository?
* One, single location where many SC2 AI-relevant maps are accumulated. No need to use multiple users' repositories with their own map management systems.
* OS/installation independent. This package manages the maps itself without the user needing to install them at a particular location.
* Remove the burden from the user to have to know where to install the maps so their SC2 client can find the maps.
* SC2 map editor does not appear to be compatible with non-Blizzard code to programmatically extract relevant .SC2Map information.
#### Functional Overview
All .SC2Map files are located within the `Maps` subfolder or subsequent
subfolders. Each subsequent subfolder encodes an attribute that describes all
subsequent .SC2Map files contained within it or its subfolders. Using this
format, an index file that maps attribute tags to filenames is not needed.
This repository does not prevent potential .SC2Map file redundancy. Instead,
this storage approach allows files to be _very easily_ added into the repo w/o
having to additionally maintain a mapping file for each new file that's added.
Allowing duplicates also allows multiple versions of the same file to exist
within the repository, granted each file will have a unique set of automatically
generated attribute tags to distinguish between them.
When searching for maps given user-defined criteria, the lookup first restricts which maps are
examined by matching attributes, and then the map name, if specified.
The current implementation performs the lookup in [O(n)](https://en.wikipedia.org/wiki/Big_O_notation) time where N is the number
of maps managed within the repository. If N becomes large, this may need to
be optimized further for timely lookups.
---
## Installation
#### Dependencies
This package is mostly self-contained with only one external package
dependency: [six](https://pypi.org/project/six/) (python2 and python 3 compatibility)
#### Instructions
1. Install any(?) version of [python](https://www.python.org/downloads/) and use [pip](https://pypi.org/project/pip/) to install [six](https://pypi.org/project/six/).
2. git clone https://github.com/ttinies/sc2gameMapRepo (or fork and clone your repo) to `<destination>`.
3. Ensure `<destination>` is in your `PYTHONPATH`. Options:
* `pip install -e <destination>` and then `import sc2maptools` to ensure the package was installed correctly.
* install this package with `<destination>` within your own project
* add `<destination>` to the environment variable: `PYTHONPATH`
* `.../<Python folder>/Lib/site-packages/sc2gameMapRepo/` (similar to what a pip install would do)
---
## Recommended Usage
Refer to [python](https://github.com/ttinies/sc2gameMapRepo/blob/master/USAGE_PYTHON.md)-specific or [non python](https://github.com/ttinies/sc2gameMapRepo/blob/master/USAGE_NON_PYTHON.md)-specific usage documents.
---
## Troubleshooting
In the event that a map request is issued but no matching map is found, an
`InvalidMapSelection` Exception is raised. If encountered, your query must be
revised to properly select maps.
> EXAMPLE: given criteria `Foo=True` results in an exception because none of the
> maps exist in a subfolder named `foo` (ignoring folder's case).
`sc2gameMapRepo.constants.InvalidMapSelection: could not find any matching maps given criteria: {'foo': True}`
> EXAMPLE: given criteria `year=2017` and `Combat=True`, an exception is raised
> because none of the maps exist in a subfolder structure with both of these
> attributes in the path.
`sc2gameMapRepo.constants.InvalidMapSelection: could not find any matching maps given criteria: {'year': 2017, 'Combat': True}`
---
## Further Development and Augmentation
#### Add New Maps?
New .SC2Map files need to be added to the `Maps` subfolder. The files can be
placed in any subfolder structure the user desires. Each subfolder represents
an attribute that describes every map file it contains, including its own
subfolders. The folder name is deemed case-insensitive for the purpose of
attribute identification.
The subfolder name is interpreted in one of two ways according to its format (see the sketch after the examples below):
1. non-numeric chars mean the attribute is interpreted with a `bool` value.
2. if a numeric char is included, that char signals the beginning of an `int` or `string` typed value.
> EXAMPLE: a hypothetical folder, `MaxPlayers6`, would be interpreted with an
> attribute name `maxplayers` with an `int` value `6`.
> EXAMPLE: all .SC2Map files within this subfolder are `Economy` maps.
`/Maps/Economy/`
> EXAMPLE: all .SC2Map files within this subfolder are official `Ladder` maps
> which are 1v1 maps released in 2018.
`/Maps/Ladder/mode1v1/year2018`
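The folder-name rule above is simple enough to sketch in a few lines of Python. The following is an illustrative re-implementation of the described convention, not the package's actual parser:
```
import re

def parse_attribute(folder_name):
    """Interpret one subfolder name per the convention above (illustrative)."""
    match = re.search(r"\d", folder_name)
    if match is None:                 # purely non-numeric => bool attribute
        return folder_name.lower(), True
    i = match.start()                 # first numeric char starts the value
    name, value = folder_name[:i].lower(), folder_name[i:]
    return name, int(value) if value.isdigit() else value

print(parse_attribute("MaxPlayers6"))  # ('maxplayers', 6)
print(parse_attribute("Ladder"))       # ('ladder', True)
print(parse_attribute("mode1v1"))      # ('mode', '1v1')
```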
#### Add New Features to the Code?
This is an open-use repository. Feel free to fork and issue pull requests.
However, changing the defined interface is discouraged in order to promote
backward compatibility. Valuable feature enhancements and bug fixes are
welcome.
###### Anticipated Useful, To-Be-Developed Features
* Additional language-specific interfaces beyond Python.
* package management support: [PyPi](https://pypi.org/) / [pip](https://pypi.org/project/pip/) and [conda](https://www.anaconda.com/what-is-anaconda/).
* Accomodations for unforseen/unhandled incompatibility issues.
---
## Credits for Single-Player Scenario author/GitHub Repositories:
Many scenarios have already been created that involve having a single agent
solve a specifically defined task. These are included within this repository
too for completeness.
* DeepMind [PYSC2](https://github.com/deepmind/pysc2/blob/master/README.md)
* SoyGema [pySC2_minigames](https://github.com/SoyGema/pySC2_minigames/blob/master/README.md)
* SoyGema [Startcraft_pysc2_minigames](https://github.com/SoyGema/Startcraft_pysc2_minigames)
* SoyGema [minigames_pysc2](https://github.com/SoyGema/minigames_pysc2)
* 4rChon [sc2-ai-mini-games](https://github.com/4rChon/sc2-ai-mini-games/blob/master/README.md)
/sc2maptool-1.1.2.tar.gz/sc2maptool-1.1.2/README.md (pypi)
import enum
from sc2 import Race
from sc2.player import Bot
from sc2rl.environments.EnvironmentBase import SC2EnvironmentBase
from sc2rl.environments.SC2BotAI import SimpleSC2BotAI
from sc2rl.utils.sc2_utils import get_random_action
VERY_LARGE_NUMBER = 1e10
class Status(enum.Enum):
RUNNING = 0
END = 1
class MicroTestEnvironment(SC2EnvironmentBase):
def __init__(self, map_name, reward_func, state_proc_func, realtime=False, max_steps=25000,
winning_ratio_gamma=0.1, frame_skip_rate=1):
"""
:param map_name:
:param reward_func:
:param state_proc_func:
:param realtime:
:param max_steps: (int) max step iterations. 50000 is tested.
"""
allies = Bot(Race.Terran, SimpleSC2BotAI())
super(MicroTestEnvironment, self).__init__(map_name=map_name,
allies=allies,
realtime=realtime,
frame_skip_rate=frame_skip_rate)
self.max_steps = max_steps
self._step_count = 0
self.status = Status.RUNNING
self.reward_func = reward_func
self.state_proc_func = state_proc_func
self.prev_health = VERY_LARGE_NUMBER
self.curr_health = VERY_LARGE_NUMBER
self.winning_ratio = 0.0
self.winning_ratio_gamma = winning_ratio_gamma
@property
def step_count(self):
return self._step_count
@step_count.setter
def step_count(self, s_count):
self._step_count = s_count
if self.step_count >= self.max_steps:
self.status = Status.END
def reset(self):
sc2_game_state = self._reset()
self.step_count = 0
self.status = Status.RUNNING
return self.state_proc_func(sc2_game_state)
def observe(self):
sc2_game_state = self._observe()
return self.state_proc_func(sc2_game_state)
def _check_done(self, sc2_game_state):
num_allies = len(sc2_game_state.units.owned)
num_enemies = len(sc2_game_state.units.enemy)
cur_health = 0
for u in sc2_game_state.units:
cur_health += u.health
self.curr_health = cur_health
done_increase = num_allies == 0 or num_enemies == 0
if self.prev_health < self.curr_health:
done_zero_units = True
else:
done_zero_units = False
self.prev_health = self.curr_health
return done_increase or done_zero_units
def step(self, action=None):
self.step_count = self.step_count + 1
sc2_cur_state = self._observe()
if action is None:
action = get_random_action(sc2_cur_state)
sc2_next_state, _ = self._step(action_args=action)
# additional routine for checking done:
# the done-checking behaviour of the 'MicroTest' variants differs from the standard routine.
done = self._check_done(sc2_next_state)
cur_state = self.state_proc_func(sc2_cur_state)
next_state = self.state_proc_func(sc2_next_state)
reward = self.reward_func(cur_state, next_state, done)
if done: # Burn few remaining frames
win = int(len(sc2_next_state.units.owned) >= len(sc2_next_state.units.enemy))
self.burn_last_frames()
if self.status == Status.END:
_ = self.reset()
gamma = self.winning_ratio_gamma
self.winning_ratio = gamma * win + (1 - gamma) * self.winning_ratio
return next_state, reward, done
def burn_last_frames(self):
while True:
self.step_count = self.step_count + 1
sc2_cur_state = self._observe()
done = self._check_done(sc2_cur_state)
_, _ = self._step(action_args=None)
if not done:
break
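# Hedged usage sketch (not from the original file): the two callbacks the
# environment expects, with placeholder logic and a placeholder map name.
def example_state_proc(sc2_game_state):
    # e.g. extract unit positions / health into an array; passthrough here
    return sc2_game_state

def example_reward(cur_state, next_state, done):
    return 1.0 if done else 0.0

if __name__ == "__main__":
    env = MicroTestEnvironment(map_name="MicroTestMap",  # placeholder map
                               reward_func=example_reward,
                               state_proc_func=example_state_proc)
    state = env.reset()
    next_state, reward, done = env.step(action=None)  # None => random action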
/environments/MicroTestEnvironment.py (pypi)
[](https://pypi.org/project/sc2simulator/)
[](https://travis-ci.org/ttinies/sc2simulator)
[](https://coveralls.io/github/ttinies/sc2simulator?branch=master)

# [Starcraft 2 Scenario Simulator](https://github.com/ttinies/sc2simulator)
## About
This package's purpose to enable an interface for multiple players with various
Starcraft 2 agents to play a variety of pre-built or generated scenarios. The
uses of this package are diverse, including AI agent training.
#### Editor Screenshots



#### Example simulator gameplay
(Reserved)
#### Status
This package is in **beta testing**. Reference the defined [issues](https://github.com/ttinies/sc2simulator/issues)
to get a better idea of what is and is not working. If something is discovered
to not be working, kindly do submit a new issue!
#### Rationale: Why Create this Repository?
While a variety of situations can be encountered over the course of many, many
melee games, there are several problems with this approach. Specific situations
occur infrequently, possibly once in the course of a match (which normally
elapses ~20 minutes, up to over an hour at realtime speed) and may not occur
even once in many hundreds of matches. This makes training difficult, slow
and require significantly more data.
By allowing situations to be created artificially, the user may test their
agent's functionality against it. A specific battery of tests can be created
to compare performance of implementations against each other. It also allows
for a specific type of situation to be created and tested quickly with slight
variations to enhance the player's learning speed.
## Functionality
#### Brief Overview
1. The simulator is invoked with specific options.
* *The scenario mini-editor:* if the editor is invoked using --editor, a
mini-editor appears to create or modify a scenario for play. Unless the
option is selected to also play the specified scenario, the editor closes.
* *Regression testing:* when specifying --regression, a predefined battery of
test scenarios is run using same functionality as custom games except scenario
selection criteria are ignored in favor of each predefined scenario setup.
* *Custom Scenarios:* The --custom option allows a player to set up a specific
scenario to test, including the opponent's setup. Each agent joins an existing
scenario by using the --join option.
* *Join:* The --join option allows a player to specify at most its own agent and
optionally its required opponent. All other parameters of the scenario are
determined by the scenario creator.
2. Each player connects to the game automatically via the sc2gameLobby package.
This occurs by default over Versentiedge's public matchmaking ladder server.
3. Once in-game, the scenario is setup.
* if upgrades are specified, each player's client controller creates the
tech producing units and (with cheats enabled) automatically researches
the scenario-specified upgrades. This will elapse at most an additional
21 seconds on top of the specified scenario duration. (This is required
due to behavior in Blizzard's API protocol.)
* The host removes existing units and then creates the units as specified
by the scenario.
4. gameplay continues for as long as is specified using the --duration option.
5. the scenario can be repeated multiple times as specified using the --loops
option. Steps 2-4 are repeated for each loop of the same scenario.
6. A replay is saved locally by each player for each scenario iteration.
#### Example Commands
`python -m sc2simulator --editor --mapname=parasite`
`python -m sc2simulator --custom --unitsMax=7 --ground --players=defaulthuman,blizzbot5_hard --ladder=True`
`python -m sc2simulator --race=zerg --enemyrace=terran --defense=3 --year=2018 --season=3 --players=defaulthuman,blizzbot5_hard`
`python -m sc2simulator --cases=<yourScenarioName> --mapname=MechDepot --players=test,blizzbot5_hard`
NOTE: selecting player 'test' or 'defaulthuman' will allow you to play as a human.
Playing with your own custom agent requires additional player setup to define
the agents setup and execution/callback functions.
#### Cautions
* If your maps sit directly in the installed Starcraft 2 Maps directory (e.g. C:\Program Files (x86)\Starcraft II\Maps),
these maps can be deleted by the editor. Maps of the same name in subfolders
beneath Maps\... are safe.
* Including tech upgrades and some features (such as balancing on mineral cost,
unit dps, etc.) are only available if you have also access to the sc2techTree
package. If interested, petition @ttinies.
* When playing with your AI/bot, your bot may need to wait a few moments in-game
before the scenario is fully setup.
/sc2simulator-0.8.2.tar.gz/sc2simulator-0.8.2/README.md (pypi)
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
# spotify api
cid = '591812685f3f4a16bac164011f7f3e33'
secret = 'cec92bbc981b41d49fc7dadcbce0d0f6'
class SpotipyObj():
def __init__(self, cid=cid, secret=secret):
self.cid = cid
self.secret = secret
def sp(self):
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
return sp
# Fetches the tracks of a playlist and their details.
class GetMyPlaylistTracks(SpotipyObj):
def __init__(self, username, playlist_id, liked:bool):
self.username = username
self.playlist_id = playlist_id
self.liked = liked
def get_playlist_tracks(self):
"""
Returns the tracks of my recommended playlist as a list.
return: [[artist_1, song_1, id_1], [artist_2, song_2, id_2], ...]
"""
sp = super().sp()
results = sp.user_playlist_tracks(self.username, self.playlist_id, fields='items, uri, name, id, next', limit = 100, market='kr')
tracks = results['items']
# keep fetching even if the playlist has more than 100 tracks
while results['next']:
results = sp.next(results)
tracks.extend(results['items'])
artist_song_list = [[tracks[i]['track']['artists'][j]['name'], tracks[i]['track']['name'], tracks[i]['track']['id']] for i in range(len(tracks)) for j in range(len(tracks[i]['track']['artists']))]
return artist_song_list
def get_features(self, track_id):
"""
Extracts the audio features of a track.
return: dict of feature values for the given track id
"""
sp = super().sp()
# get audio_feature
features = sp.audio_features(tracks=[track_id])
danceability = features[0]["danceability"]
energy = features[0]["energy"]
key = features[0]["key"]
loudness = features[0]["loudness"]
mode = features[0]["mode"]
speechiness = features[0]["speechiness"]
acousticness = features[0]["acousticness"]
instrumentalness = features[0]["instrumentalness"]
liveness = features[0]["liveness"]
valence = features[0]["valence"]
tempo = features[0]["tempo"]
duration_ms = features[0]["duration_ms"]
time_signature = features[0]["time_signature"]
tracks_features = {
"id" : str(track_id),
"danceability" : danceability,
"energy" : energy,
"key" : key,
"loudness" : loudness,
"mode" : mode,
"speechiness" : speechiness,
"acousticness" : acousticness,
"instrumentalness" : instrumentalness,
"liveness" : liveness,
"valence" : valence,
"tempo" : tempo,
"duration_ms" : duration_ms,
"time_signature": time_signature,
"liked" : int(self.liked)
}
return tracks_features
class SongArtist(SpotipyObj):
def __init__(self):
pass
def get_song_artist(self, tracks_id):
"""
Given track ids, returns the song titles and artist names.
Args:
tracks_id ([list]): list of track ids
"""
sp = super().sp()
songs = [sp.track(t)['name'] for t in tracks_id]
artists = [sp.track(t)['artists'][0]['name'] for t in tracks_id]
lst = [pair for pair in zip(artists, songs)]
return lst
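# Hedged usage sketch (not from the original file): the username and playlist
# id below are placeholders, not real identifiers.
if __name__ == "__main__":
    playlist = GetMyPlaylistTracks("some_username", "some_playlist_id", liked=True)
    tracks = playlist.get_playlist_tracks()       # [[artist, song, track_id], ...]
    first_features = playlist.get_features(tracks[0][2])
    print(first_features["danceability"], first_features["tempo"])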
/sc3mylibrary-1.0.0-py3-none-any.whl/lib/get_myplaylist_api.py (pypi)
## Helper functions
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sc3nb as scn
```
* SuperCollider coders are familiar with and frequently use a number of useful converter functions
* the helper functions provide pythonic counterparts, currently for the following (to be extended):
### `linlin(x, x1, x2, y1, y2, clip)`
* to linearly map x from between [x1, x2] to [y1, y2]
* no range check is done; clipping is applied as specified (None, "min", "max", or anything else for "minmax")
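* the mapping itself is plain linear interpolation: `y = (x - x1) / (x2 - x1) * (y2 - y1) + y1` (clipping, if requested, is applied to the result)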
```
xs = np.linspace(1, 9, 100)
plt.figure(figsize=(15,2))
for i, clip in enumerate([None, "min", "max", "minmax"]):
plt.subplot(1, 4, i+1);
plt.plot(xs, scn.linlin(xs, 3, 7, 500, 300, clip))
```
### `midicps` and `cpsmidi`
```
scn.midicps(69.2) # convert MIDI note to cycles per second (cps) in [Hz]
scn.cpsmidi(440) # and back to MIDI note (in float resolution)
```
### `clip(value, minimum, maximum)`
```
xs = np.linspace(1,9,100)
plt.plot([scn.clip(x, 5, 7) for x in xs]);
```
### `ampdb` and `dbamp`
```
# dbamp(db) converts a dB value into amplitude, 0 dB = 1, '*2' \approx +6dB
dbs = np.linspace(-20, 20)
plt.plot(dbs, [scn.dbamp(d) for d in dbs]);
# plt.semilogy()
# ampdb(amp) converts an amplitude to dB, assuming 0dB=1
scn.ampdb(0.2)
```
/sc3nb-1.1.0.tar.gz/sc3nb-1.1.0/examples/helper-examples.ipynb (pypi)
```
# header / imports
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sc3nb as scn
from sc3nb import Buffer
example_file = "../media/blip.wav"
sc = scn.startup()
```
# Buffer
Buffer is the Python class in sc3nb to interface with Buffers on the SuperCollider server.
```
# uncomment following line to see help for the Buffer class:
# help(scn.Buffer)
```
## Create Buffer from a numpy.Array
```
d0 = np.random.rand(30000, 1)
buf0 = Buffer().load_data(d0)
buf0
```
In this case a default buffer with default sample rate (44100) and default insert mode is created.
If you want to create a buffer with a specific sample rate or OSC insertion method, etc. look at ```load_data```
```
scn.Buffer.load_data?
```
Attention: insertion via OSC is particularly useful for small datasets (e.g. less than 1000 entries). For larger datasets the default 'file' mode is much faster.
```
d0 = np.random.rand(30000, 1)
buf1 = Buffer().load_data(d0, sr=5000, mode='osc')
buf1
```
## Create Buffer with data from PyA Asig
This only works when using the pya package: skip if you don't use pya.
```
try:
from pya import Ugen
except ImportError:
pass
else:
a1 = Ugen().sine(440, dur=1.0, sr=2000, channels=2).fade_out(0.5) # 1.0s sine tone of 440 Hz
a1.plot()
print(a1)
buf1 = Buffer().load_asig(a1)
buf1
```
Again, the default transport method is mode='file', i.e. a temporary file is created and used to fill the buffer on sc
with its content.
* use mode="osc" to select the direct transfer of data via OSC messages
## Create Buffer of .wav File
```
buf2 = Buffer().read(example_file)
buf2
```
The buffer method will automatically read the sample rate of the file and set it to Buffer.sr
You can specify further arguments to `read`
```
scn.Buffer.read?
buf = Buffer().read(example_file, starting_frame=18000, num_frames=20000, channels=[1])
buf
```
## Allocate an empty Buffer
```
buf3 = Buffer().alloc(2.5*44100, sr=44100)
buf3
```
## Reuse an existing SC buffer
`Buffer.use_existing(bufnum)` will force the Buffer to (re-)use a buffer that already exists on the server, identified via its bufnum on the scsynth.
```
# create a Buffer in SuperCollider
%sc b = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav");
bufnum = %scg b.bufnum
bufnum
buf4 = Buffer()
buf4
buf4.use_existing(bufnum)
buf4 # the Buffer now refers to the existing bufnum
buf4.play()
```
## Copy an existing SC buffer
``copy_existing`` allows copying an already existing buffer into another buffer.
```
buf5 = Buffer().read(example_file)
buf6 = Buffer().copy_existing(buf5)
```
This method will automatically use an internal SuperCollider copy method if both buffer objects use the same sc instance. Otherwise the buffer content is transferred via the filesystem; for this to work, both sc instances need access to the same filesystem.
```
server2 = scn.SCServer(options=scn.ServerOptions(udp_port=57778))
server2.boot(kill_others=False)
sc.server.dump_osc()
server2.dump_osc()
buf7 = Buffer(server=server2).copy_existing(buf6)
buf5sig = buf5.to_array()
buf6sig = buf6.to_array()
buf7sig = buf7.to_array()
fig, axs = plt.subplots(4,1)
axs[0].plot(buf5sig) # signal
axs[1].plot(buf6sig) # copied signal
axs[2].plot(buf7sig) # copied signal on other server
axs[3].plot(buf6sig-buf7sig); # difference (should be 0)
plt.tight_layout()
```
With this method, the complete buffer with all samples is copied. If you want to copy only a selection of samples, you can use `gen_copy()` (see below).
## Play Buffer
If you want to listen to the buffer, you can use ``play``.
```
d = np.sin(2 * np.pi * 440 * np.linspace(0, 3, 3 * 44100)**0.9)
buf8 = Buffer().load_data(d)
playbuf_synth = buf8.play()
playbuf_synth
```
As you can see, `play()` returns an sc3nb Synth object for the Buffer.
This allows you to control the playback via the Synth class while the synth is running.
```
playbuf_synth.rate = 0.5
if not playbuf_synth.freed: # stop the playback if not done already
playbuf_synth.free()
playbuf_synth.wait()
playbuf_synth = buf8.play(rate=10, amp=0.15, pan=1) # play at given rate and pan
playbuf_synth.wait(timeout=6) # wait for synth to finish
```
You can get a description of the possible arguments with
```
scn.SynthDef.get_description(playbuf_synth.name)
```
and can even see the SynthDef here:
```
buf8._synth_def
```
As you can see, the SC synth will free itself when done, unless you use the loop argument.
With loop enabled, however, you need to free the synth manually.
```
synth = buf8.play(rate=-4, loop=True) # play looped
synth.rate = 1 # change controls as needed
synth.free()
```
For more information regarding the Synth class, please refer to the [Node guide](node-examples.ipynb).
## Write Buffer content to file
Write the content of a buffer into a file. By default it is a .wav file with float samples. You can change this via the parameters "header" and "sample".
```
buf9 = Buffer().load_data(np.random.rand(10000)-0.5)
buf9.write("../media/output.wav")
# !ls -la ../media # uncomment if your shell offers ls
```
## Fetch Buffer content to array
```
# create a buffer
buf2 = Buffer().read(example_file)
data = buf2.to_array()
plt.plot(data);
buf2.play(rate=1)
```
## Fill Buffer with values
### Fill a Buffer with zeros:
```
scn.Buffer.zero?
buf = Buffer().alloc(100)
buf.zero()
plt.plot(buf.to_array());
```
### Fill a Buffer range with values:
```
scn.Buffer.fill?
buf = Buffer().alloc(500).fill(0, 90, 22).fill(200, 100, 5)
plt.plot(buf.to_array());
```
Alternatively, fill the buffer with a single fill statement using multiple value triplets:
```
buf.fill([20, 50, -8000, 200, 100, 8000])
plt.plot(buf.to_array());
```
### Fill Buffer with sine wave harmonics of given amplitudes.
```
scn.Buffer.gen_sine1?
buf = Buffer().alloc(500).gen_sine1([1,-0.5,0,1.4,0,0,0.2])
plt.plot(buf.to_array());
```
### Fill Buffer with sine wave partials using specified frequencies and amplitudes.
```
scn.Buffer.gen_sine2?
buf = Buffer().alloc(1024).gen_sine2([[3.1, 1], [0.2, -2.5], [30, 0.3]])
plt.plot(buf.to_array());
```
### Fill Buffer with sine waves of given frequency, amplitude, and phase
```
scn.Buffer.gen_sine3?
buf = Buffer().alloc(1024).gen_sine3(
[[1, 0.9, 1], [2, 0.3, +np.pi/2], [3, 0.3, 3]])
plt.plot(buf.to_array());
```
### Fill Buffer with a series of Chebyshev polynomials:
```
scn.Buffer.gen_cheby?
```
$\textrm{cheby}(n) = \textrm{amplitude} \cdot \cos(n \cdot \arccos(x))$
```
buf = Buffer().alloc(1024)
ch = [1]
for i in range(4):
ch.insert(0, 0)
buf.gen_cheby(ch)
plt.plot(buf.to_array(), label=str(i));
plt.legend();
```
`gen_sine1` to `gen_sine3` and `gen_cheby` have the following optional parameters (a combined sketch follows below):
* **normalize**: Normalize the peak amplitude of the wave to 1.0.
* **wavetable**: If set, the buffer is written in wavetable format so that it can be read by interpolating oscillators.
* **clear**: If set, the buffer is cleared before new partials are written into it. Otherwise the new partials are summed with the existing contents of the buffer.
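A minimal combined sketch, treating the flags above as keyword arguments (their exact signatures can be checked via `scn.Buffer.gen_sine1?`):
```
buf = Buffer().alloc(1024).gen_sine1([1, 0.5, 0.25], normalize=True, wavetable=True, clear=True)
plt.plot(buf.to_array());
```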
### Copy data of another Buffer:
```
scn.Buffer.gen_copy?
buf1 = Buffer().alloc(1024).fill(1024, 0, 0)
plt.plot(buf1.to_array());
buf2 = Buffer().alloc(1024).gen_sine1([1,0.5,0,1.4,0,0.5,0.2])
# copy samples 0..0+400 of buf2 into buf1 at position 2++
buf1.gen_copy(buf2, 0, 2, 400)
plt.plot(buf1.to_array());
# copy samples 250..end(=<0) of buf2 into buf1 at position 250++
buf1.gen_copy(buf2, 0, 250, 400)
plt.plot(buf1.to_array());
```
Above we copied samples of `buf2` into `buf1` at the given positions. Use a negative number of samples to copy all available samples.
## Get information about the Buffer
Information about the buffer object:
```
buf3
```
Information about the buffer in SC
```
buf3.query?
buf3.query()
```
## Free Buffers
Start with a buffer:
```
buf = Buffer().read(example_file)
buf
buf.query() # works as intended
buf.free()
buf # listed as not loaded, python Buffer instance still exists
try:
buf.query() # raises an error after buf.free
except RuntimeError:
pass
else:
print("Buffer query on freed buffer should raise RuntimeError")
sc.exit()
```
|
/sc3nb-1.1.0.tar.gz/sc3nb-1.1.0/examples/supercollider-objects/buffer-examples.ipynb
| 0.738763 | 0.887058 |
buffer-examples.ipynb
|
pypi
|
|PyPI| |travis| |Docs|
scArches (PyTorch) - single-cell architecture surgery
=========================================================================
.. raw:: html
<img src="https://user-images.githubusercontent.com/33202701/89729020-15f7c200-da32-11ea-989b-1b9a3283f642.png" width="700px" align="center">
scArches is a package to integrate newly produced single-cell datasets into integrated reference atlases. Our method can facilitate large collaborative projects with decentralized training and integration of multiple datasets by different groups. scArches is compatible with `scanpy <https://scanpy.readthedocs.io/en/stable/>`_ and hosts efficient implementations of all conditional generative models for single-cell data.
.. note::
expiMap has been added to the scArches code base. It allows interpretable representation learning from scRNA-seq data as well as reference mapping. Try it in the tutorial section.
What can you do with scArches?
-------------------------------
- Construct single or multi-modal (CITE-seq) reference atlases and share the trained model and the data (if possible).
- Download a pre-trained model for your atlas of interest, update it with new datasets and share with your collaborators.
- Project and integrate query datasets on top of a reference and use the latent representation for downstream tasks, e.g. differential testing, clustering, classification
What are the different models?
------------------------------
scArches is itself an algorithm to project query data on top of reference datasets and applies
to different models. Here we provide a short explanation and hints on when to use which model. Our models are:
- **scVI** (`Lopez et al., 2018 <https://www.nature.com/articles/s41592-018-0229-2>`_): Requires access to raw count values for data integration and assumes a count distribution on the data (NB, ZINB, Poisson).
- **trVAE** (`Lotfollahi et al., 2020 <https://academic.oup.com/bioinformatics/article/36/Supplement_2/i610/6055927?guestAccessKey=71253caa-1779-40e8-8597-c217db539fb5>`_): Supports either normalized log-transformed or count data as input and applies an additional MMD loss for better merging in the latent space.
- **scANVI** (`Xu et al., 2019 <https://www.biorxiv.org/content/10.1101/532895v1>`_): Needs cell-type labels for the reference data. Your query data can be either unlabeled or labeled. In the case of unlabeled query data, you can also use this method to classify your query cells using the reference labels.
- **scGen** (`Lotfollahi et al., 2019 <https://www.nature.com/articles/s41592-019-0494-8>`_): This method requires cell-type labels for both reference building and mapping. The reference mapping for this method solely relies on the integrated reference and requires no fine-tuning.
- **expiMap** (`Lotfollahi*, Rybakov* et al., 2023 <https://www.nature.com/articles/s41556-022-01072-x>`_): This method takes prior knowledge from gene set databases or users, allowing you to analyze your query data in the context of known gene programs.
- **totalVI** (`Gayoso et al., 2019 <https://www.biorxiv.org/content/10.1101/532895v1>`_): This model can be used to build multi-modal CITE-seq reference atlases.
- **treeArches** (`Michielsen*, Lotfollahi* et al., 2022 <https://www.biorxiv.org/content/10.1101/2022.07.07.499109v1>`_): This model builds a hierarchical tree for the cell types in the reference atlas and, when mapping the query data, can annotate and also identify novel cell states and populations present in the query data.
- **SageNet** (`Heidari et al., 2022 <https://www.biorxiv.org/content/10.1101/2022.04.14.488419v1>`_): This model allows construction of a spatial atlas by mapping query dissociated single cells/spots (e.g., from scRNA-seq or Visium datasets) into a common coordinate framework using one or more spatially resolved reference datasets.
- **mvTCR** (`Drost et al., 2022 <https://www.biorxiv.org/content/10.1101/2021.06.24.449733v2.abstract?%3Fcollection=>`_): Using this model you will be able to integrate T-cell receptor (TCR, treated as a sequence) and scRNA-seq datasets across multiple donors into a joint representation capturing information from both modalities.
- **scPoli** (`De Donno et al., 2022 <https://www.biorxiv.org/content/10.1101/2022.11.28.517803v1>`_): This model allows data integration of scRNA-seq datasets, prototype-based label transfer and reference mapping. scPoli learns both sample embeddings and integrated cell embeddings, thus providing the user with a multi-scale view of the data, especially useful in the case of many samples to integrate.
Where to start?
---------------
To get a sense of how the model works please go through `this <https://scarches.readthedocs.io/en/latest/trvae_surgery_pipeline.html>`__ tutorial.
To find out how to construct and share or use pre-trained models, see the example sections.
Reference
-------------------------------
If scArches is useful in your research, please consider citing the `paper <https://www.nature.com/articles/s41587-021-01001-7>`_.
.. |PyPI| image:: https://img.shields.io/pypi/v/scarches.svg
:target: https://pypi.org/project/scarches
.. |PyPIDownloads| image:: https://pepy.tech/badge/scarches
:target: https://pepy.tech/project/scarches
.. |Docs| image:: https://readthedocs.org/projects/scarches/badge/?version=latest
:target: https://scarches.readthedocs.io
.. |travis| image:: https://travis-ci.com/theislab/scarches.svg?branch=master
:target: https://travis-ci.com/theislab/scarches
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/docs/about.rst
| 0.931001 | 0.865906 |
about.rst
|
pypi
|
from collections import Counter
import numpy as np
import torch
from torch.utils.data import Dataset
from scipy import sparse
from ._utils import label_encoder, remove_sparsity
class MultiConditionAnnotatedDataset(Dataset):
"""Dataset handler for scPoli model and trainer.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
condition_keys: List
List of column names of conditions in `adata.obs` data frame.
condition_encoders: Dict
Dictionary mapping each condition key to its encoder of condition labels.
conditions_combined_encoder: Dict
Dictionary of encoded combinations of conditions.
cell_type_keys: List
List of column names of different celltype hierarchies in `adata.obs` data frame.
cell_type_encoder: Dict
Dictionary of encoded celltypes.
"""
def __init__(self,
adata,
condition_keys=None,
condition_encoders=None,
conditions_combined_encoder=None,
cell_type_keys=None,
cell_type_encoder=None,
labeled_array=None
):
self.condition_keys = condition_keys
self.condition_encoders = condition_encoders
self.conditions_combined_encoder = conditions_combined_encoder
self.cell_type_keys = cell_type_keys
self.cell_type_encoder = cell_type_encoder
self._is_sparse = sparse.issparse(adata.X)
self.data = adata.X if self._is_sparse else torch.tensor(adata.X)
size_factors = np.ravel(adata.X.sum(1))
self.size_factors = torch.tensor(size_factors)
labeled_array = np.zeros((len(adata), 1)) if labeled_array is None else labeled_array
self.labeled_vector = torch.tensor(labeled_array)
# Encode condition strings to integer
if self.condition_keys is not None:
self.conditions = [label_encoder(
adata,
encoder=self.condition_encoders[condition_keys[i]],
condition_key=condition_keys[i],
) for i in range(len(self.condition_encoders))]
self.conditions = torch.tensor(self.conditions, dtype=torch.long).T
self.conditions_combined = label_encoder(
adata,
encoder=self.conditions_combined_encoder,
condition_key='conditions_combined'
)
self.conditions_combined=torch.tensor(self.conditions_combined, dtype=torch.long)
# Encode cell type strings to integer
if self.cell_type_keys is not None:
self.cell_types = list()
for cell_type_key in cell_type_keys:
level_cell_types = label_encoder(
adata,
encoder=self.cell_type_encoder,
condition_key=cell_type_key,
)
self.cell_types.append(level_cell_types)
self.cell_types = np.stack(self.cell_types).T
self.cell_types = torch.tensor(self.cell_types, dtype=torch.long)
def __getitem__(self, index):
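# assemble one training sample: densify sparse rows on the fly and attach
# the size factor, labeled mask and encoded condition / cell-type info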
outputs = dict()
if self._is_sparse:
x = torch.tensor(np.squeeze(self.data[index].toarray()))
else:
x = self.data[index]
outputs["x"] = x
outputs["labeled"] = self.labeled_vector[index]
outputs["sizefactor"] = self.size_factors[index]
if self.condition_keys:
outputs["batch"] = self.conditions[index, :]
outputs["combined_batch"] = self.conditions_combined[index]
if self.cell_type_keys:
outputs["celltypes"] = self.cell_types[index, :]
return outputs
def __len__(self):
return self.data.shape[0]
@property
def condition_label_encoder(self) -> dict:
return self.condition_encoder
@condition_label_encoder.setter
def condition_label_encoder(self, value: dict):
if value is not None:
self.condition_encoder = value
@property
def cell_type_label_encoder(self) -> dict:
return self.cell_type_encoder
@cell_type_label_encoder.setter
def cell_type_label_encoder(self, value: dict):
if value is not None:
self.cell_type_encoder = value
@property
def stratifier_weights(self):
conditions = self.conditions.detach().cpu().numpy()
condition_coeff = 1. / len(conditions)
condition2count = Counter(conditions)
counts = np.array([condition2count[cond] for cond in conditions])
weights = condition_coeff / counts
return weights.astype(float)
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/dataset/scpoli/anndata.py
| 0.928627 | 0.486575 |
anndata.py
|
pypi
|
from collections import Counter
import numpy as np
import torch
from torch.utils.data import Dataset
from scipy import sparse
from .data_handling import remove_sparsity
from ._utils import label_encoder
class AnnotatedDataset(Dataset):
"""Dataset handler for TRVAE model and trainer.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
condition_key: String
column name of conditions in `adata.obs` data frame.
condition_encoder: Dict
dictionary of encoded conditions.
cell_type_keys: List
List of column names of different celltype hierarchies in `adata.obs` data frame.
cell_type_encoder: Dict
dictionary of encoded celltypes.
"""
def __init__(self,
adata,
condition_key=None,
condition_encoder=None,
cell_type_keys=None,
cell_type_encoder=None,
labeled_array=None
):
self.condition_key = condition_key
self.condition_encoder = condition_encoder
self.cell_type_keys = cell_type_keys
self.cell_type_encoder = cell_type_encoder
self._is_sparse = sparse.issparse(adata.X)
self.data = adata.X if self._is_sparse else torch.tensor(adata.X)
size_factors = np.ravel(adata.X.sum(1))
self.size_factors = torch.tensor(size_factors)
labeled_array = np.zeros((len(adata), 1)) if labeled_array is None else labeled_array
self.labeled_vector = torch.tensor(labeled_array)
# Encode condition strings to integer
if self.condition_key is not None:
self.conditions = label_encoder(
adata,
encoder=self.condition_encoder,
condition_key=condition_key,
)
self.conditions = torch.tensor(self.conditions, dtype=torch.long)
# Encode cell type strings to integer
if self.cell_type_keys is not None:
self.cell_types = list()
for cell_type_key in cell_type_keys:
level_cell_types = label_encoder(
adata,
encoder=self.cell_type_encoder,
condition_key=cell_type_key,
)
self.cell_types.append(level_cell_types)
self.cell_types = np.stack(self.cell_types).T
self.cell_types = torch.tensor(self.cell_types, dtype=torch.long)
def __getitem__(self, index):
outputs = dict()
if self._is_sparse:
x = torch.tensor(np.squeeze(self.data[index].toarray()))
else:
x = self.data[index]
outputs["x"] = x
outputs["labeled"] = self.labeled_vector[index]
outputs["sizefactor"] = self.size_factors[index]
if self.condition_key:
outputs["batch"] = self.conditions[index]
if self.cell_type_keys:
outputs["celltypes"] = self.cell_types[index, :]
return outputs
def __len__(self):
return self.data.shape[0]
@property
def condition_label_encoder(self) -> dict:
return self.condition_encoder
@condition_label_encoder.setter
def condition_label_encoder(self, value: dict):
if value is not None:
self.condition_encoder = value
@property
def cell_type_label_encoder(self) -> dict:
return self.cell_type_encoder
@cell_type_label_encoder.setter
def cell_type_label_encoder(self, value: dict):
if value is not None:
self.cell_type_encoder = value
@property
def stratifier_weights(self):
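# inverse-frequency weights: samples from rare conditions receive
# proportionally larger weights, so a stratified sampler balances conditions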
conditions = self.conditions.detach().cpu().numpy()
condition_coeff = 1. / len(conditions)
condition2count = Counter(conditions)
counts = np.array([condition2count[cond] for cond in conditions])
weights = condition_coeff / counts
return weights.astype(float)
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/dataset/trvae/anndata.py
| 0.933408 | 0.485478 |
anndata.py
|
pypi
|
import inspect
import os
import torch
import pickle
import numpy as np
from copy import deepcopy
from anndata import AnnData, read
from typing import Optional, Union
from torch.distributions import Normal
from scipy.sparse import issparse
from ._utils import _validate_var_names
class BaseMixin:
""" Adapted from
Title: scvi-tools
Authors: Romain Lopez <[email protected]>,
Adam Gayoso <[email protected]>,
Galen Xing <[email protected]>
Date: 14.12.2020
Code version: 0.8.0-beta.0
Availability: https://github.com/YosefLab/scvi-tools
Link to the used code:
https://github.com/YosefLab/scvi-tools/blob/0.8.0-beta.0/scvi/core/models/base.py
"""
def _get_user_attributes(self):
# returns all the self attributes defined in a model class, eg, self.is_trained_
attributes = inspect.getmembers(self, lambda a: not (inspect.isroutine(a)))
attributes = [
a for a in attributes if not (a[0].startswith("__") and a[0].endswith("__"))
]
attributes = [a for a in attributes if not a[0].startswith("_abc_")]
return attributes
def _get_public_attributes(self):
public_attributes = self._get_user_attributes()
public_attributes = {a[0]: a[1] for a in public_attributes if a[0][-1] == "_"}
return public_attributes
def save(
self,
dir_path: str,
overwrite: bool = False,
save_anndata: bool = False,
**anndata_write_kwargs,
):
"""Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
save_anndata
If True, also saves the anndata
anndata_write_kwargs
Kwargs for anndata write function
"""
# get all the public attributes
public_attributes = self._get_public_attributes()
# save the model state dict and the trainer state dict only
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
if save_anndata:
self.adata.write(
os.path.join(dir_path, "adata.h5ad"), **anndata_write_kwargs
)
model_save_path = os.path.join(dir_path, "model_params.pt")
attr_save_path = os.path.join(dir_path, "attr.pkl")
varnames_save_path = os.path.join(dir_path, "var_names.csv")
var_names = self.adata.var_names.astype(str)
var_names = var_names.to_numpy()
np.savetxt(varnames_save_path, var_names, fmt="%s")
torch.save(self.model.state_dict(), model_save_path)
with open(attr_save_path, "wb") as f:
pickle.dump(public_attributes, f)
def _load_expand_params_from_dict(self, state_dict):
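# Expand the saved state dict to the current model's shapes: when query
# training adds new conditions, the conditional weight matrices grow along
# one dimension, so the old weights are kept and concatenated with the
# freshly initialized entries of the new model.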
load_state_dict = state_dict.copy()
device = next(self.model.parameters()).device
new_state_dict = self.model.state_dict()
for key, load_ten in load_state_dict.items():
new_ten = new_state_dict[key]
if new_ten.size() == load_ten.size():
continue
# new categoricals changed size
else:
load_ten = load_ten.to(device)
# only one dim diff
new_shape = new_ten.shape
n_dims = len(new_shape)
sel = [slice(None)] * n_dims
for i in range(n_dims):
dim_diff = new_shape[i] - load_ten.shape[i]
axs = i
sel[i] = slice(-dim_diff, None)
if dim_diff > 0:
break
fixed_ten = torch.cat([load_ten, new_ten[tuple(sel)]], dim=axs)
load_state_dict[key] = fixed_ten
for key, ten in new_state_dict.items():
if key not in load_state_dict:
load_state_dict[key] = ten
self.model.load_state_dict(load_state_dict)
@classmethod
def _load_params(cls, dir_path: str, map_location: Optional[str] = None):
setup_dict_path = os.path.join(dir_path, "attr.pkl")
model_path = os.path.join(dir_path, "model_params.pt")
varnames_path = os.path.join(dir_path, "var_names.csv")
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
model_state_dict = torch.load(model_path, map_location=map_location)
var_names = np.genfromtxt(varnames_path, delimiter=",", dtype=str)
return attr_dict, model_state_dict, var_names
@classmethod
def load(
cls,
dir_path: str,
adata: Optional[AnnData] = None,
map_location = None
):
"""Instantiate a model from the saved output.
Parameters
----------
dir_path
Path to saved outputs.
adata
AnnData object.
If None, will check for and load anndata saved with the model.
map_location
a function, torch.device, string or a dict specifying
how to remap storage locations
Returns
-------
Model with loaded state dictionaries.
"""
adata_path = os.path.join(dir_path, "adata.h5ad")
load_adata = adata is None
if os.path.exists(adata_path) and load_adata:
adata = read(adata_path)
elif not os.path.exists(adata_path) and load_adata:
raise ValueError("Save path contains no saved anndata and no adata was passed.")
attr_dict, model_state_dict, var_names = cls._load_params(dir_path, map_location)
# Overwrite adata with new genes
adata = _validate_var_names(adata, var_names)
cls._validate_adata(adata, attr_dict)
init_params = cls._get_init_params_from_dict(attr_dict)
model = cls(adata, **init_params)
model.model.to(next(iter(model_state_dict.values())).device)
model.model.load_state_dict(model_state_dict)
model.model.eval()
model.is_trained_ = attr_dict['is_trained_']
return model
class SurgeryMixin:
@classmethod
def load_query_data(
cls,
adata: AnnData,
reference_model: Union[str, 'Model'],
freeze: bool = True,
freeze_expression: bool = True,
remove_dropout: bool = True,
map_location = None,
**kwargs
):
"""Transfer Learning function for new data. Uses old trained model and expands it for new conditions.
Parameters
----------
adata
Query anndata object.
reference_model
A model to expand or a path to a model folder.
freeze: Boolean
If 'True' freezes every part of the network except the first layers of encoder/decoder.
freeze_expression: Boolean
If 'True' freeze every weight in first layers except the condition weights.
remove_dropout: Boolean
If 'True' remove Dropout for Transfer Learning.
map_location
map_location to remap storage locations (as in '.load') of 'reference_model'.
Only taken into account if 'reference_model' is a path to a model on disk.
kwargs
kwargs for the initialization of the query model.
Returns
-------
new_model
New model to train on query data.
"""
if isinstance(reference_model, str):
attr_dict, model_state_dict, var_names = cls._load_params(reference_model, map_location)
adata = _validate_var_names(adata, var_names)
else:
attr_dict = reference_model._get_public_attributes()
model_state_dict = reference_model.model.state_dict()
adata = _validate_var_names(adata, reference_model.adata.var_names)
init_params = deepcopy(cls._get_init_params_from_dict(attr_dict))
conditions = init_params['conditions']
condition_key = init_params['condition_key']
new_conditions = []
adata_conditions = adata.obs[condition_key].unique().tolist()
# Check if new conditions are already known
for item in adata_conditions:
if item not in conditions:
new_conditions.append(item)
# Add new conditions to overall conditions
for condition in new_conditions:
conditions.append(condition)
if remove_dropout:
init_params['dr_rate'] = 0.0
init_params.update(kwargs)
new_model = cls(adata, **init_params)
new_model.model.to(next(iter(model_state_dict.values())).device)
new_model._load_expand_params_from_dict(model_state_dict)
if freeze:
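# freeze all parameters first, then selectively re-enable gradients for
# dispersion parameters ('theta') and either the condition weights
# ('cond_L.weight') or the whole first layer ('L0'/'N0')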
new_model.model.freeze = True
for name, p in new_model.model.named_parameters():
p.requires_grad = False
if 'theta' in name:
p.requires_grad = True
if freeze_expression:
if 'cond_L.weight' in name:
p.requires_grad = True
else:
if "L0" in name or "N0" in name:
p.requires_grad = True
return new_model
class CVAELatentsMixin:
def get_latent(
self,
x: Optional[np.ndarray] = None,
c: Optional[np.ndarray] = None,
mean: bool = False,
mean_var: bool = False
):
"""Map `x` in to the latent space. This function will feed data in encoder and return z for each sample in
data.
Parameters
----------
x
Numpy nd-array to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
If None, then `self.adata.X` is used.
c
`numpy nd-array` of original (unencoded) desired labels for each sample.
mean
return mean instead of random sample from the latent space
mean_var
return mean and variance instead of random sample from the latent space
if `mean=False`.
Returns
-------
Returns array containing latent space encoding of 'x'.
"""
device = next(self.model.parameters()).device
if x is None and c is None:
x = self.adata.X
if self.conditions_ is not None:
c = self.adata.obs[self.condition_key_]
if c is not None:
c = np.asarray(c)
if not set(c).issubset(self.conditions_):
raise ValueError("Incorrect conditions")
labels = np.zeros(c.shape[0])
for condition, label in self.model.condition_encoder.items():
labels[c == condition] = label
c = torch.tensor(labels, device=device)
latents = []
indices = torch.arange(x.shape[0])
subsampled_indices = indices.split(512)
for batch in subsampled_indices:
x_batch = x[batch, :]
if issparse(x_batch):
x_batch = x_batch.toarray()
x_batch = torch.tensor(x_batch, device=device)
latent = self.model.get_latent(x_batch, c[batch], mean, mean_var)
latent = (latent,) if not isinstance(latent, tuple) else latent
latents += [tuple(l.cpu().detach() for l in latent)]
result = tuple(np.array(torch.cat(l)) for l in zip(*latents))
result = result[0] if len(result) == 1 else result
return result
def get_y(
self,
x: Optional[np.ndarray] = None,
c: Optional[np.ndarray] = None,
):
"""Map `x` in to the latent space. This function will feed data in encoder and return z for each sample in
data.
Parameters
----------
x
Numpy nd-array to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
If None, then `self.adata.X` is used.
c
`numpy nd-array` of original (unencoded) desired labels for each sample.
Returns
-------
Returns array containing output of first decoder layer.
"""
device = next(self.model.parameters()).device
if x is None and c is None:
x = self.adata.X
if self.conditions_ is not None:
c = self.adata.obs[self.condition_key_]
if c is not None:
c = np.asarray(c)
if not set(c).issubset(self.conditions_):
raise ValueError("Incorrect conditions")
labels = np.zeros(c.shape[0])
for condition, label in self.model.condition_encoder.items():
labels[c == condition] = label
c = torch.tensor(labels, device=device)
latents = []
indices = torch.arange(x.shape[0])
subsampled_indices = indices.split(512)
for batch in subsampled_indices:
x_batch = x[batch, :]
if issparse(x_batch):
x_batch = x_batch.toarray()
x_batch = torch.tensor(x_batch, device=device)
latent = self.model.get_y(x_batch, c[batch])
latents += [latent.cpu().detach()]
return np.array(torch.cat(latents))
class CVAELatentsModelMixin:
def sampling(self, mu, log_var):
"""Samples from standard Normal distribution and applies re-parametrization trick.
It is actually sampling from latent space distributions with N(mu, var), computed by encoder.
Parameters
----------
mu: torch.Tensor
Torch Tensor of Means.
log_var: torch.Tensor
Torch Tensor of log. variances.
Returns
-------
Torch Tensor of sampled data.
"""
var = torch.exp(log_var) + 1e-4
return Normal(mu, var.sqrt()).rsample()
def get_latent(self, x, c=None, mean=False, mean_var=False):
"""Map `x` in to the latent space. This function will feed data in encoder and return z for each sample in
data.
Parameters
----------
x: torch.Tensor
Torch Tensor to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
c: torch.Tensor
Torch Tensor of condition labels for each sample.
mean: Boolean
If `True`, return the latent mean instead of a random sample.
mean_var: Boolean
If `True` (and `mean=False`), return mean and variance instead of a random sample.
Returns
-------
Returns Torch Tensor containing latent space encoding of 'x'.
"""
x_ = torch.log(1 + x)
if self.recon_loss == 'mse':
x_ = x
z_mean, z_log_var = self.encoder(x_, c)
latent = self.sampling(z_mean, z_log_var)
if mean:
return z_mean
elif mean_var:
return (z_mean, torch.exp(z_log_var) + 1e-4)
return latent
def get_y(self, x, c=None):
"""Map `x` in to the y dimension (First Layer of Decoder). This function will feed data in encoder and return
y for each sample in data.
Parameters
----------
x: torch.Tensor
Torch Tensor to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
c: torch.Tensor
Torch Tensor of condition labels for each sample.
Returns
-------
Returns Torch Tensor containing output of first decoder layer.
"""
x_ = torch.log(1 + x)
if self.recon_loss == 'mse':
x_ = x
z_mean, z_log_var = self.encoder(x_, c)
latent = self.sampling(z_mean, z_log_var)
output = self.decoder(latent, c)
return output[-1]
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/base/_base.py
| 0.863204 | 0.24495 |
_base.py
|
pypi
|
import inspect
import os
import torch
import pickle
import numpy as np
import pandas as pd
from anndata import AnnData, read
from copy import deepcopy
from typing import Optional, Union
from .expimap import expiMap
from ...trainers import expiMapTrainer
from ..base._utils import _validate_var_names
from ..base._base import BaseMixin, SurgeryMixin, CVAELatentsMixin
class EXPIMAP(BaseMixin, SurgeryMixin, CVAELatentsMixin):
"""Model for scArches class. This class contains the implementation of Conditional Variational Auto-encoder.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log-transformed data
for 'mse' loss.
condition_key: String
column name of conditions in `adata.obs` data frame.
conditions: List
List of Condition names that the used data will contain to get the right encoding when used after reloading.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Bottleneck layer (z) size.
dr_rate: Float
Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse' or 'nb'.
use_l_encoder: Boolean
If True and `decoder_last_layer`='softmax', a library size encoder is used.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
mask: Array or List
if not None, an array of 0s and 1s from utils.add_annotations to create VAE with a masked linear decoder.
mask_key: String
A key in `adata.varm` for the mask if the mask is not provided.
decoder_last_layer: String or None
The last layer of the decoder. Must be 'softmax' (default for 'nb' loss), 'identity' (default for 'mse' loss),
'softplus', 'exp' or 'relu'.
soft_mask: Boolean
Use soft mask option. If True, the model will enforce the mask with L1 regularization
instead of multiplying the weights of the linear decoder by the binary mask.
n_ext: Integer
Number of unconstrained extension terms.
Used for query mapping.
n_ext_m: Integer
Number of constrained extension terms.
Used for query mapping.
use_hsic: Boolean
If True, add HSIC regularization for unconstrained extension terms.
Used for query mapping.
hsic_one_vs_all: Boolean
If True, calculates the sum of HSIC losses for each unconstrained term vs the other terms.
If False, calculates HSIC for all unconstrained terms vs the other terms.
Used for query mapping.
ext_mask: Array or List
Mask (similar to the mask argument) for the constrained extension terms.
Used for query mapping.
soft_ext_mask: Boolean
Use the soft mask mode for training with the constrained extension terms.
Used for query mapping.
"""
def __init__(
self,
adata: AnnData,
condition_key: str = None,
conditions: Optional[list] = None,
hidden_layer_sizes: list = [256, 256],
dr_rate: float = 0.05,
recon_loss: str = 'nb',
use_l_encoder: bool = False,
use_bn: bool = False,
use_ln: bool = True,
mask: Optional[Union[np.ndarray, list]] = None,
mask_key: str = 'I',
decoder_last_layer: Optional[str] = None,
soft_mask: bool = False,
n_ext: int = 0,
n_ext_m: int = 0,
use_hsic: bool = False,
hsic_one_vs_all: bool = False,
ext_mask: Optional[Union[np.ndarray, list]] = None,
soft_ext_mask: bool = False
):
self.adata = adata
if mask is None and mask_key not in self.adata.varm:
raise ValueError('Please provide mask.')
self.condition_key_ = condition_key
if conditions is None:
if condition_key is not None:
self.conditions_ = adata.obs[condition_key].unique().tolist()
else:
self.conditions_ = []
else:
self.conditions_ = conditions
self.hidden_layer_sizes_ = hidden_layer_sizes
self.dr_rate_ = dr_rate
self.recon_loss_ = recon_loss
self.use_bn_ = use_bn
self.use_ln_ = use_ln
self.input_dim_ = adata.n_vars
self.use_l_encoder_ = use_l_encoder
self.decoder_last_layer_ = decoder_last_layer
if mask is None:
mask = adata.varm[mask_key].T
self.mask_ = mask if isinstance(mask, list) else mask.tolist()
mask = torch.tensor(mask).float()
self.latent_dim_ = len(self.mask_)
self.ext_mask_ = None
if ext_mask is not None:
self.ext_mask_ = ext_mask if isinstance(ext_mask, list) else ext_mask.tolist()
ext_mask = torch.tensor(ext_mask).float()
self.n_ext_ = n_ext
self.n_ext_m_ = n_ext_m
self.soft_mask_ = soft_mask
self.soft_ext_mask_ = soft_ext_mask
self.use_hsic_ = use_hsic and n_ext > 0
self.hsic_one_vs_all_ = hsic_one_vs_all
self.model = expiMap(
self.input_dim_,
self.latent_dim_,
mask,
self.conditions_,
self.hidden_layer_sizes_,
self.dr_rate_,
self.recon_loss_,
self.use_l_encoder_,
self.use_bn_,
self.use_ln_,
self.decoder_last_layer_,
self.soft_mask_,
self.n_ext_,
self.n_ext_m_,
self.use_hsic_,
self.hsic_one_vs_all_,
ext_mask,
self.soft_ext_mask_
)
self.is_trained_ = False
self.trainer = None
def train(
self,
n_epochs: int = 400,
lr: float = 1e-3,
eps: float = 0.01,
alpha: Optional[float] = None,
omega: Optional[torch.Tensor] = None,
**kwargs
):
"""Train the model.
Parameters
----------
n_epochs: Integer
Number of epochs for training the model.
lr: Float
Learning rate for training the model.
eps: Float
torch.optim.Adam eps parameter
alpha_kl: Float
Multiplies the KL divergence part of the loss. Set to 0.35 by default.
alpha_epoch_anneal: Integer or None
If not 'None', the KL Loss scaling factor (alpha_kl) will be annealed from 0 to 1 every epoch until the input
integer is reached. By default it is set to 130 epochs, or to n_epochs if n_epochs < 130.
alpha: Float
Group Lasso regularization coefficient
omega: Tensor or None
If not 'None', vector of coefficients for each group
alpha_l1: Float
L1 regularization coefficient for the soft mask of reference (old) and new constrained terms.
Specifies the strength of deactivation for the genes which are not in the corresponding annotation groups
in the mask.
alpha_l1_epoch_anneal: Integer
If not 'None', the alpha_l1 scaling factor will be annealed from 0 to 1 every 'alpha_l1_anneal_each' epochs
until the input integer is reached.
alpha_l1_anneal_each: Integer
Anneal alpha_l1 every alpha_l1_anneal_each'th epoch, i.e. for 5 (default)
do annealing every 5th epoch.
gamma_ext: Float
L1 regularization coefficient for the new unconstrained terms. Specifies the strength of
sparsity enforcement.
gamma_epoch_anneal: Integer
If not 'None', the gamma_ext scaling factor will be annealed from 0 to 1 every 'gamma_anneal_each' epochs
until the input integer is reached.
gamma_anneal_each: Integer
Anneal gamma_ext every gamma_anneal_each'th epoch, i.e. for 5 (default)
do annealing every 5th epoch.
beta: Float
HSIC regularization coefficient for the unconstrained terms.
Multiplies the HSIC loss terms if not 'None'.
kwargs
kwargs for the expiMap trainer.
"""
if "alpha_kl" not in kwargs:
print("The default value of alpha_kl was changed to 0.35. from 1. "
"This may case inconsistency with previous training results. Set alpha_kl=1. to reproduce the previous results.")
kwargs["alpha_kl"] = 0.35
if "alpha_epoch_anneal" not in kwargs:
print("alpha_epoch_anneal is used by default now. "
"This may case inconsistency with previous training results. Set alpha_epoch_anneal=None to reproduce the previous results.")
epochs_anneal = 130
if n_epochs < 130:
epochs_anneal = n_epochs
kwargs["alpha_epoch_anneal"] = epochs_anneal
self.trainer = expiMapTrainer(
self.model,
self.adata,
alpha=alpha,
omega=omega,
condition_key=self.condition_key_,
**kwargs
)
self.trainer.train(n_epochs, lr, eps)
self.is_trained_ = True
def nonzero_terms(self):
"""Return indices of active terms.
Active terms are the terms which were not deactivated by the group lasso regularization.
"""
return self.model.decoder.nonzero_terms()
def get_latent(
self,
x: Optional[np.ndarray] = None,
c: Optional[np.ndarray] = None,
only_active: bool = False,
mean: bool = False,
mean_var: bool = False
):
"""Map `x` in to the latent space. This function will feed data in encoder
and return z for each sample in data.
Parameters
----------
x
Numpy nd-array to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
If None, then `self.adata.X` is used.
c
`numpy nd-array` of original (unencoded) desired labels for each sample.
only_active
Return only the latent variables which correspond to active terms, i.e. terms that
were not deactivated by the group lasso regularization.
mean
return mean instead of random sample from the latent space
mean_var
return mean and variance instead of random sample from the latent space
if `mean=False`.
Returns
-------
Returns array containing latent space encoding of 'x'.
"""
result = super().get_latent(x, c, mean, mean_var)
if not only_active:
return result
else:
active_idx = self.nonzero_terms()
if isinstance(result, tuple):
result = tuple(r[:, active_idx] for r in result)
else:
result = result[:, active_idx]
return result
def update_terms(self, terms: Union[str, list]='terms', adata=None):
"""Add extension terms' names to the terms.
"""
if isinstance(terms, str):
adata = self.adata if adata is None else adata
key = terms
terms = list(adata.uns[terms])
else:
adata = None
key = None
terms = list(terms)
lat_mask_dim = self.latent_dim_ + self.n_ext_m_
if len(terms) != self.latent_dim_ and len(terms) != lat_mask_dim + self.n_ext_:
raise ValueError('The list of terms should have the same length as the mask.')
if len(terms) == self.latent_dim_:
if self.n_ext_m_ > 0:
terms += ['constrained_' + str(i) for i in range(self.n_ext_m_)]
if self.n_ext_ > 0:
terms += ['unconstrained_' + str(i) for i in range(self.n_ext_)]
if adata is not None:
adata.uns[key] = terms
else:
return terms
def term_genes(self, term: Union[str, int], terms: Union[str, list]='terms'):
"""Return the dataframe with genes belonging to the term after training sorted by absolute weights in the decoder.
"""
if isinstance(terms, str):
terms = list(self.adata.uns[terms])
else:
terms = list(terms)
if len(terms) == self.latent_dim_:
if self.n_ext_m_ > 0:
terms += ['constrained_' + str(i) for i in range(self.n_ext_m_)]
if self.n_ext_ > 0:
terms += ['unconstrained_' + str(i) for i in range(self.n_ext_)]
lat_mask_dim = self.latent_dim_ + self.n_ext_m_
if len(terms) != self.latent_dim_ and len(terms) != lat_mask_dim + self.n_ext_:
raise ValueError('The list of terms should have the same length as the mask.')
term = terms.index(term) if isinstance(term, str) else term
if term < self.latent_dim_:
weights = self.model.decoder.L0.expr_L.weight[:, term].data.cpu().numpy()
mask_idx = self.mask_[term]
elif term >= lat_mask_dim:
term -= lat_mask_dim
weights = self.model.decoder.L0.ext_L.weight[:, term].data.cpu().numpy()
mask_idx = None
else:
term -= self.latent_dim_
weights = self.model.decoder.L0.ext_L_m.weight[:, term].data.cpu().numpy()
mask_idx = self.ext_mask_[term]
abs_weights = np.abs(weights)
srt_idx = np.argsort(abs_weights)[::-1][:(abs_weights > 0).sum()]
result = pd.DataFrame()
result['genes'] = self.adata.var_names[srt_idx].tolist()
result['weights'] = weights[srt_idx]
result['in_mask'] = False
if mask_idx is not None:
in_mask = np.isin(srt_idx, np.where(mask_idx)[0])
result['in_mask'][in_mask] = True
return result
def mask_genes(self, terms: Union[str, list]='terms'):
"""Return lists of genes belonging to the terms in the mask.
"""
if isinstance(terms, str):
terms = list(self.adata.uns[terms])
else:
terms = list(terms)
I = np.array(self.mask_)
if self.n_ext_m_ > 0:
I = np.concatenate((I, self.ext_mask_))
if len(terms) == self.latent_dim_:
terms += ['constrained_' + str(i) for i in range(self.n_ext_m_)]
elif len(terms) == self.latent_dim_ + self.n_ext_m_ + self.n_ext_:
terms = terms[:(self.latent_dim_ + self.n_ext_m_)]
else:
raise ValueError('The list of terms should have the same length as the mask.')
I = I.astype(bool)
return {term: self.adata.var_names[I[i]].tolist() for i, term in enumerate(terms)}
def latent_directions(self, method="sum", get_confidence=False,
adata=None, key_added='directions'):
"""Get directions of upregulation for each latent dimension.
Multiplying this by the raw latent scores ensures positive latent scores correspond to upregulation.
Parameters
----------
method: String
Method of calculation, it should be 'sum' or 'counts'.
get_confidence: Boolean
Only for method='counts'. If 'True', also calculate confidence
of the directions.
adata: AnnData
An AnnData object to store dimensions. If 'None', self.adata is used.
key_added: String
key of adata.uns where to put the dimensions.
"""
if adata is None:
adata = self.adata
terms_weights = self.model.decoder.L0.expr_L.weight.data
if self.n_ext_m_ > 0:
terms_weights = torch.cat([terms_weights, self.model.decoder.L0.ext_L_m.weight.data], dim=1)
if self.n_ext_ > 0:
terms_weights = torch.cat([terms_weights, self.model.decoder.L0.ext_L.weight.data], dim=1)
if method == "sum":
signs = terms_weights.sum(0).cpu().numpy()
signs[signs>0] = 1.
signs[signs<0] = -1.
confidence = None
elif method == "counts":
num_nz = torch.count_nonzero(terms_weights, dim=0)
upreg_genes = torch.count_nonzero(terms_weights > 0, dim=0)
signs = upreg_genes / (num_nz+(num_nz==0))
signs = signs.cpu().numpy()
confidence = signs.copy()
confidence = np.abs(confidence-0.5)/0.5
confidence[num_nz==0] = 0
signs[signs>0.5] = 1.
signs[signs<0.5] = -1.
signs[signs==0.5] = 0
signs[num_nz==0] = 0
else:
raise ValueError("Unrecognized method for getting the latent direction.")
adata.uns[key_added] = signs
if get_confidence and confidence is not None:
adata.uns[key_added + '_confindence'] = confidence
def latent_enrich(
self,
groups,
comparison='rest',
n_sample=5000,
use_directions=False,
directions_key='directions',
select_terms=None,
adata=None,
exact=True,
key_added='bf_scores'
):
"""Gene set enrichment test for the latent space. Test the hypothesis that latent scores
for each term in one group (z_1) is bigger than in the other group (z_2).
Puts results to `adata.uns[key_added]`. Results are a dictionary with
`p_h0` - probability that z_1 > z_2, `p_h1 = 1-p_h0` and `bf` - bayes factors equal to `log(p_h0/p_h1)`.
Parameters
----------
groups: String or Dict
A string with the key in `adata.obs` to look for categories or a dictionary
with categories as keys and lists of cell names as values.
comparison: String
The category name to compare against. If 'rest', then compares each category against all others.
n_sample: Integer
Number of random samples to draw for each category.
use_directions: Boolean
If 'True', multiplies the latent scores by directions in `adata`.
directions_key: String
The key in `adata.uns` for directions.
select_terms: Array
If not 'None', then an index of terms to select for the test. Only does the test
for these terms.
adata: AnnData
An AnnData object to use. If 'None', uses `self.adata`.
exact: Boolean
Use exact probabilities for comparisons.
key_added: String
key of adata.uns where to put the results of the test.
"""
if adata is None:
adata = self.adata
if isinstance(groups, str):
cats_col = adata.obs[groups]
cats = cats_col.unique()
elif isinstance(groups, dict):
cats = []
all_cells = []
for group, cells in groups.items():
cats.append(group)
all_cells += cells
adata = adata[all_cells]
cats_col = pd.Series(index=adata.obs_names, dtype=str)
for group, cells in groups.items():
cats_col[cells] = group
else:
raise ValueError("groups should be a string or a dict.")
if comparison != "rest" and isinstance(comparison, str):
comparison = [comparison]
if comparison != "rest" and not set(comparison).issubset(cats):
raise ValueError("comparison should be 'rest' or among the passed groups")
scores = {}
for cat in cats:
if cat in comparison:
continue
cat_mask = cats_col == cat
if comparison == "rest":
others_mask = ~cat_mask
else:
others_mask = cats_col.isin(comparison)
choice_1 = np.random.choice(cat_mask.sum(), n_sample)
choice_2 = np.random.choice(others_mask.sum(), n_sample)
adata_cat = adata[cat_mask][choice_1]
adata_others = adata[others_mask][choice_2]
if use_directions:
directions = adata.uns[directions_key]
else:
directions = None
z0 = self.get_latent(
adata_cat.X,
adata_cat.obs[self.condition_key_],
mean=False,
mean_var=exact
)
z1 = self.get_latent(
adata_others.X,
adata_others.obs[self.condition_key_],
mean=False,
mean_var=exact
)
if not exact:
if directions is not None:
z0 *= directions
z1 *= directions
if select_terms is not None:
z0 = z0[:, select_terms]
z1 = z1[:, select_terms]
to_reduce = z0 > z1
zeros_mask = (np.abs(z0).sum(0) == 0) | (np.abs(z1).sum(0) == 0)
else:
from scipy.special import erfc
means0, vars0 = z0
means1, vars1 = z1
if directions is not None:
means0 *= directions
means1 *= directions
if select_terms is not None:
means0 = means0[:, select_terms]
means1 = means1[:, select_terms]
vars0 = vars0[:, select_terms]
vars1 = vars1[:, select_terms]
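# closed form for independent Gaussians:
# P(z0 > z1) = 0.5 * erfc((mu1 - mu0) / sqrt(2 * (var0 + var1)))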
to_reduce = (means1 - means0) / np.sqrt(2 * (vars0 + vars1))
to_reduce = 0.5 * erfc(to_reduce)
zeros_mask = (np.abs(means0).sum(0) == 0) | (np.abs(means1).sum(0) == 0)
p_h0 = np.mean(to_reduce, axis=0)
p_h1 = 1.0 - p_h0
epsilon = 1e-12
bf = np.log(p_h0 + epsilon) - np.log(p_h1 + epsilon)
p_h0[zeros_mask] = 0
p_h1[zeros_mask] = 0
bf[zeros_mask] = 0
scores[cat] = dict(p_h0=p_h0, p_h1=p_h1, bf=bf)
adata.uns[key_added] = scores
@classmethod
def load_query_data(
cls,
adata: AnnData,
reference_model: Union[str, 'EXPIMAP'],
freeze: bool = True,
freeze_expression: bool = True,
unfreeze_ext: bool = True,
remove_dropout: bool = True,
new_n_ext: Optional[int] = None,
new_n_ext_m: Optional[int] = None,
new_ext_mask: Optional[Union[np.ndarray, list]] = None,
new_soft_ext_mask: bool = False,
**kwargs
):
"""Transfer Learning function for new data. Uses old trained model and expands it for new conditions.
Parameters
----------
adata
Query anndata object.
reference_model
A model to expand or a path to a model folder.
freeze: Boolean
If 'True' freezes every part of the network except the first layers of encoder/decoder.
freeze_expression: Boolean
If 'True' freeze every weight in first layers except the condition weights.
remove_dropout: Boolean
If 'True' remove Dropout for Transfer Learning.
unfreeze_ext: Boolean
If 'True' do not freeze weights for new constrained and unconstrained extension terms.
new_n_ext: Integer
Number of new unconstrained extension terms to add to the reference model.
Used for query mapping.
new_n_ext_m: Integer
Number of new constrained extension terms to add to the reference model.
Used for query mapping.
new_ext_mask: Array or List
Mask (similar to the mask argument) for the new constrained extension terms.
new_soft_ext_mask: Boolean
Use the soft mask mode for training with the new constrained extension terms.
kwargs
kwargs for the initialization of the EXPIMAP class for the query model.
Returns
-------
new_model
New (query) model to train on query data.
"""
params = {}
params['adata'] = adata
params['reference_model'] = reference_model
params['freeze'] = freeze
params['freeze_expression'] = freeze_expression
params['remove_dropout'] = remove_dropout
if new_n_ext is not None:
params['n_ext'] = new_n_ext
if new_n_ext_m is not None:
params['n_ext_m'] = new_n_ext_m
if new_ext_mask is None:
raise ValueError('Provide new ext_mask')
params['ext_mask'] = new_ext_mask
params['soft_ext_mask'] = new_soft_ext_mask
params.update(kwargs)
new_model = super().load_query_data(**params)
if freeze and unfreeze_ext:
for name, p in new_model.model.named_parameters():
if 'ext_L.weight' in name or 'ext_L_m.weight' in name:
p.requires_grad = True
if 'expand_mean_encoder' in name or 'expand_var_encoder' in name:
p.requires_grad = True
return new_model
@classmethod
def _get_init_params_from_dict(cls, dct):
init_params = {
'condition_key': dct['condition_key_'],
'conditions': dct['conditions_'],
'hidden_layer_sizes': dct['hidden_layer_sizes_'],
'dr_rate': dct['dr_rate_'],
'recon_loss': dct['recon_loss_'],
'use_bn': dct['use_bn_'],
'use_ln': dct['use_ln_'],
'mask': dct['mask_'],
'decoder_last_layer': dct['decoder_last_layer_'] if 'decoder_last_layer_' in dct else "softmax",
'use_l_encoder': dct['use_l_encoder_'] if 'use_l_encoder_' in dct else False,
'n_ext': dct['n_ext_'] if 'n_ext_' in dct else 0,
'n_ext_m': dct['n_ext_m_'] if 'n_ext_m_' in dct else 0,
'soft_mask': dct['soft_mask_'] if 'soft_mask_' in dct else False,
'soft_ext_mask': dct['soft_ext_mask_'] if 'soft_ext_mask_' in dct else False,
'hsic_one_vs_all': dct['hsic_one_vs_all_'] if 'hsic_one_vs_all_' in dct else False,
'use_hsic': dct['use_hsic_'] if 'use_hsic_' in dct else False,
'ext_mask': dct['ext_mask_'] if 'ext_mask_' in dct else None
}
return init_params
@classmethod
def _validate_adata(cls, adata, dct):
if adata.n_vars != dct['input_dim_']:
raise ValueError("Incorrect var dimension")
adata_conditions = adata.obs[dct['condition_key_']].unique().tolist()
if not set(adata_conditions).issubset(dct['conditions_']):
raise ValueError("Incorrect conditions")
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/expimap/expimap_model.py
| 0.902888 | 0.418222 |
expimap_model.py
|
pypi
|
import torch
import torch.nn as nn
import numpy as np
from typing import Optional
from ..trvae._utils import one_hot_encoder
class MaskedLinear(nn.Linear):
def __init__(self, n_in, n_out, mask, bias=True):
# mask should have the same dimensions as the transposed linear weight
# n_input x n_output_nodes
if n_in != mask.shape[0] or n_out != mask.shape[1]:
raise ValueError('Incorrect shape of the mask.')
super().__init__(n_in, n_out, bias)
self.register_buffer('mask', mask.t())
# zero out the weights for group lasso
# gradient descent won't change these zero weights
self.weight.data*=self.mask
def forward(self, input):
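# re-apply the mask on every forward pass so the masked weights act as
# zeros even if optimizer updates changed the underlying parameters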
return nn.functional.linear(input, self.weight*self.mask, self.bias)
class MaskedCondLayers(nn.Module):
def __init__(
self,
n_in: int,
n_out: int,
n_cond: int,
bias: bool,
n_ext: int = 0,
n_ext_m: int = 0,
mask: Optional[torch.Tensor] = None,
ext_mask: Optional[torch.Tensor] = None
):
super().__init__()
self.n_cond = n_cond
self.n_ext = n_ext
self.n_ext_m = n_ext_m
if mask is None:
self.expr_L = nn.Linear(n_in, n_out, bias=bias)
else:
self.expr_L = MaskedLinear(n_in, n_out, mask, bias=bias)
if self.n_cond != 0:
self.cond_L = nn.Linear(self.n_cond, n_out, bias=False)
if self.n_ext != 0:
self.ext_L = nn.Linear(self.n_ext, n_out, bias=False)
if self.n_ext_m != 0:
if ext_mask is not None:
self.ext_L_m = MaskedLinear(self.n_ext_m, n_out, ext_mask, bias=False)
else:
self.ext_L_m = nn.Linear(self.n_ext_m, n_out, bias=False)
def forward(self, x: torch.Tensor):
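# the input columns are laid out as [expr | ext_m | ext | cond]: split the
# condition one-hot block off the end first, then the unconstrained (ext)
# and constrained (ext_m) extension blocks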
if self.n_cond == 0:
expr, cond = x, None
else:
expr, cond = torch.split(x, [x.shape[1] - self.n_cond, self.n_cond], dim=1)
if self.n_ext == 0:
ext = None
else:
expr, ext = torch.split(expr, [expr.shape[1] - self.n_ext, self.n_ext], dim=1)
if self.n_ext_m == 0:
ext_m = None
else:
expr, ext_m = torch.split(expr, [expr.shape[1] - self.n_ext_m, self.n_ext_m], dim=1)
out = self.expr_L(expr)
if ext is not None:
out = out + self.ext_L(ext)
if ext_m is not None:
out = out + self.ext_L_m(ext_m)
if cond is not None:
out = out + self.cond_L(cond)
return out
class MaskedLinearDecoder(nn.Module):
def __init__(self, in_dim, out_dim, n_cond, mask, ext_mask, recon_loss,
last_layer=None, n_ext=0, n_ext_m=0):
super().__init__()
if recon_loss == "mse":
if last_layer == "softmax":
raise ValueError("Can't specify softmax last layer with mse loss.")
last_layer = "identity" if last_layer is None else last_layer
elif recon_loss == "nb":
last_layer = "softmax" if last_layer is None else last_layer
else:
raise ValueError("Unrecognized loss.")
print("Decoder Architecture:")
print("\tMasked linear layer in, ext_m, ext, cond, out: ", in_dim, n_ext_m, n_ext, n_cond, out_dim)
if mask is not None:
print('\twith hard mask.')
else:
print('\twith soft mask.')
self.n_ext = n_ext
self.n_ext_m = n_ext_m
self.n_cond = 0
if n_cond is not None:
self.n_cond = n_cond
self.L0 = MaskedCondLayers(in_dim, out_dim, n_cond, bias=False, n_ext=n_ext, n_ext_m=n_ext_m,
mask=mask, ext_mask=ext_mask)
if last_layer == "softmax":
self.mean_decoder = nn.Softmax(dim=-1)
elif last_layer == "softplus":
self.mean_decoder = nn.Softplus()
elif last_layer == "exp":
self.mean_decoder = torch.exp
elif last_layer == "relu":
self.mean_decoder = nn.ReLU()
elif last_layer == "identity":
self.mean_decoder = lambda a: a
else:
raise ValueError("Unrecognized last layer.")
print("Last Decoder layer:", last_layer)
def forward(self, z, batch=None):
if batch is not None:
batch = one_hot_encoder(batch, n_cls=self.n_cond)
z_cat = torch.cat((z, batch), dim=-1)
dec_latent = self.L0(z_cat)
else:
dec_latent = self.L0(z)
recon_x = self.mean_decoder(dec_latent)
return recon_x, dec_latent
def nonzero_terms(self):
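# a term is active if any of its decoder weights is nonzero (L1 norm > 0);
# extension terms are always treated as active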
v = self.L0.expr_L.weight.data
nz = (v.norm(p=1, dim=0)>0).cpu().numpy()
nz = np.append(nz, np.full(self.n_ext_m, True))
nz = np.append(nz, np.full(self.n_ext, True))
return nz
def n_inactive_terms(self):
n = (~self.nonzero_terms()).sum()
return int(n)
class ExtEncoder(nn.Module):
def __init__(self,
layer_sizes: list,
latent_dim: int,
use_bn: bool,
use_ln: bool,
use_dr: bool,
dr_rate: float,
num_classes: Optional[int] = None,
n_expand: int = 0):
super().__init__()
self.n_classes = 0
self.n_expand = n_expand
if num_classes is not None:
self.n_classes = num_classes
self.FC = None
if len(layer_sizes) > 1:
print("Encoder Architecture:")
self.FC = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
if i == 0:
print("\tInput Layer in, out and cond:", in_size, out_size, self.n_classes)
self.FC.add_module(name="L{:d}".format(i), module=MaskedCondLayers(in_size,
out_size,
self.n_classes,
bias=True))
else:
print("\tHidden Layer", i, "in/out:", in_size, out_size)
self.FC.add_module(name="L{:d}".format(i), module=nn.Linear(in_size, out_size, bias=True))
if use_bn:
self.FC.add_module("N{:d}".format(i), module=nn.BatchNorm1d(out_size, affine=True))
elif use_ln:
self.FC.add_module("N{:d}".format(i), module=nn.LayerNorm(out_size, elementwise_affine=False))
self.FC.add_module(name="A{:d}".format(i), module=nn.ReLU())
if use_dr:
self.FC.add_module(name="D{:d}".format(i), module=nn.Dropout(p=dr_rate))
print("\tMean/Var Layer in/out:", layer_sizes[-1], latent_dim)
self.mean_encoder = nn.Linear(layer_sizes[-1], latent_dim)
self.log_var_encoder = nn.Linear(layer_sizes[-1], latent_dim)
if self.n_expand != 0:
print("\tExpanded Mean/Var Layer in/out:", layer_sizes[-1], self.n_expand)
self.expand_mean_encoder = nn.Linear(layer_sizes[-1], self.n_expand)
self.expand_var_encoder = nn.Linear(layer_sizes[-1], self.n_expand)
def forward(self, x, batch=None):
if batch is not None:
batch = one_hot_encoder(batch, n_cls=self.n_classes)
x = torch.cat((x, batch), dim=-1)
if self.FC is not None:
x = self.FC(x)
means = self.mean_encoder(x)
log_vars = self.log_var_encoder(x)
if self.n_expand != 0:
means = torch.cat((means, self.expand_mean_encoder(x)), dim=-1)
log_vars = torch.cat((log_vars, self.expand_var_encoder(x)), dim=-1)
return means, log_vars
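# --- Illustrative sketch (not part of the original source) -----------------
# A hypothetical ExtEncoder over 2000 genes with two conditions and four
# extension ("expand") latent dimensions appended to a 10-d latent space.
def _example_ext_encoder():
    enc = ExtEncoder(layer_sizes=[2000, 256], latent_dim=10,
                     use_bn=False, use_ln=True, use_dr=True, dr_rate=0.05,
                     num_classes=2, n_expand=4)
    x = torch.rand(8, 2000)                    # fake expression matrix
    batch = torch.zeros(8, dtype=torch.long)   # all cells from condition 0
    means, log_vars = enc(x, batch)            # both of shape (8, 10 + 4)
    return means, log_vars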
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/expimap/modules.py
| 0.918781 | 0.433562 |
modules.py
|
pypi
|
from typing import Optional
import torch
import torch.nn as nn
from torch.distributions import Normal, kl_divergence
import torch.nn.functional as F
from .modules import MaskedLinearDecoder, ExtEncoder
from ..trvae.losses import mse, nb
from .losses import hsic
from ..trvae._utils import one_hot_encoder
from ..base._base import CVAELatentsModelMixin
class expiMap(nn.Module, CVAELatentsModelMixin):
"""ScArches model class. This class contains the implementation of Conditional Variational Auto-encoder.
Parameters
----------
input_dim: Integer
        Number of input features (i.e. genes in case of scRNA-seq).
conditions: List
List of Condition names that the used data will contain to get the right encoding when used after reloading.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Bottleneck layer (z) size.
dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate`=0 no dropout will be applied.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse' or 'nb'.
use_l_encoder: Boolean
        If True and `decoder_last_layer`='softmax', library size encoder is used.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
mask: Tensor or None
        If not None, tensor of 0s and 1s from utils.add_annotations used to create a VAE with a masked linear decoder.
decoder_last_layer: String or None
        The last layer of the decoder. Must be 'softmax' (default for 'nb' loss), 'identity' (default for 'mse' loss),
        'softplus', 'exp' or 'relu'.
"""
def __init__(self,
input_dim: int,
latent_dim: int,
mask: torch.Tensor,
conditions: list,
hidden_layer_sizes: list = [256, 256],
dr_rate: float = 0.05,
recon_loss: str = 'nb',
use_l_encoder: bool = False,
use_bn: bool = False,
use_ln: bool = True,
decoder_last_layer: Optional[str] = None,
soft_mask: bool = False,
n_ext: int = 0,
n_ext_m: int = 0,
use_hsic: bool = False,
hsic_one_vs_all: bool = False,
ext_mask: Optional[torch.Tensor] = None,
soft_ext_mask: bool = False
):
super().__init__()
assert isinstance(hidden_layer_sizes, list)
assert isinstance(latent_dim, int)
assert isinstance(conditions, list)
assert recon_loss in ["mse", "nb"], "'recon_loss' must be 'mse' or 'nb'"
print("\nINITIALIZING NEW NETWORK..............")
self.input_dim = input_dim
self.latent_dim = latent_dim
self.n_conditions = len(conditions)
self.conditions = conditions
self.condition_encoder = {k: v for k, v in zip(conditions, range(len(conditions)))}
self.recon_loss = recon_loss
self.freeze = False
self.use_bn = use_bn
self.use_ln = use_ln
self.use_mmd = False
self.n_ext_encoder = n_ext + n_ext_m
self.n_ext_decoder = n_ext
self.n_ext_m_decoder = n_ext_m
self.use_hsic = use_hsic and self.n_ext_decoder > 0
self.hsic_one_vs_all = hsic_one_vs_all
self.soft_mask = soft_mask and mask is not None
self.soft_ext_mask = soft_ext_mask and ext_mask is not None
if decoder_last_layer is None:
if recon_loss == 'nb':
self.decoder_last_layer = 'softmax'
else:
self.decoder_last_layer = 'identity'
else:
self.decoder_last_layer = decoder_last_layer
self.use_l_encoder = use_l_encoder
self.dr_rate = dr_rate
if self.dr_rate > 0:
self.use_dr = True
else:
self.use_dr = False
if recon_loss == "nb":
self.theta = torch.nn.Parameter(torch.randn(self.input_dim, self.n_conditions))
else:
self.theta = None
self.hidden_layer_sizes = hidden_layer_sizes
encoder_layer_sizes = self.hidden_layer_sizes.copy()
encoder_layer_sizes.insert(0, self.input_dim)
decoder_layer_sizes = self.hidden_layer_sizes.copy()
decoder_layer_sizes.reverse()
decoder_layer_sizes.append(self.input_dim)
self.cell_type_encoder = None
self.encoder = ExtEncoder(encoder_layer_sizes,
self.latent_dim,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
self.n_conditions,
self.n_ext_encoder)
if self.soft_mask:
self.n_inact_genes = (1-mask).sum().item()
soft_shape = mask.shape
if soft_shape[0] != latent_dim or soft_shape[1] != input_dim:
raise ValueError('Incorrect shape of the soft mask.')
self.mask = mask.t()
mask = None
else:
self.mask = None
if self.soft_ext_mask:
self.n_inact_ext_genes = (1-ext_mask).sum().item()
ext_shape = ext_mask.shape
if ext_shape[0] != self.n_ext_m_decoder:
raise ValueError('Dim 0 of ext_mask should be the same as n_ext_m_decoder.')
if ext_shape[1] != self.input_dim:
raise ValueError('Dim 1 of ext_mask should be the same as input_dim.')
self.ext_mask = ext_mask.t()
ext_mask = None
else:
self.ext_mask = None
self.decoder = MaskedLinearDecoder(self.latent_dim,
self.input_dim,
self.n_conditions,
mask,
ext_mask,
self.recon_loss,
self.decoder_last_layer,
self.n_ext_decoder,
self.n_ext_m_decoder)
if self.use_l_encoder:
self.l_encoder = ExtEncoder([self.input_dim, 128],
1,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
self.n_conditions)
def forward(self, x=None, batch=None, sizefactor=None, labeled=None):
x_log = torch.log(1 + x)
if self.recon_loss == 'mse':
x_log = x
z1_mean, z1_log_var = self.encoder(x_log, batch)
z1 = self.sampling(z1_mean, z1_log_var)
outputs = self.decoder(z1, batch)
if self.recon_loss == "mse":
recon_x, y1 = outputs
recon_loss = mse(recon_x, x_log).sum(dim=-1).mean()
elif self.recon_loss == "nb":
if self.use_l_encoder and self.decoder_last_layer == "softmax":
sizefactor = torch.exp(self.sampling(*self.l_encoder(x_log, batch))).flatten()
dec_mean_gamma, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(dec_mean_gamma.size(0), dec_mean_gamma.size(1))
if self.decoder_last_layer == "softmax":
dec_mean = dec_mean_gamma * size_factor_view
else:
dec_mean = dec_mean_gamma
dispersion = F.linear(one_hot_encoder(batch, self.n_conditions), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = -nb(x=x, mu=dec_mean, theta=dispersion).sum(dim=-1).mean()
z1_var = torch.exp(z1_log_var) + 1e-4
kl_div = kl_divergence(
Normal(z1_mean, torch.sqrt(z1_var)),
Normal(torch.zeros_like(z1_mean), torch.ones_like(z1_var))
).sum(dim=1).mean()
if self.use_hsic:
if not self.hsic_one_vs_all:
z_ann = z1[:, :-self.n_ext_decoder]
z_ext = z1[:, -self.n_ext_decoder:]
hsic_loss = hsic(z_ann, z_ext)
else:
hsic_loss = 0.
sz = self.latent_dim + self.n_ext_encoder
shift = self.latent_dim + self.n_ext_m_decoder
for i in range(self.n_ext_decoder):
sel_cols = torch.full((sz,), True, device=z1.device)
sel_cols[shift + i] = False
rest = z1[:, sel_cols]
term = z1[:, ~sel_cols]
hsic_loss = hsic_loss + hsic(term, rest)
else:
hsic_loss = torch.tensor(0.0, device=z1.device)
return recon_loss, kl_div, hsic_loss
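# --- Illustrative sketch (not part of the original source) -----------------
# A minimal, hypothetical training step for expiMap with the default 'nb'
# reconstruction loss; the random tensors stand in for real count data, the
# loss weights are arbitrary, and the hard mask shape (latent_dim, input_dim)
# follows the shape check in __init__ above.
def _example_expimap_step():
    model = expiMap(input_dim=2000, latent_dim=50,
                    mask=torch.ones(50, 2000), conditions=['batch1', 'batch2'])
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    x = torch.rand(8, 2000)
    batch = torch.zeros(8, dtype=torch.long)
    recon_loss, kl_div, hsic_loss = model(x, batch, sizefactor=x.sum(dim=1))
    loss = recon_loss + 0.5 * kl_div + hsic_loss   # hypothetical weighting
    loss.backward()
    opt.step()
    return float(loss)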
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/expimap/expimap.py
| 0.932199 | 0.575111 |
expimap.py
|
pypi
|
import torch
import torch.nn as nn
class Encoder(nn.Module):
"""
Constructs the encoder sub-network of VAE. This class implements the
encoder part of Variational Auto-encoder. It will transform primary
data in the `n_vars` dimension-space to means and log variances of `z_dimension` latent space.
Parameters
----------
x_dimension: integer
number of gene expression space dimensions.
layer_sizes: List
List of hidden layer sizes.
z_dimension: integer
number of latent space dimensions.
dropout_rate: float
dropout rate
"""
def __init__(self, x_dimension: int, layer_sizes: list, z_dimension: int, dropout_rate: float):
super().__init__() # to run nn.Module's init method
# encoder architecture
self.FC = None
if len(layer_sizes) > 1:
print("Encoder Architecture:")
self.FC = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
if i == 0:
print("\tInput Layer in, out:", in_size, out_size)
self.FC.add_module(name="L{:d}".format(i), module=nn.Linear(in_size, out_size, bias=False))
else:
print("\tHidden Layer", i, "in/out:", in_size, out_size)
self.FC.add_module(name="L{:d}".format(i), module=nn.Linear(in_size, out_size, bias=False))
self.FC.add_module("N{:d}".format(i), module=nn.BatchNorm1d(out_size))
self.FC.add_module(name="A{:d}".format(i), module=nn.LeakyReLU(negative_slope=0.3))
self.FC.add_module(name="D{:d}".format(i), module=nn.Dropout(p=dropout_rate))
#self.FC = nn.ModuleList(self.FC)
print("\tMean/Var Layer in/out:", layer_sizes[-1], z_dimension)
self.mean_encoder = nn.Linear(layer_sizes[-1], z_dimension)
self.log_var_encoder = nn.Linear(layer_sizes[-1], z_dimension)
def forward(self, x: torch.Tensor):
if self.FC is not None:
x = self.FC(x)
mean = self.mean_encoder(x)
log_var = self.log_var_encoder(x)
return mean, log_var
class Decoder(nn.Module):
"""
    Constructs the decoder sub-network of VAE. This class implements the
    decoder part of Variational Auto-encoder: it maps the latent space back
    to gene expression space with n_dimensions = n_vars.
    Parameters
    ----------
z_dimension: integer
number of latent space dimensions.
layer_sizes: List
List of hidden layer sizes.
x_dimension: integer
number of gene expression space dimensions.
dropout_rate: float
dropout rate
"""
def __init__(self, z_dimension: int, layer_sizes: list, x_dimension: int, dropout_rate: float):
super().__init__()
layer_sizes = [z_dimension] + layer_sizes
# decoder architecture
print("Decoder Architecture:")
# Create first Decoder layer
self.FirstL = nn.Sequential()
print("\tFirst Layer in, out", layer_sizes[0], layer_sizes[1])
self.FirstL.add_module(name="L0", module=nn.Linear(layer_sizes[0], layer_sizes[1], bias=False))
self.FirstL.add_module("N0", module=nn.BatchNorm1d(layer_sizes[1]))
self.FirstL.add_module(name="A0", module=nn.LeakyReLU(negative_slope=0.3))
self.FirstL.add_module(name="D0", module=nn.Dropout(p=dropout_rate))
# Create all Decoder hidden layers
if len(layer_sizes) > 2:
self.HiddenL = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[1:-1], layer_sizes[2:])):
if i+3 < len(layer_sizes):
print("\tHidden Layer", i+1, "in/out:", in_size, out_size)
self.HiddenL.add_module(name="L{:d}".format(i+1), module=nn.Linear(in_size, out_size, bias=False))
self.HiddenL.add_module("N{:d}".format(i+1), module=nn.BatchNorm1d(out_size, affine=True))
self.HiddenL.add_module(name="A{:d}".format(i+1), module=nn.LeakyReLU(negative_slope=0.3))
self.HiddenL.add_module(name="D{:d}".format(i+1), module=nn.Dropout(p=dropout_rate))
else:
self.HiddenL = None
# Create Output Layers
print("\tOutput Layer in/out: ", layer_sizes[-2], layer_sizes[-1], "\n")
self.recon_decoder = nn.Sequential(nn.Linear(layer_sizes[-2], layer_sizes[-1]))
def forward(self, z: torch.Tensor):
dec_latent = self.FirstL(z)
# Compute Hidden Output
if self.HiddenL is not None:
x = self.HiddenL(dec_latent)
else:
x = dec_latent
# Compute Decoder Output
recon_x = self.recon_decoder(x)
return recon_x
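# --- Illustrative sketch (not part of the original source) -----------------
# Wiring the Encoder and Decoder above into a single VAE pass with
# hypothetical sizes; z is drawn with the usual reparametrization trick.
def _example_encoder_decoder_pass():
    enc = Encoder(x_dimension=2000, layer_sizes=[2000, 128, 128],
                  z_dimension=10, dropout_rate=0.1)
    dec = Decoder(z_dimension=10, layer_sizes=[128, 128, 2000],
                  x_dimension=2000, dropout_rate=0.1)
    x = torch.rand(4, 2000)
    mean, log_var = enc(x)
    z = mean + torch.exp(0.5 * log_var) * torch.randn_like(mean)
    return dec(z)   # reconstruction of shape (4, 2000)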
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/scgen/modules.py
| 0.924993 | 0.581719 |
modules.py
|
pypi
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from scipy import sparse
import scanpy as sc
import anndata
from .modules import Encoder, Decoder
from ._utils import balancer, extractor
class vaeArith(nn.Module):
"""ScArches model class. This class contains the implementation of Variational Auto-encoder network with Vector Arithmetics.
Parameters
----------
x_dim: Integer
Number of input features (i.e. number of gene expression space dimensions).
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Size of the bottleneck layer (z).
dr_rate: Float
Dropout rate applied to all layers, if `dr_rate`==0 no dropout will be applied.
"""
def __init__(self, x_dim: int, hidden_layer_sizes: list = [128,128], z_dimension: int = 10, dr_rate: float = 0.05, **kwargs):
super().__init__()
assert isinstance(hidden_layer_sizes, list)
assert isinstance(z_dimension, int)
print("\nINITIALIZING NEW NETWORK..............")
self.x_dim = x_dim
self.z_dim = z_dimension
self.hidden_layer_sizes = hidden_layer_sizes
self.dr_rate = dr_rate
encoder_layer_sizes = self.hidden_layer_sizes.copy()
encoder_layer_sizes.insert(0, self.x_dim)
decoder_layer_sizes = self.hidden_layer_sizes.copy()
decoder_layer_sizes.reverse()
decoder_layer_sizes.append(self.x_dim)
self.encoder = Encoder(self.x_dim, encoder_layer_sizes, self.z_dim, self.dr_rate)
self.decoder = Decoder(self.z_dim, decoder_layer_sizes, self.x_dim, self.dr_rate)
self.alpha = kwargs.get("alpha", 0.000001)
@staticmethod
def _sample_z(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
"""
        Samples eps from a standard normal distribution and applies the
        re-parametrization trick, effectively drawing from the latent
        distribution N(mu, var) computed by the Encoder.
        Parameters
        ----------
        mu:
            Mean of the latent Gaussian.
        log_var:
            Log variance of the latent Gaussian.
Returns
-------
Returns Torch Tensor containing latent space encoding of 'x'.
The computed Tensor of samples with shape [size, z_dim].
"""
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return mu + std * eps
def get_latent(self, data: torch.Tensor) -> torch.Tensor:
"""
Map `data` in to the latent space. This function will feed data
in encoder part of VAE and compute the latent space coordinates
for each sample in data.
Parameters
----------
data:
            Torch Tensor to be mapped to latent space. `data` has to be of shape [n_obs, x_dim].
Returns
-------
latent:
Returns Torch Tensor containing latent space encoding of 'data'
"""
if not torch.is_tensor(data):
data = torch.tensor(data) # to tensor
mu, logvar = self.encoder(data)
latent = self._sample_z(mu, logvar)
return latent
def _avg_vector(self, data: torch.Tensor) -> torch.Tensor:
"""
Computes the average of points which computed from mapping `data`
to encoder part of VAE.
Parameters
----------
data:
            Torch Tensor matrix to be mapped to latent space. Note that `data` has to be of shape [n_obs, x_dim].
Returns
-------
The average of latent space mapping in Torch Tensor
"""
latent = self.get_latent(data)
latent_avg = torch.mean(latent, dim=0) # maybe keepdim = True, so that shape (,1)
return latent_avg
def reconstruct(self, data, use_data=False) -> torch.Tensor:
"""
Map back the latent space encoding via the decoder.
Parameters
----------
data: `~anndata.AnnData`
Annotated data matrix whether in latent space or gene expression space.
use_data: bool
This flag determines whether the `data` is already in latent space or not.
if `True`: The `data` is in latent space (`data.X` is in shape [n_obs, z_dim]).
if `False`: The `data` is not in latent space (`data.X` is in shape [n_obs, x_dim]).
Returns
-------
rec_data:
Returns Torch Tensor containing reconstructed 'data' in shape [n_obs, x_dim].
"""
if not torch.is_tensor(data):
data = torch.tensor(data) # to tensor
if use_data:
latent = data
else:
latent = self.get_latent(data)
rec_data = self.decoder(latent)
return rec_data
def _loss_function(self, x: torch.Tensor, xhat: torch.Tensor, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
"""
Defines the loss function of VAE network after constructing the whole
network. This will define the KL Divergence and Reconstruction loss for
VAE. The VAE Loss will be weighted sum of reconstruction loss and KL Divergence loss.
Parameters
----------
Returns
-------
Returns VAE Loss as Torch Tensor.
"""
kl_loss = 0.5 * torch.sum(logvar.exp() - logvar - 1 + mu.pow(2)) # check dimensions
recons_loss = F.mse_loss(xhat, x)
vae_loss = recons_loss + self.alpha * kl_loss
return vae_loss
def forward(self, x: torch.Tensor):
mu, logvar = self.encoder(x)
z = self._sample_z(mu, logvar)
x_hat = self.decoder(z)
return x_hat, mu, logvar
def predict(self, adata, conditions, cell_type_key, condition_key, adata_to_predict=None, celltype_to_predict=None, obs_key="all"):
"""
Predicts the cell type provided by the user in stimulated condition.
Parameters
----------
celltype_to_predict: basestring
The cell type you want to be predicted.
obs_key: basestring or dict
Dictionary of celltypes you want to be observed for prediction.
adata_to_predict: `~anndata.AnnData`
            Adata for unperturbed cells you want to be predicted.
Returns
-------
predicted_cells: Torch Tensor
`Torch Tensor` of predicted cells in primary space.
delta: Torch Tensor
Difference between stimulated and control cells in latent space
"""
device = next(self.parameters()).device # get device of model.parameters
if obs_key == "all":
ctrl_x = adata[adata.obs[condition_key] == conditions["ctrl"], :]
stim_x = adata[adata.obs[condition_key] == conditions["stim"], :]
ctrl_x = balancer(ctrl_x, cell_type_key=cell_type_key, condition_key=condition_key)
stim_x = balancer(stim_x, cell_type_key=cell_type_key, condition_key=condition_key)
else:
key = list(obs_key.keys())[0]
values = obs_key[key]
subset = adata[adata.obs[key].isin(values)]
ctrl_x = subset[subset.obs[condition_key] == conditions["ctrl"], :]
stim_x = subset[subset.obs[condition_key] == conditions["stim"], :]
if len(values) > 1:
ctrl_x = balancer(ctrl_x, cell_type_key=cell_type_key, condition_key=condition_key)
stim_x = balancer(stim_x, cell_type_key=cell_type_key, condition_key=condition_key)
if celltype_to_predict is not None and adata_to_predict is not None:
raise Exception("Please provide either a cell type or adata not both!")
if celltype_to_predict is None and adata_to_predict is None:
raise Exception("Please provide a cell type name or adata for your unperturbed cells")
if celltype_to_predict is not None:
ctrl_pred = extractor(adata, celltype_to_predict, conditions, cell_type_key, condition_key)[1]
else:
ctrl_pred = adata_to_predict
eq = min(ctrl_x.X.shape[0], stim_x.X.shape[0])
cd_ind = np.random.choice(range(ctrl_x.shape[0]), size=eq, replace=False)
stim_ind = np.random.choice(range(stim_x.shape[0]), size=eq, replace=False)
if sparse.issparse(ctrl_x.X) and sparse.issparse(stim_x.X):
latent_ctrl = self._avg_vector(torch.tensor(ctrl_x.X.A[cd_ind, :], device=device))
latent_sim = self._avg_vector(torch.tensor(stim_x.X.A[stim_ind, :], device=device))
else:
latent_ctrl = self._avg_vector(torch.tensor(ctrl_x.X[cd_ind, :], device=device))
latent_sim = self._avg_vector(torch.tensor(stim_x.X[stim_ind, :], device=device))
delta = latent_sim - latent_ctrl
if sparse.issparse(ctrl_pred.X):
latent_cd = self.get_latent(torch.tensor(ctrl_pred.X.A, device=device))
else:
latent_cd = self.get_latent(torch.tensor(ctrl_pred.X, device=device))
stim_pred = delta + latent_cd
predicted_cells = self.reconstruct(stim_pred, use_data=True)
return predicted_cells, delta
def batch_removal(self, adata, batch_key, cell_label_key, return_latent):
"""
Removes batch effect of adata
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix. adata must have `batch_key` and `cell_label_key` which you pass to the function in its obs.
batch_key: `str`
batch label key in adata.obs
cell_label_key: `str`
cell type label key in adata.obs
return_latent: `bool`
            if `True`, returns the corrected latent representation
Returns
-------
corrected: `~anndata.AnnData`
adata of corrected gene expression in adata.X and corrected latent space in adata.obsm["latent_corrected"].
"""
device = next(self.parameters()).device # get device of model.parameters
if sparse.issparse(adata.X):
latent_all = (self.get_latent(torch.tensor(adata.X.A, device=device))).cpu().detach().numpy()
else:
latent_all = (self.get_latent(torch.tensor(adata.X, device=device))).cpu().detach().numpy()
adata_latent = anndata.AnnData(latent_all)
adata_latent.obs = adata.obs.copy(deep=True)
unique_cell_types = np.unique(adata_latent.obs[cell_label_key])
shared_ct = []
not_shared_ct = []
for cell_type in unique_cell_types:
temp_cell = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
if len(np.unique(temp_cell.obs[batch_key])) < 2:
cell_type_ann = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
not_shared_ct.append(cell_type_ann)
continue
temp_cell = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
batch_list = {}
batch_ind = {}
max_batch = 0
max_batch_ind = ""
batches = np.unique(temp_cell.obs[batch_key])
for i in batches:
temp = temp_cell[temp_cell.obs[batch_key] == i]
temp_ind = temp_cell.obs[batch_key] == i
if max_batch < len(temp):
max_batch = len(temp)
max_batch_ind = i
batch_list[i] = temp
batch_ind[i] = temp_ind
max_batch_ann = batch_list[max_batch_ind]
for study in batch_list:
delta = np.average(max_batch_ann.X, axis=0) - np.average(batch_list[study].X, axis=0)
batch_list[study].X = delta + batch_list[study].X
temp_cell[batch_ind[study]].X = batch_list[study].X
shared_ct.append(temp_cell)
all_shared_ann = anndata.AnnData.concatenate(*shared_ct, batch_key="concat_batch", index_unique=None)
if "concat_batch" in all_shared_ann.obs.columns:
del all_shared_ann.obs["concat_batch"]
if len(not_shared_ct) < 1:
corrected = sc.AnnData(self.reconstruct(torch.tensor(all_shared_ann.X, device=device), use_data=True).cpu().detach().numpy(), obs=all_shared_ann.obs)
corrected.var_names = adata.var_names.tolist()
corrected = corrected[adata.obs_names]
corrected.layers["original_data"] = adata.X
if adata.raw is not None:
adata_raw = anndata.AnnData(X=adata.raw.X, var=adata.raw.var)
adata_raw.obs_names = adata.obs_names
corrected.raw = adata_raw
corrected.obsm["original_data"] = adata.raw.X
if return_latent:
corrected.obsm["latent_corrected"] = (self.get_latent(torch.tensor(corrected.X, device=device))).cpu().detach().numpy()
return corrected
else:
all_not_shared_ann = anndata.AnnData.concatenate(*not_shared_ct, batch_key="concat_batch", index_unique=None)
all_corrected_data = anndata.AnnData.concatenate(all_shared_ann, all_not_shared_ann, batch_key="concat_batch", index_unique=None)
if "concat_batch" in all_shared_ann.obs.columns:
del all_corrected_data.obs["concat_batch"]
corrected = sc.AnnData(self.reconstruct(torch.tensor(all_corrected_data.X, device=device), use_data=True).cpu().detach().numpy(), all_corrected_data.obs)
corrected.var_names = adata.var_names.tolist()
corrected = corrected[adata.obs_names]
corrected.layers["original_data"] = adata.X
if adata.raw is not None:
adata_raw = anndata.AnnData(X=adata.raw.X, var=adata.raw.var)
adata_raw.obs_names = adata.obs_names
corrected.raw = adata_raw
corrected.obsm["original_data"] = adata.raw.X
if return_latent:
corrected.obsm["latent_corrected"] = (self.get_latent(torch.tensor(corrected.X, device=device))).cpu().detach().numpy()
return corrected
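# --- Illustrative sketch (not part of the original source) -----------------
# A hypothetical optimization step using vaeArith's forward pass and its
# weighted VAE loss; random data stands in for a real expression matrix.
def _example_vaearith_step():
    model = vaeArith(x_dim=2000, hidden_layer_sizes=[128, 128], z_dimension=10)
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    x = torch.rand(16, 2000)
    x_hat, mu, logvar = model(x)
    loss = model._loss_function(x, x_hat, mu, logvar)
    loss.backward()
    opt.step()
    return float(loss)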
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/scgen/vaearith.py
| 0.952309 | 0.72708 |
vaearith.py
|
pypi
|
import torch
import numpy as np
from anndata import AnnData
from typing import Optional, Union
from .vaearith import vaeArith
from ...trainers import vaeArithTrainer
from ..base._utils import _validate_var_names
from ..base._base import BaseMixin
class scgen(BaseMixin):
"""Model for scArches class. This class contains the implementation of Variational Auto-encoder network with Vector Arithmetics.
Parameters
----------
    adata: `~anndata.AnnData`
Annotated data matrix.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Size of the bottleneck layer (z).
dr_rate: Float
Dropout rate applied to all layers, if `dr_rate` == 0 no dropout will be applied.
"""
def __init__(self, adata: AnnData, hidden_layer_sizes: list = [128, 128], z_dimension: int = 10, dr_rate: float = 0.05):
self.adata = adata
self.x_dim_ = adata.n_vars
self.z_dim_ = z_dimension
self.hidden_layer_sizes_ = hidden_layer_sizes
self.dr_rate_ = dr_rate
self.model = vaeArith(self.x_dim_, self.hidden_layer_sizes_, self.z_dim_, self.dr_rate_)
self.is_trained_ = False
self.trainer = None
def train(self, n_epochs: int = 100, lr: float = 0.001, eps: float = 1e-8, batch_size = 32, **kwargs):
self.trainer = vaeArithTrainer(self.model, self.adata, batch_size, **kwargs)
self.trainer.train(n_epochs, lr, eps)
self.is_trained_ = True
def get_latent(self, data: Optional[np.ndarray] = None):
"""
Map `data` in to the latent space. This function will feed data
in encoder part of VAE and compute the latent space coordinates
for each sample in data.
Parameters
----------
        data: numpy nd-array
            Numpy nd-array to be mapped to latent space. `data` has to be of shape [n_obs, x_dim].
Returns
-------
latent: numpy nd-array
Returns numpy array containing latent space encoding of 'data'
"""
device = next(self.model.parameters()).device #get device of model.parameters
if data is None:
data = self.adata.X
data = torch.tensor(data, device=device) # to tensor
latent = self.model.get_latent(data)
latent = latent.cpu().detach() # to cpu then detach from the comput.graph
return np.array(latent)
def reconstruct(self, data, use_data):
"""
Map back the latent space encoding via the decoder.
Parameters
----------
data: `~anndata.AnnData`
Annotated data matrix whether in latent space or gene expression space.
use_data: bool
This flag determines whether the `data` is already in latent space or not.
if `True`: The `data` is in latent space (`data.X` is in shape [n_obs, z_dim]).
if `False`: The `data` is not in latent space (`data.X` is in shape [n_obs, x_dim]).
Returns
-------
rec_data: 'numpy nd-array'
            Returns `numpy nd-array` containing reconstructed 'data' of shape [n_obs, x_dim].
"""
device = next(self.model.parameters()).device
data = torch.tensor(data, device=device) # to tensor
rec_data = self.model.reconstruct(data, use_data)
rec_data = rec_data.cpu().detach()
return np.array(rec_data)
def predict(self, adata, conditions, cell_type_key, condition_key, adata_to_predict=None, celltype_to_predict=None, obs_key="all"):
"""
Predicts the cell type provided by the user in stimulated condition.
Parameters
----------
celltype_to_predict: basestring
The cell type you want to be predicted.
obs_key: basestring or dict
Dictionary of celltypes you want to be observed for prediction.
adata_to_predict: `~anndata.AnnData`
            Adata for unperturbed cells you want to be predicted.
Returns
-------
predicted_cells: numpy nd-array
`numpy nd-array` of predicted cells in primary space.
        delta: numpy nd-array
            Difference between stimulated and control cells in latent space
"""
#device = next(self.model.parameters()).device # get device of model.parameters
#adata_tensor = torch.tensor(adata, device=device) # to tensor
output = self.model.predict(adata, conditions, cell_type_key, condition_key, adata_to_predict, celltype_to_predict, obs_key)
prediction = output[0].cpu().detach()
delta = output[1].cpu().detach()
return np.array(prediction), np.array(delta)
def batch_removal(self, adata, batch_key, cell_label_key, return_latent = True):
"""
Removes batch effect of adata
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix. adata must have `batch_key` and `cell_label_key` which you pass to the function in its obs.
batch_key: `str`
batch label key in adata.obs
cell_label_key: `str`
cell type label key in adata.obs
return_latent: `bool`
            if `True`, returns the corrected latent representation
Returns
-------
corrected: `~anndata.AnnData`
adata of corrected gene expression in adata.X and corrected latent space in adata.obsm["latent_corrected"].
"""
corrected = self.model.batch_removal(adata, batch_key, cell_label_key, return_latent)
return corrected
@classmethod
def _validate_adata(cls, adata, dct):
if adata.n_vars != dct['x_dim_']:
raise ValueError("Incorrect var dimension")
@classmethod
def _get_init_params_from_dict(cls, dct):
init_params = {
'hidden_layer_sizes': dct['hidden_layer_sizes_'],
'z_dimension': dct['z_dim_'],
'dr_rate': dct['dr_rate_'],
}
return init_params
@classmethod
def map_query_data(cls, corrected_reference: AnnData, query: AnnData, reference_model: Union[str, 'scgen'], batch_key: str = 'study', return_latent = True):
"""
Removes the batch effect between reference and query data.
Additional training on query data is not needed.
Parameters
----------
corrected_reference: `~anndata.AnnData`
Already corrected reference anndata object
query: `~anndata.AnnData`
Query anndata object
batch_key: `str`
batch label key in query.obs
return_latent: `bool`
            if `True`, returns the corrected latent representation
Returns
-------
integrated: `~anndata.AnnData`
Returns an integrated query.
"""
query_batches_labels = query.obs[batch_key].unique().tolist()
query_adata_by_batches = [query[query.obs[batch_key].isin([batch])].copy() for batch in query_batches_labels]
        # concatenate the reference with every query batch (unpack the list of AnnDatas)
        reference_query_adata = AnnData.concatenate(corrected_reference, *query_adata_by_batches,
                                                    batch_key="reference_map",
                                                    batch_categories=['reference'] + query_batches_labels,
                                                    index_unique=None)
reference_query_adata.obs['original_batch'] = reference_query_adata.obs[batch_key].tolist()
# passed model as file
if isinstance(reference_model, str):
attr_dict, model_state_dict, var_names = cls._load_params(reference_model)
_validate_var_names(query, var_names)
init_params = cls._get_init_params_from_dict(attr_dict)
new_model = cls(reference_query_adata, **init_params)
new_model.model.load_state_dict(model_state_dict)
integrated_query = new_model.batch_removal(reference_query_adata, batch_key = "reference_map", cell_label_key = "cell_type", return_latent = True)
return integrated_query
#passed model as model object
else:
# when corrected_reference is already in the passed model
if np.all(reference_model._get_user_attributes()[0][1].X == corrected_reference.X):
integrated_query = reference_model.batch_removal(reference_query_adata, batch_key = "reference_map", cell_label_key = "cell_type", return_latent = True)
else:
attr_dict = reference_model._get_public_attributes()
model_state_dict = reference_model.model.state_dict()
init_params = cls._get_init_params_from_dict(attr_dict)
new_model = cls(reference_query_adata, **init_params)
new_model.model.load_state_dict(model_state_dict)
integrated_query = new_model.batch_removal(reference_query_adata, batch_key = "reference_map", cell_label_key = "cell_type", return_latent = True)
return integrated_query
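# --- Illustrative sketch (not part of the original source) -----------------
# End-to-end use of the scgen wrapper on hypothetical AnnData objects: train
# on a reference, correct it, then map a query without retraining. The column
# names 'study' and 'cell_type' are assumptions about the input .obs.
def _example_scgen_mapping(adata_ref, adata_query):
    model = scgen(adata_ref, hidden_layer_sizes=[128, 128], z_dimension=10)
    model.train(n_epochs=50)
    corrected_ref = model.batch_removal(adata_ref, batch_key="study",
                                        cell_label_key="cell_type",
                                        return_latent=True)
    return scgen.map_query_data(corrected_ref, adata_query, model,
                                batch_key="study")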
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/scgen/vaearith_model.py
| 0.940633 | 0.627937 |
vaearith_model.py
|
pypi
|
import anndata
import numpy as np
from scipy import sparse
import logging
logger = logging.getLogger(__name__)
def extractor(data, cell_type, conditions, cell_type_key="cell_type", condition_key="condition"):
"""
    Returns a list of `data` subsets filtered for a specific `cell_type`.
Parameters
----------
data: `~anndata.AnnData`
Annotated data matrix
cell_type: basestring
specific cell type to be extracted from `data`.
conditions: dict
dictionary of stimulated/control of `data`.
Returns
-------
    list of `data` subsets filtered for a specific `cell_type`.
"""
cell_with_both_condition = data[data.obs[cell_type_key] == cell_type]
condition_1 = data[(data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["ctrl"])]
condition_2 = data[(data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["stim"])]
training = data[~((data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["stim"]))]
return [training, condition_1, condition_2, cell_with_both_condition]
def balancer(adata, cell_type_key="cell_type", condition_key="condition"):
"""
Makes cell type population equal.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
balanced_data: `~anndata.AnnData`
Equal cell type population Annotated data matrix.
"""
class_names = np.unique(adata.obs[cell_type_key])
class_pop = {}
for cls in class_names:
class_pop[cls] = adata.copy()[adata.obs[cell_type_key] == cls].shape[0]
max_number = np.max(list(class_pop.values()))
all_data_x = []
all_data_label = []
all_data_condition = []
for cls in class_names:
temp = adata.copy()[adata.obs[cell_type_key] == cls]
index = np.random.choice(range(len(temp)), max_number)
if sparse.issparse(temp.X):
temp_x = temp.X.A[index]
else:
temp_x = temp.X[index]
all_data_x.append(temp_x)
temp_ct = np.repeat(cls, max_number)
all_data_label.append(temp_ct)
temp_cc = np.repeat(np.unique(temp.obs[condition_key]), max_number)
all_data_condition.append(temp_cc)
balanced_data = anndata.AnnData(np.concatenate(all_data_x))
balanced_data.obs[cell_type_key] = np.concatenate(all_data_label)
    # use the collected condition labels, not the cell-type labels
    balanced_data.obs[condition_key] = np.concatenate(all_data_condition)
class_names = np.unique(balanced_data.obs[cell_type_key])
class_pop = {}
for cls in class_names:
class_pop[cls] = len(balanced_data[balanced_data.obs[cell_type_key] == cls])
return balanced_data
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/scgen/_utils.py
| 0.874694 | 0.554712 |
_utils.py
|
pypi
|
import warnings
import os
import csv
import numpy as np
from sklearn.metrics import *
from sklearn.covariance import GraphicalLassoCV, graphical_lasso, LedoitWolf
from sklearn.preprocessing import StandardScaler
import torch
import torch_geometric.nn as pyg_nn
import torch_geometric.data as geo_dt
from sklearn.utils.extmath import fast_logdet
from scipy import sparse
def glasso(adata, alphas=5, n_jobs=None, mode='cd'):
"""
    Reconstructs the gene-gene interaction network based on gene expressions in `.X` using a Gaussian graphical model estimated by `glasso`.
Parameters
----------
adata: `AnnData`
The annotated data matrix of shape `n_obs × n_vars`. Rows correspond to cells and columns to genes.
alphas: int or array-like of shape (n_alphas,), dtype=`float`, default=`5`
Non-negative. If an integer is given, it fixes the number of points on the grids of alpha to be used. If a list is given, it gives the grid to be used.
n_jobs: int, default `None`
Non-negative. number of jobs.
Returns
-------
    Adds a `csr_matrix` under key `adj` to `.varm`.
References
-----------
Friedman, J., Hastie, T., & Tibshirani, R. (2008).
Sparse inverse covariance estimation with the graphical lasso.
Biostatistics, 9(3), 432-441.
"""
scaler = StandardScaler()
data = scaler.fit_transform(adata.X)
cov = GraphicalLassoCV(alphas=alphas, n_jobs=n_jobs).fit(data)
precision_matrix = cov.get_precision()
adjacency_matrix = precision_matrix.astype(bool).astype(int)
adjacency_matrix[np.diag_indices_from(adjacency_matrix)] = 0
save_adata(adata, attr='varm', key='adj', data=sparse.csr_matrix(adjacency_matrix))
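# --- Illustrative sketch (not part of the original source) -----------------
# Running glasso on a hypothetical AnnData and reading back the adjacency;
# a small explicit alpha grid is passed instead of the default grid size.
def _example_glasso(adata):
    glasso(adata, alphas=[0.05, 0.1, 0.5], n_jobs=2)
    return adata.varm['adj']   # sparse 0/1 gene-gene adjacency matrix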
def compute_metrics(y_true, y_pred):
"""
Computes prediction quality metrics.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
Returns
--------
accuracy : accuracy
conf_mat : confusion matrix
precision : weighted precision score
recall : weighted recall score
f1 : weighted f1 score
"""
accuracy = accuracy_score(y_true, y_pred)
conf_mat = confusion_matrix(y_true, y_pred)
precision = precision_score(y_true, y_pred, average='weighted')
recall = recall_score(y_true, y_pred, average='weighted')
f1 = f1_score(y_true, y_pred, average='weighted')
return accuracy, conf_mat, precision, recall, f1
def get_dataloader(graph, X, y, batch_size=1, undirected=True, shuffle=True, num_workers=0):
"""
Converts a graph and a dataset to a dataloader.
Parameters
----------
graph : igraph object
The underlying graph to be fed to the graph neural networks.
X : numpy ndarray
Input dataset with columns as features and rows as observations.
y : numpy ndarray
Class labels.
batch_size: int, default=1
The batch size.
undirected: boolean
if the input graph is undirected (symmetric adjacency matrix).
shuffle: boolean, default = `True`
        Whether to shuffle the dataset to be passed to `torch_geometric.data.DataLoader`.
num_workers: int, default = 0
Non-negative. Number of workers to be passed to `torch_geometric.data.DataLoader`.
Returns
--------
dataloader : a pytorch-geometric dataloader. All of the graphs will have the same connectivity (given by the input graph),
but the node features will be the features from X.
"""
n_obs, n_features = X.shape
rows, cols = np.where(graph == 1)
edges = zip(rows.tolist(), cols.tolist())
sources = []
targets = []
    for edge in edges:
        sources.append(edge[0])
        targets.append(edge[1])
        if undirected:
            # append the reverse edge so messages flow in both directions
            sources.append(edge[1])
            targets.append(edge[0])
edge_index = torch.tensor([sources,targets],dtype=torch.long)
list_graphs = []
y = y.tolist()
# print(y)
for i in range(n_obs):
y_tensor = torch.tensor(y[i])
X_tensor = torch.tensor(X[i,:]).view(X.shape[1], 1).float()
data = geo_dt.Data(x=X_tensor, edge_index=edge_index, y=y_tensor)
list_graphs.append(data.coalesce())
dataloader = geo_dt.DataLoader(list_graphs, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=False)
return dataloader
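# --- Illustrative sketch (not part of the original source) -----------------
# Building a loader for 5 observations over a 3-node path graph; every graph
# in the batch shares this connectivity, only the node features differ.
def _example_dataloader():
    graph = np.eye(3, k=1) + np.eye(3, k=-1)   # symmetric path graph
    X = np.random.rand(5, 3)                   # 5 cells x 3 genes
    y = np.array([0, 1, 0, 1, 1])
    return get_dataloader(graph, X, y, batch_size=2, undirected=False)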
def kullback_leibler_divergence(X):
"""Finds the pairwise Kullback-Leibler divergence
matrix between all rows in X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
Array of probability data. Each row must sum to 1.
Returns
-------
D : ndarray, shape (n_samples, n_samples)
The Kullback-Leibler divergence matrix. A pairwise matrix D such that D_{i, j}
is the divergence between the ith and jth vectors of the given matrix X.
Notes
-----
Based on code from Gordon J. Berman et al.
(https://github.com/gordonberman/MotionMapper)
References
-----------
Berman, G. J., Choi, D. M., Bialek, W., & Shaevitz, J. W. (2014).
Mapping the stereotyped behaviour of freely moving fruit flies.
Journal of The Royal Society Interface, 11(99), 20140672.
"""
X_log = np.log(X)
X_log[np.isinf(X_log) | np.isnan(X_log)] = 0
entropies = -np.sum(X * X_log, axis=1)
D = np.matmul(-X, X_log.T)
D = D - entropies
D = D / np.log(2)
D *= (1 - np.eye(D.shape[0]))
return D
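# --- Illustrative sketch (not part of the original source) -----------------
# A tiny numeric example: rows are probability vectors and the result is the
# pairwise divergence matrix in bits with a zeroed diagonal, exactly as
# computed by the implementation above.
def _example_kl_matrix():
    X = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])
    return kullback_leibler_divergence(X)   # shape (2, 2)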
def multinomial_rvs(n, p):
"""Sample from the multinomial distribution with multiple p vectors.
Parameters
----------
n : int
must be a scalar >=1
    p : numpy ndarray
        must be an n-dimensional array; the last axis of p holds the
        sequence of probabilities for a multinomial distribution.
Returns
-------
    out : ndarray
        same shape as p; sampled counts per category.
"""
count = np.full(p.shape[:-1], n)
out = np.zeros(p.shape, dtype=int)
ps = p.cumsum(axis=-1)
# Conditional probabilities
with np.errstate(divide='ignore', invalid='ignore'):
condp = p / ps
condp[np.isnan(condp)] = 0.0
for i in range(p.shape[-1]-1, 0, -1):
binsample = np.random.binomial(count, condp[..., i])
out[..., i] = binsample
count -= binsample
out[..., 0] = count
return out
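# --- Illustrative sketch (not part of the original source) -----------------
# Drawing 10 counts per row from two different probability vectors at once;
# each output row sums to n.
def _example_multinomial_rvs():
    p = np.array([[0.2, 0.5, 0.3],
                  [0.9, 0.05, 0.05]])
    return multinomial_rvs(10, p)   # shape (2, 3), rows sum to 10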
def save_adata(adata, attr, key, data):
"""updates an attribute of an `AnnData` object
Parameters
----------
adata : `AnnData`
The annotated data matrix of shape `n_obs × n_vars`. Rows correspond to cells and columns to genes.
attr : str
must be an attribute of `adata`, e.g., `obs`, `var`, etc.
key : str
must be a key in the attr
data : non-specific
the data to be updated/placed
"""
obj = getattr(adata, attr)
obj[key] = data
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/sagenet/utils.py
| 0.878497 | 0.662128 |
utils.py
|
pypi
|
from .utils import *
from .classifier import Classifier
from .model import *
import os
from os import listdir
import numpy as np
import anndata
import re
import torch
class sagenet():
"""
A `sagenet` object.
Parameters
----------
device : str, default = 'cpu'
the processing unit to be used in the classifiers (gpu or cpu).
"""
def __init__(self, device='cpu'):
self.models = {}
self.adjs = {}
        self.inf_genes = None
self.num_refs = 0
self.device = device
def train(self,
adata,
tag = None,
              comm_columns = ['class_'],
classifier = 'TransformerConv',
num_workers = 0,
batch_size = 32,
epochs = 10,
n_genes = 10,
verbose = False,
importance = False,
to_return = False):
"""Trains new classifiers on a reference dataset.
Parameters
----------
adata : `AnnData`
The annotated data matrix of shape `n_obs × n_vars` to be used as the spatial reference. Rows correspond to cells (or spots) and columns to genes.
tag : str, default = `None`
The tag to be used for storing the trained models and the outputs in the `sagenet` object.
classifier : str, default = `'TransformerConv'`
The type of classifier to be passed to `sagenet.Classifier()`
        comm_columns : list of str, default = `['class_']`
            The columns in `adata.obs` to be used as spatial partitions.
num_workers : int
Non-negative. Number of workers to be passed to `torch_geometric.data.DataLoader`.
epochs : int
number of epochs.
verbose : boolean, default=False
whether to print out loss during training.
Return
------
Returns nothing.
Notes
-----
        Trains the models and adds them to the `.models` dictionary of the `sagenet` object.
        Also adds a new key `{tag}_importance` to `.var` of `adata` which contains the entropy-based importance score corresponding to each gene.
"""
ind = np.where(np.sum(adata.varm['adj'], axis=1) == 0)[0]
ents = np.ones(adata.var.shape[0]) * 1000000
# ents = np.zeros(adata.var.shape[0])
self.num_refs += 1
if tag is None:
tag = 'ref' + str(self.num_refs)
for comm in comm_columns:
data_loader = get_dataloader(
graph = adata.varm['adj'].toarray(),
X = adata.X, y = adata.obs[comm].values.astype('long'),
batch_size = batch_size,
shuffle = True,
num_workers = num_workers
)
clf = Classifier(
n_features = adata.shape[1],
n_classes = (np.max(adata.obs[comm].values.astype('long'))+1),
n_hidden_GNN = [8],
dropout_FC = 0.2,
dropout_GNN = 0.3,
classifier = classifier,
lr = 0.001,
momentum = 0.9,
device = self.device
)
clf.fit(data_loader, epochs = epochs, test_dataloader=None,verbose=verbose)
if importance:
imp = clf.interpret(data_loader, n_features=adata.shape[1], n_classes=(np.max(adata.obs[comm].values.astype('long'))+1))
idx = (-abs(imp)).argsort(axis=0)
imp = np.min(idx, axis=1)
# imp += imp
np.put(imp, ind, 1000000)
ents = np.minimum(ents, imp)
# imp = np.min(idx, axis=1)
# ents = np.minimum(ents, imp)
self.models['_'.join([tag, comm])] = clf.net
self.adjs['_'.join([tag, comm])] = adata.varm['adj'].toarray()
if importance:
if not to_return:
save_adata(adata, attr='var', key='_'.join([tag, 'importance']), data=ents)
else:
return(ents)
# return ents
def load_query_data(self, adata_q, to_return = False):
"""Maps a query dataset to space using the trained models on the spatial reference(s).
Parameters
----------
        adata_q : `AnnData`
The annotated data matrix of shape `n_obs × n_vars` to be used as the query. Rows correspond to cells (or spots) and columns to genes.
Return
------
Returns nothing.
Notes
-----
* Adds new key(s) `pred_{tag}_{partitioning_name}` to `.obs` from `adata` which contains the predicted partition for partitioning `{partitioning_name}`, trained by model `{tag}`.
        * Adds new key(s) `ent_{tag}_{partitioning_name}` to `.obs` from `adata` which contains the uncertainty in prediction for partitioning `{partitioning_name}`, trained by model `{tag}`.
        * Adds a new key `dist_map` to `.obsm` from `adata` which is a matrix of size `n_obs × n_obs` containing the reconstructed cell-to-cell spatial distance.
"""
dist_mat = np.zeros((adata_q.shape[0], adata_q.shape[0]))
for tag in self.models.keys():
self.models[tag].eval()
i = 0
adata_q.obs['class_'] = 0
data_loader = get_dataloader(
graph = self.adjs[tag],
X = adata_q.X, y = adata_q.obs['class_'].values.astype('long'), #TODO: fix this
batch_size = 1,
shuffle = False,
num_workers = 0
)
with torch.no_grad():
for batch in data_loader:
x, edge_index = batch.x.to(self.device), batch.edge_index.to(self.device)
outputs = self.models[tag](x, edge_index)
predicted = outputs.data.to('cpu').detach().numpy()
i += 1
if i == 1:
n_classes = predicted.shape[1]
y_pred = np.empty((0, n_classes))
y_pred = np.concatenate((y_pred, predicted), axis=0)
y_pred = np.exp(y_pred)
y_pred = (y_pred.T / y_pred.T.sum(0)).T
save_adata(adata_q, attr='obs', key='_'.join(['pred', tag]), data = np.argmax(y_pred, axis=1))
temp = (-y_pred * np.log2(y_pred)).sum(axis = 1)
# adata_q.obs['_'.join(['ent', tag])] = np.array(temp) / np.log2(n_classes)
save_adata(adata_q, attr='obs', key='_'.join(['ent', tag]), data = (np.array(temp) / np.log2(n_classes)))
y_pred_1 = (multinomial_rvs(1, y_pred).T * np.array(adata_q.obs['_'.join(['ent', tag])])).T
y_pred_2 = (y_pred.T * (1-np.array(adata_q.obs['_'.join(['ent', tag])]))).T
y_pred_final = y_pred_1 + y_pred_2
kl_d = kullback_leibler_divergence(y_pred_final)
kl_d = kl_d + kl_d.T
kl_d /= np.linalg.norm(kl_d, 'fro')
dist_mat += kl_d
if not to_return:
save_adata(adata_q, attr='obsm', key='dist_map', data=dist_mat)
else:
return(dist_mat)
def save(self, tag, dir='.'):
"""Saves a single trained model.
Parameters
----------
tag : str
Name of the trained model to be saved.
        dir : str, default=`'.'`
The saving directory.
"""
path = os.path.join(dir, tag) + '.pickle'
torch.save(self.models[tag], path)
def load(self, tag, dir='.'):
"""Loads a single pre-trained model.
Parameters
----------
tag : str
Name of the trained model to be stored in the `sagenet` object.
        dir : str, default=`'.'`
The input directory.
"""
path = os.path.join(dir, tag) + '.pickle'
self.models[tag] = torch.load(path)
def save_as_folder(self, dir='.'):
"""Saves all trained models stored in the `sagenet` object as a folder.
Parameters
----------
        dir : str, default=`'.'`
The saving directory.
"""
for tag in self.models.keys():
self.save(tag, dir)
adj_path = os.path.join(dir, tag) + '.h5ad'
adj_adata = anndata.AnnData(X = self.adjs[tag])
adj_adata.write(filename=adj_path)
def load_from_folder(self, dir='.'):
"""Loads pre-trained models from a directory.
Parameters
----------
        dir : str, default=`'.'`
The input directory.
"""
model_files = [f for f in listdir(dir) if re.search(r".pickle$", f)]
for m in model_files:
tag = re.sub(r'.pickle', '', m)
model_path = os.path.join(dir, tag) + '.pickle'
adj_path = os.path.join(dir, tag) + '.h5ad'
self.models[tag] = torch.load(model_path)
self.adjs[tag] = anndata.read_h5ad(adj_path).X
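# --- Illustrative sketch (not part of the original source) -----------------
# A hypothetical end-to-end sagenet run: assumes `adata_ref.varm['adj']` was
# filled beforehand (e.g. by glasso) and that `adata_ref.obs` carries a
# spatial partitioning column named 'class_'.
def _example_sagenet_pipeline(adata_ref, adata_query):
    sg = sagenet(device='cpu')
    sg.train(adata_ref, tag='ref1', comm_columns=['class_'], epochs=10)
    sg.load_query_data(adata_query)
    return adata_query.obsm['dist_map']   # reconstructed cell-to-cell distances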
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/sagenet/sagenet.py
| 0.746046 | 0.46642 |
sagenet.py
|
pypi
|
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as pyg_nn
class NN(nn.Module):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[], \
n_hidden_FC=[10], \
dropout_GNN=0, \
dropout_FC=0):
super(NN, self).__init__()
self.FC = True
self.n_features = n_features
self.n_classes = n_classes
self.layers_GNN = nn.ModuleList()
self.layers_FC = nn.ModuleList()
self.n_layers_GNN = len(n_hidden_GNN)
self.n_layers_FC = len(n_hidden_FC)
self.dropout_GNN = dropout_GNN
self.dropout_FC = dropout_FC
self.n_hidden_GNN = n_hidden_GNN
self.n_hidden_FC = n_hidden_FC
self.conv = False
if self.n_layers_GNN > 0:
self.FC = False
        # Fully connected layers. They occur after the graph convolutions (or at the start if there are no graph convolutions)
if self.n_layers_FC > 0:
if self.n_layers_GNN==0:
self.layers_FC.append(nn.Linear(n_features, n_hidden_FC[0]))
else:
self.layers_FC.append(nn.Linear(n_features*n_hidden_GNN[-1], n_hidden_FC[0]))
if self.n_layers_FC > 1:
for i in range(self.n_layers_FC-1):
self.layers_FC.append(nn.Linear(n_hidden_FC[i], n_hidden_FC[(i+1)]))
# Last layer
if self.n_layers_FC>0:
self.last_layer_FC = nn.Linear(n_hidden_FC[-1], n_classes)
elif self.n_layers_GNN>0:
self.last_layer_FC = nn.Linear(n_features*n_hidden_GNN[-1], n_classes)
else:
self.last_layer_FC = nn.Linear(n_features, n_classes)
def forward(self,x,edge_index):
if self.FC:
# Resize from (1,batch_size * n_features) to (batch_size, n_features)
x = x.view(-1,self.n_features)
if self.conv:
x = x.view(-1,1,self.n_features)
for layer in self.layers_GNN:
x = F.relu(layer(x))
x = F.max_pool1d(x, kernel_size=self.pool_K, stride=1, padding=self.pool_K//2, dilation=1)
x = F.dropout(x, p=self.dropout_GNN, training=self.training)
# x = F.max_pool1d(x)
else:
for layer in self.layers_GNN:
x = F.relu(layer(x, edge_index))
x = F.dropout(x, p=self.dropout_GNN, training=self.training)
if self.n_layers_GNN > 0:
x = x.view(-1, self.n_features*self.n_hidden_GNN[-1])
for layer in self.layers_FC:
x = F.relu(layer(x))
x = F.dropout(x, p=self.dropout_FC, training=self.training)
x = self.last_layer_FC(x)
return x
class GraphSAGE(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        # NN.__init__ expects (..., n_hidden_FC, dropout_GNN, dropout_FC);
        # the original call swapped the two dropout arguments.
        super(GraphSAGE, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.SAGEConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.SAGEConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class ChebNet(NN):
def __init__(self,
n_features,
n_classes,
n_hidden_GNN=[10],
n_hidden_FC=[],
K=4,
dropout_GNN=0,
dropout_FC=0):
        super(ChebNet, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.ChebConv(1, n_hidden_GNN[0], K))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.ChebConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)], K))
class NNConvNet(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(NNConvNet, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.NNConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.NNConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class GATConvNet(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(GATConvNet, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.GATConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.GATConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class GENConvNet(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(GENConvNet, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.GENConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.GENConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class GINConv(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(GINConv, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.GINConv(nn.Sequential(nn.Linear(1, n_hidden_GNN[0]),
nn.ReLU(), nn.Linear(n_hidden_GNN[0],n_hidden_GNN[0])),eps=0.2))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.GINConv(nn.Sequential(nn.Linear(n_hidden_GNN[i], n_hidden_GNN[(i+1)]),
nn.ReLU(), nn.Linear(n_hidden_GNN[(i+1)],n_hidden_GNN[(i+1)]))))
class GraphConv(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(GraphConv, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.GraphConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.GraphConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class MFConv(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(MFConv, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.MFConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.MFConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class TransformerConv(NN):
def __init__(self, \
n_features, \
n_classes, \
n_hidden_GNN=[10], \
n_hidden_FC=[], \
dropout_GNN=0, \
dropout_FC=0):
        super(TransformerConv, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.layers_GNN.append(pyg_nn.TransformerConv(1, n_hidden_GNN[0]))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(pyg_nn.TransformerConv(n_hidden_GNN[i], n_hidden_GNN[(i+1)]))
class ConvNet(NN):
def __init__(self,
n_features,
n_classes,
n_hidden_GNN=[10],
n_hidden_FC=[],
filter_K=4,
pool_K=0,
dropout_GNN=0,
dropout_FC=0):
        super(ConvNet, self).__init__(\
            n_features, n_classes, n_hidden_GNN,\
            n_hidden_FC, dropout_GNN, dropout_FC)
self.conv = True
self.filter_K = filter_K
self.pool_K = pool_K
self.layers_GNN.append(nn.Conv1d(in_channels=1, out_channels=n_hidden_GNN[0], kernel_size=filter_K, padding=filter_K//2, dilation=1, stride=1))
if self.n_layers_GNN > 1:
for i in range(self.n_layers_GNN-1):
self.layers_GNN.append(nn.Conv1d(in_channels=n_hidden_GNN[i], out_channels=n_hidden_GNN[(i+1)], kernel_size=filter_K, padding=filter_K//2, dilation=1, stride=1))
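# --- Illustrative sketch (not part of the original source) -----------------
# Instantiating one of the GNN classifiers above on a hypothetical 100-gene
# graph; each observation is one graph whose nodes carry a single feature,
# so a (100, 1) node-feature tensor flattens to one (1, n_classes) logit row.
def _example_transformer_conv():
    net = TransformerConv(n_features=100, n_classes=5,
                          n_hidden_GNN=[8], n_hidden_FC=[16],
                          dropout_GNN=0.3, dropout_FC=0.2)
    x = torch.rand(100, 1)                                  # 100 nodes, 1 feature each
    edge_index = torch.tensor([[0, 1], [1, 0]], dtype=torch.long)
    return net(x, edge_index)                               # logits, shape (1, 5)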
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/sagenet/model.py
| 0.810216 | 0.366335 |
model.py
|
pypi
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, kl_divergence
from ._utils import one_hot_encoder
from ..trvae.losses import mse, nb, zinb, bce, poisson, nb_dist
from ...trainers.scpoli._utils import cov
class scpoli(nn.Module):
def __init__(
self,
input_dim,
hidden_layer_sizes,
cell_types,
unknown_ct_names,
conditions,
conditions_combined,
inject_condition,
latent_dim,
embedding_dims,
embedding_max_norm,
recon_loss,
dr_rate,
beta,
use_bn,
use_ln,
prototypes_labeled,
prototypes_unlabeled,
):
super().__init__()
self.input_dim = input_dim
self.latent_dim = latent_dim
self.embedding_dims = embedding_dims
self.embedding_max_norm = embedding_max_norm
self.cell_types = cell_types
self.n_cell_types = len(cell_types)
self.cell_type_encoder = {
k: v for k, v in zip(cell_types, range(len(cell_types)))
}
self.n_conditions = [len(conditions[cond]) for cond in conditions.keys()]
self.n_reference_conditions = None
self.conditions = conditions
self.condition_encoders = {cond: {
k: v for k, v in zip(conditions[cond], range(len(conditions[cond])))
} for cond in conditions.keys()}
self.conditions_combined = conditions_combined
self.n_conditions_combined = len(conditions_combined)
self.conditions_combined_encoder = {
k: v for k, v in zip(conditions_combined, range(len(conditions_combined)))
}
self.inject_condition = inject_condition
self.use_bn = use_bn
self.use_ln = use_ln
self.use_mmd = False
self.recon_loss = recon_loss
self.hidden_layer_sizes = hidden_layer_sizes
self.freeze = False
self.unknown_ct_names = unknown_ct_names
if self.unknown_ct_names is not None:
for unknown_ct in self.unknown_ct_names:
self.cell_type_encoder[unknown_ct] = -1
self.prototypes_labeled = (
{"mean": None, "cov": None}
if prototypes_labeled is None
else prototypes_labeled
)
self.prototypes_unlabeled = (
{"mean": None} if prototypes_unlabeled is None else prototypes_unlabeled
)
self.new_prototypes = None
self.num_reference_conditions = None
if self.prototypes_labeled["mean"] is not None:
# Save indices of possible new prototypes to train
self.new_prototypes = []
for idx in range(self.n_cell_types - len(self.prototypes_labeled["mean"])):
self.new_prototypes.append(len(self.prototypes_labeled["mean"]) + idx)
self.dr_rate = dr_rate
if self.dr_rate > 0:
self.use_dr = True
else:
self.use_dr = False
if recon_loss in ["nb", "zinb", "nb_dist"]:
self.theta = torch.nn.Parameter(
torch.randn(self.input_dim, self.n_conditions_combined)
)
else:
self.theta = None
encoder_layer_sizes = self.hidden_layer_sizes.copy()
encoder_layer_sizes.insert(0, self.input_dim)
decoder_layer_sizes = self.hidden_layer_sizes.copy()
decoder_layer_sizes.reverse()
decoder_layer_sizes.append(self.input_dim)
self.embeddings = nn.ModuleList(nn.Embedding(
self.n_conditions[i], self.embedding_dims[i], max_norm=self.embedding_max_norm
) for i in range(len(self.embedding_dims)))
print(
"Embedding dictionary:\n",
f"\tNum conditions: {self.n_conditions}\n",
f"\tEmbedding dim: {self.embedding_dims}",
)
self.encoder = Encoder(
encoder_layer_sizes,
self.latent_dim,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
sum(self.embedding_dims) if "encoder" in self.inject_condition else None,
)
self.decoder = Decoder(
decoder_layer_sizes,
self.latent_dim,
self.recon_loss,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
sum(self.embedding_dims) if "decoder" in self.inject_condition else None,
)
def forward(
self,
x=None,
batch=None,
combined_batch=None,
sizefactor=None,
celltypes=None,
labeled=None,
):
batch_embeddings = torch.hstack([self.embeddings[i](batch[:, i]) for i in range(batch.shape[1])])
x_log = torch.log(1 + x)
if self.recon_loss == "mse":
x_log = x
if "encoder" in self.inject_condition:
z1_mean, z1_log_var = self.encoder(x_log, batch_embeddings)
else:
z1_mean, z1_log_var = self.encoder(x_log, batch=None)
z1 = self.sampling(z1_mean, z1_log_var)
if "decoder" in self.inject_condition:
outputs = self.decoder(z1, batch_embeddings)
else:
outputs = self.decoder(z1, batch=None)
if self.recon_loss == "mse":
recon_x, y1 = outputs
recon_loss = mse(recon_x, x_log).sum(dim=-1).mean()
elif self.recon_loss == "zinb":
dec_mean_gamma, dec_dropout, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(
dec_mean_gamma.size(0), dec_mean_gamma.size(1)
)
dec_mean = dec_mean_gamma * size_factor_view
dispersion = F.linear(one_hot_encoder(combined_batch, self.n_conditions_combined), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = (
-zinb(x=x, mu=dec_mean, theta=dispersion, pi=dec_dropout)
.sum(dim=-1)
.mean()
)
elif self.recon_loss == "nb":
dec_mean_gamma, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(
dec_mean_gamma.size(0), dec_mean_gamma.size(1)
)
dec_mean = dec_mean_gamma * size_factor_view
dispersion = F.linear(one_hot_encoder(combined_batch, self.n_conditions_combined), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = -nb(x=x, mu=dec_mean, theta=dispersion).sum(dim=-1).mean()
elif self.recon_loss == "nb_dist":
dec_mean_gamma, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(
dec_mean_gamma.size(0), dec_mean_gamma.size(1)
)
dec_mean = dec_mean_gamma * size_factor_view
            dispersion = F.linear(one_hot_encoder(combined_batch, self.n_conditions_combined), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = nb_dist(x=x, mu=dec_mean, theta=dispersion).sum(dim=-1).mean()
elif self.recon_loss == 'bernoulli':
recon_x, y1 = outputs
recon_loss = bce(recon_x, x).sum(dim=-1).mean()
elif self.recon_loss == 'poisson':
recon_x, y1 = outputs
recon_loss = poisson(recon_x, x).sum(dim=-1).mean()
z1_var = torch.exp(z1_log_var) + 1e-4
kl_div = (
kl_divergence(
Normal(z1_mean, torch.sqrt(z1_var)),
Normal(torch.zeros_like(z1_mean), torch.ones_like(z1_var)),
)
.sum(dim=1)
.mean()
)
mmd_loss = torch.tensor(0.0, device=z1.device)
        if self.use_mmd:
            if self.mmd_on == "z":
                mmd_loss = mmd(z1, batch, self.n_conditions, self.beta, self.mmd_boundary)
            else:
                mmd_loss = mmd(y1, batch, self.n_conditions, self.beta, self.mmd_boundary)
return z1, recon_loss, kl_div, mmd_loss
def add_new_cell_type(self, latent, cell_type_name, prototypes, classes_list=None):
"""
Function used to add new annotation for a novel cell type.
Parameters
----------
latent: torch.Tensor
Latent representation of adata.
cell_type_name: str
Name of the new cell type
prototypes: list
List of indices of the unlabeled prototypes that correspond to the new cell type
classes_list: torch.Tensor
Tensor of prototype indices corresponding to current hierarchy
Returns
-------
"""
# Update internal model parameters
device = next(self.parameters()).device
self.cell_types.append(cell_type_name)
self.n_cell_types += 1
self.cell_type_encoder = {
k: v for k, v in zip(self.cell_types, range(len(self.cell_types)))
}
# Add new celltype index to hierarchy index list of prototypes
classes_list = torch.cat(
(
classes_list,
torch.tensor([self.n_cell_types - 1], device=classes_list.device),
)
)
# Add new prototype mean to labeled prototype means
new_prototype = (
self.prototypes_unlabeled["mean"][prototypes].mean(0).unsqueeze(0)
)
self.prototypes_labeled["mean"] = torch.cat(
(self.prototypes_labeled["mean"], new_prototype), dim=0
)
# Get latent indices which correspond to new prototype
self.prototypes_labeled["mean"] = self.prototypes_labeled["mean"].to(device)
latent = latent.to(device)
dists = torch.cdist(latent, self.prototypes_labeled["mean"][classes_list, :])
min_dist, y_hat = torch.min(dists, 1)
y_hat = classes_list[y_hat]
indices = y_hat.eq(self.n_cell_types - 1).nonzero(as_tuple=False)[:, 0]
# Add new prototype cov to labeled prototype covs
new_prototype_cov = cov(latent[indices, :]).unsqueeze(0)
new_prototype_cov = new_prototype_cov.to(self.prototypes_labeled["cov"].device)
self.prototypes_labeled["cov"] = torch.cat(
(self.prototypes_labeled["cov"], new_prototype_cov), dim=0
)
def classify(
self,
x,
c=None,
prototype=False,
classes_list=None,
p=2,
get_prob=False,
log_distance=True,
):
"""
        Classifies unlabeled cells using the prototypes obtained during training.
        Parameters
        ----------
x: torch.Tensor
Features to be classified.
c: torch.Tensor
Condition vector.
        prototype: Boolean
            Whether to classify the gene features or the prototypes stored in
            the model.
classes_list: torch.Tensor
Tensor of prototype indices corresponding to current hierarchy
        get_prob: Boolean
            If `True`, scale the euclidean distances to pseudo-probabilities with a softmax.
        p: Integer
            Order of the norm used by `torch.cdist`.
        log_distance: Boolean
            If `True`, return log1p-transformed distances as the uncertainty.
"""
if prototype:
latent = x
else:
latent = self.get_latent(x, c)
device = next(self.parameters()).device
self.prototypes_labeled["mean"] = self.prototypes_labeled["mean"].to(device)
dists = torch.cdist(latent, self.prototypes_labeled["mean"][classes_list, :], p)
# Idea of using euclidean distances for classification
        if get_prob:
dists = F.softmax(-dists, dim=1)
uncert, preds = torch.max(dists, dim=1)
preds = classes_list[preds]
else:
uncert, preds = torch.min(dists, dim=1)
preds = classes_list[preds]
        if log_distance:
            uncert = torch.log1p(uncert)
return preds, uncert, dists
def sampling(self, mu, log_var):
"""Samples from standard Normal distribution and applies re-parametrization trick.
It is actually sampling from latent space distributions with N(mu, var), computed by encoder.
Parameters
----------
mu: torch.Tensor
Torch Tensor of Means.
log_var: torch.Tensor
Torch Tensor of log. variances.
Returns
-------
Torch Tensor of sampled data.
"""
var = torch.exp(log_var) + 1e-4
return Normal(mu, var.sqrt()).rsample()
def get_latent(self, x, c=None, mean=False):
"""Map `x` in to the latent space. This function will feed data in encoder and return z for each sample in
data.
Parameters
----------
x: torch.Tensor
Torch Tensor to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
c: torch.Tensor
Torch Tensor of condition labels for each sample.
mean: boolean
Returns
-------
Returns Torch Tensor containing latent space encoding of 'x'.
"""
x_ = torch.log(1 + x)
if self.recon_loss == "mse":
x_ = x
if "encoder" in self.inject_condition:
# c = c.type(torch.cuda.LongTensor)
c = c.long()
embed_c = torch.hstack([self.embeddings[i](c[:, i]) for i in range(c.shape[1])])
z_mean, z_log_var = self.encoder(x_, embed_c)
else:
z_mean, z_log_var = self.encoder(x_)
latent = self.sampling(z_mean, z_log_var)
if mean:
return z_mean
return latent
class Encoder(nn.Module):
"""ScArches Encoder class. Constructs the encoder sub-network of TRVAE and CVAE. It will transform primary space
input to means and log. variances of latent space with n_dimensions = z_dimension.
Parameters
----------
layer_sizes: List
List of first and hidden layer sizes
latent_dim: Integer
Bottleneck layer (z) size.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
    use_dr: Boolean
        If `True` dropout will be applied to layers.
    dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
    embedding_dim: Integer
        Total dimension of the condition embeddings concatenated to the input. If `None` the model will be a
        normal VAE instead of a conditional VAE.
"""
def __init__(
self,
layer_sizes: list,
latent_dim: int,
use_bn: bool,
use_ln: bool,
use_dr: bool,
dr_rate: float,
embedding_dim: int = None,
):
super().__init__()
self.embedding_dim = 0
if embedding_dim is not None:
self.embedding_dim = embedding_dim
        self.FC = None
        if len(layer_sizes) > 1:
            print("Encoder Architecture:")
            self.FC = nn.Sequential()
            for i, (in_size, out_size) in enumerate(
                zip(layer_sizes[:-1], layer_sizes[1:])
            ):
                if i == 0:
                    print(
                        "\tInput Layer in, out and cond:",
                        in_size,
                        out_size,
                        self.embedding_dim,
                    )
                    self.FC.add_module(
                        name="L{:d}".format(i),
                        module=CondLayers(
                            in_size, out_size, self.embedding_dim, bias=True
                        ),
                    )
                else:
                    print("\tHidden Layer", i, "in/out:", in_size, out_size)
                    self.FC.add_module(
                        name="L{:d}".format(i),
                        module=nn.Linear(in_size, out_size, bias=True),
                    )
                if use_bn:
                    self.FC.add_module(
                        "N{:d}".format(i),
                        module=nn.BatchNorm1d(out_size, affine=True),
                    )
                elif use_ln:
                    self.FC.add_module(
                        "N{:d}".format(i),
                        module=nn.LayerNorm(out_size, elementwise_affine=False),
                    )
                self.FC.add_module(name="A{:d}".format(i), module=nn.ReLU())
                if use_dr:
                    self.FC.add_module(
                        name="D{:d}".format(i), module=nn.Dropout(p=dr_rate)
                    )
print("\tMean/Var Layer in/out:", layer_sizes[-1], latent_dim)
self.mean_encoder = nn.Linear(layer_sizes[-1], latent_dim)
self.log_var_encoder = nn.Linear(layer_sizes[-1], latent_dim)
def forward(self, x, batch=None):
if batch is not None:
# batch = one_hot_encoder(batch, n_cls=self.n_classes)
x = torch.cat((x, batch), dim=-1)
if self.FC is not None:
x = self.FC(x)
means = self.mean_encoder(x)
log_vars = self.log_var_encoder(x)
return means, log_vars
class Decoder(nn.Module):
"""ScArches Decoder class. Constructs the decoder sub-network of TRVAE or CVAE networks. It will transform the
constructed latent space to the previous space of data with n_dimensions = x_dimension.
Parameters
----------
layer_sizes: List
List of hidden and last layer sizes
latent_dim: Integer
Bottleneck layer (z) size.
    recon_loss: String
        Definition of Reconstruction-Loss-Method: 'mse', 'nb', 'zinb', 'nb_dist', 'bernoulli' or 'poisson'.
    use_bn: Boolean
        If `True` batch normalization will be applied to layers.
    use_ln: Boolean
        If `True` layer normalization will be applied to layers.
    use_dr: Boolean
        If `True` dropout will be applied to layers.
    dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
    embedding_dim: Integer
        Total dimension of the condition embeddings concatenated to the latent vector. If `None` the model
        will be a normal VAE instead of a conditional VAE.
"""
def __init__(
self,
layer_sizes: list,
latent_dim: int,
recon_loss: str,
use_bn: bool,
use_ln: bool,
use_dr: bool,
dr_rate: float,
embedding_dim: int = None,
):
super().__init__()
self.use_dr = use_dr
self.recon_loss = recon_loss
self.embedding_dim = 0
if embedding_dim is not None:
self.embedding_dim = embedding_dim
layer_sizes = [latent_dim] + layer_sizes
print("Decoder Architecture:")
# Create first Decoder layer
self.FirstL = nn.Sequential()
print(
"\tFirst Layer in, out and cond: ",
layer_sizes[0],
layer_sizes[1],
self.embedding_dim,
)
        self.FirstL.add_module(
            name="L0",
            module=CondLayers(
                layer_sizes[0], layer_sizes[1], self.embedding_dim, bias=False
            ),
        )
        if use_bn:
            self.FirstL.add_module(
                "N0", module=nn.BatchNorm1d(layer_sizes[1], affine=True)
            )
        elif use_ln:
            self.FirstL.add_module(
                "N0", module=nn.LayerNorm(layer_sizes[1], elementwise_affine=False)
            )
        self.FirstL.add_module(name="A0", module=nn.ReLU())
        if self.use_dr:
            self.FirstL.add_module(name="D0", module=nn.Dropout(p=dr_rate))
# Create all Decoder hidden layers
if len(layer_sizes) > 2:
self.HiddenL = nn.Sequential()
for i, (in_size, out_size) in enumerate(
zip(layer_sizes[1:-1], layer_sizes[2:])
):
if i + 3 < len(layer_sizes):
print("\tHidden Layer", i + 1, "in/out:", in_size, out_size)
                    self.HiddenL.add_module(
                        name="L{:d}".format(i + 1),
                        module=nn.Linear(in_size, out_size, bias=False),
                    )
                    if use_bn:
                        self.HiddenL.add_module(
                            "N{:d}".format(i + 1),
                            module=nn.BatchNorm1d(out_size, affine=True),
                        )
                    elif use_ln:
                        self.HiddenL.add_module(
                            "N{:d}".format(i + 1),
                            module=nn.LayerNorm(out_size, elementwise_affine=False),
                        )
                    self.HiddenL.add_module(
                        name="A{:d}".format(i + 1), module=nn.ReLU()
                    )
                    if self.use_dr:
                        self.HiddenL.add_module(
                            name="D{:d}".format(i + 1), module=nn.Dropout(p=dr_rate)
                        )
else:
self.HiddenL = None
# Create Output Layers
print("\tOutput Layer in/out: ", layer_sizes[-2], layer_sizes[-1], "\n")
if self.recon_loss == "mse":
self.recon_decoder = nn.Sequential(
nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.ReLU()
)
elif self.recon_loss == "zinb":
# mean gamma
self.mean_decoder = nn.Sequential(
nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Softmax(dim=-1)
)
# dropout
self.dropout_decoder = nn.Linear(layer_sizes[-2], layer_sizes[-1])
elif self.recon_loss in ["nb", "nb_dist"]:
# mean gamma
self.mean_decoder = nn.Sequential(
nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Softmax(dim=-1)
)
elif self.recon_loss == 'bernoulli':
self.recon_decoder = nn.Sequential(
nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Sigmoid()
)
elif self.recon_loss == 'poisson':
self.recon_decoder = nn.Sequential(
nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Softmax(dim=-1)
)
def forward(self, z, batch=None):
# Add Condition Labels to Decoder Input
if batch is not None:
# batch = one_hot_encoder(batch, n_cls=self.n_classes)
z_cat = torch.cat((z, batch), dim=-1)
dec_latent = self.FirstL(z_cat)
else:
dec_latent = self.FirstL(z)
# Compute Hidden Output
if self.HiddenL is not None:
x = self.HiddenL(dec_latent)
else:
x = dec_latent
# Compute Decoder Output
if self.recon_loss == "mse":
recon_x = self.recon_decoder(x)
return recon_x, dec_latent
elif self.recon_loss == "zinb":
dec_mean_gamma = self.mean_decoder(x)
dec_dropout = self.dropout_decoder(x)
return dec_mean_gamma, dec_dropout, dec_latent
elif self.recon_loss in ["nb", "nb_dist"]:
dec_mean_gamma = self.mean_decoder(x)
return dec_mean_gamma, dec_latent
        elif self.recon_loss == 'bernoulli':
            recon_x = self.recon_decoder(x)
            return recon_x, dec_latent
        elif self.recon_loss == 'poisson':
            recon_x = self.recon_decoder(x)
            return recon_x, dec_latent
class CondLayers(nn.Module):
def __init__(
self,
n_in: int,
n_out: int,
n_cond: int,
bias: bool,
):
super().__init__()
self.n_cond = n_cond
self.expr_L = nn.Linear(n_in, n_out, bias=bias)
if self.n_cond != 0:
self.cond_L = nn.Linear(self.n_cond, n_out, bias=False)
def forward(self, x: torch.Tensor):
if self.n_cond == 0:
out = self.expr_L(x)
else:
expr, cond = torch.split(x, [x.shape[1] - self.n_cond, self.n_cond], dim=1)
out = self.expr_L(expr) + self.cond_L(cond)
return out
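# Illustrative sketch (not part of the model): CondLayers expects the
# condition embedding to be concatenated to the input along dim 1 and
# routes the two slices through separate linear maps.
if __name__ == "__main__":
    layer = CondLayers(n_in=50, n_out=16, n_cond=4, bias=True)
    expr = torch.randn(8, 50)   # expression part
    cond = torch.randn(8, 4)    # condition-embedding part
    out = layer(torch.cat((expr, cond), dim=1))
    assert out.shape == (8, 16)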
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/scpoli/scpoli.py
| 0.91384 | 0.360855 |
scpoli.py
|
pypi
|
from scvi.distributions import NegativeBinomial
import torch
from torch.autograd import Variable
from torch.distributions import Poisson
import torch.nn.functional as F
from ._utils import partition
def bce(recon_x, x):
"""Computes BCE loss between reconstructed data and ground truth data.
Parameters
----------
recon_x: torch.Tensor
Torch Tensor of reconstructed data
x: torch.Tensor
Torch Tensor of ground truth data
Returns
-------
    BCE loss value
"""
bce_loss = torch.nn.functional.binary_cross_entropy(recon_x, (x > 0).float(), reduction='none')
return bce_loss
def mse(recon_x, x):
"""Computes MSE loss between reconstructed data and ground truth data.
Parameters
----------
recon_x: torch.Tensor
Torch Tensor of reconstructed data
x: torch.Tensor
Torch Tensor of ground truth data
Returns
-------
MSE loss value
"""
mse_loss = torch.nn.functional.mse_loss(recon_x, x, reduction='none')
return mse_loss
def poisson(recon_x, x):
"""Computes Poisson NLL between reconstructed data and ground truth data.
Parameters
----------
recon_x: torch.Tensor
Torch Tensor of reconstructed data
x: torch.Tensor
Torch Tensor of ground truth data
Returns
-------
    Poisson negative log-likelihood value
"""
#poisson_nll = torch.nn.functional.poisson_nll_loss(recon_x, x, reduction='none')
poisson_loss = -Poisson(recon_x).log_prob(x)
return poisson_loss
def nb(x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, eps=1e-8):
"""
This negative binomial function was taken from:
Title: scvi-tools
Authors: Romain Lopez <[email protected]>,
Adam Gayoso <[email protected]>,
Galen Xing <[email protected]>
Date: 16th November 2020
Code version: 0.8.1
Availability: https://github.com/YosefLab/scvi-tools/blob/8f5a9cc362325abbb7be1e07f9523cfcf7e55ec0/scvi/core/distributions/_negative_binomial.py
Computes negative binomial loss.
Parameters
----------
x: torch.Tensor
Torch Tensor of ground truth data.
mu: torch.Tensor
Torch Tensor of means of the negative binomial (has to be positive support).
theta: torch.Tensor
Torch Tensor of inverse dispersion parameter (has to be positive support).
eps: Float
numerical stability constant.
Returns
-------
    Torch Tensor of negative binomial log-likelihoods.
"""
if theta.ndimension() == 1:
theta = theta.view(1, theta.size(0))
log_theta_mu_eps = torch.log(theta + mu + eps)
res = (
theta * (torch.log(theta + eps) - log_theta_mu_eps)
+ x * (torch.log(mu + eps) - log_theta_mu_eps)
+ torch.lgamma(x + theta)
- torch.lgamma(theta)
- torch.lgamma(x + 1)
)
return res
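# Sanity-check sketch. Assumption: torch's NegativeBinomial with
# total_count=theta and logits=log(mu) - log(theta) uses the same
# parametrization as `nb` above (the scvi-tools convention).
if __name__ == "__main__":
    from torch.distributions import NegativeBinomial as _TorchNB
    _x = torch.tensor([[0.0, 3.0, 7.0]])
    _mu = torch.tensor([[1.0, 2.0, 5.0]])
    _theta = torch.tensor([2.0, 2.0, 2.0])
    _ref = _TorchNB(total_count=_theta, logits=_mu.log() - _theta.log()).log_prob(_x)
    assert torch.allclose(nb(_x, _mu, _theta), _ref, atol=1e-4)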
def nb_dist(x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, eps=1e-8):
loss = -NegativeBinomial(mu=mu, theta=theta).log_prob(x)
return loss
def zinb(x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, pi: torch.Tensor, eps=1e-8):
"""
This zero-inflated negative binomial function was taken from:
Title: scvi-tools
Authors: Romain Lopez <[email protected]>,
Adam Gayoso <[email protected]>,
Galen Xing <[email protected]>
Date: 16th November 2020
Code version: 0.8.1
Availability: https://github.com/YosefLab/scvi-tools/blob/8f5a9cc362325abbb7be1e07f9523cfcf7e55ec0/scvi/core/distributions/_negative_binomial.py
Computes zero inflated negative binomial loss.
Parameters
----------
x: torch.Tensor
Torch Tensor of ground truth data.
mu: torch.Tensor
Torch Tensor of means of the negative binomial (has to be positive support).
theta: torch.Tensor
Torch Tensor of inverses dispersion parameter (has to be positive support).
pi: torch.Tensor
Torch Tensor of logits of the dropout parameter (real support)
eps: Float
numerical stability constant.
Returns
-------
    Torch Tensor of zero-inflated negative binomial log-likelihoods.
"""
# theta is the dispersion rate. If .ndimension() == 1, it is shared for all cells (regardless of batch or labels)
if theta.ndimension() == 1:
theta = theta.view(
1, theta.size(0)
) # In this case, we reshape theta for broadcasting
softplus_pi = F.softplus(-pi) # uses log(sigmoid(x)) = -softplus(-x)
log_theta_eps = torch.log(theta + eps)
log_theta_mu_eps = torch.log(theta + mu + eps)
pi_theta_log = -pi + theta * (log_theta_eps - log_theta_mu_eps)
case_zero = F.softplus(pi_theta_log) - softplus_pi
mul_case_zero = torch.mul((x < eps).type(torch.float32), case_zero)
case_non_zero = (
-softplus_pi
+ pi_theta_log
+ x * (torch.log(mu + eps) - log_theta_mu_eps)
+ torch.lgamma(x + theta)
- torch.lgamma(theta)
- torch.lgamma(x + 1)
)
mul_case_non_zero = torch.mul((x > eps).type(torch.float32), case_non_zero)
res = mul_case_zero + mul_case_non_zero
return res
def pairwise_distance(x, y):
x = x.view(x.shape[0], x.shape[1], 1)
y = torch.transpose(y, 0, 1)
output = torch.sum((x - y) ** 2, 1)
output = torch.transpose(output, 0, 1)
return output
def gaussian_kernel_matrix(x, y, alphas):
"""Computes multiscale-RBF kernel between x and y.
Parameters
----------
x: torch.Tensor
Tensor with shape [batch_size, z_dim].
y: torch.Tensor
Tensor with shape [batch_size, z_dim].
alphas: Tensor
Returns
-------
Returns the computed multiscale-RBF kernel between x and y.
"""
dist = pairwise_distance(x, y).contiguous()
dist_ = dist.view(1, -1)
alphas = alphas.view(alphas.shape[0], 1)
beta = 1. / (2. * alphas)
s = torch.matmul(beta, dist_)
return torch.sum(torch.exp(-s), 0).view_as(dist)
def mmd_loss_calc(source_features, target_features):
"""Initializes Maximum Mean Discrepancy(MMD) between source_features and target_features.
- Gretton, Arthur, et al. "A Kernel Two-Sample Test". 2012.
Parameters
----------
source_features: torch.Tensor
Tensor with shape [batch_size, z_dim]
target_features: torch.Tensor
Tensor with shape [batch_size, z_dim]
Returns
-------
Returns the computed MMD between x and y.
"""
alphas = [
1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
1e3, 1e4, 1e5, 1e6
]
alphas = Variable(torch.FloatTensor(alphas)).to(device=source_features.device)
cost = torch.mean(gaussian_kernel_matrix(source_features, source_features, alphas))
cost += torch.mean(gaussian_kernel_matrix(target_features, target_features, alphas))
cost -= 2 * torch.mean(gaussian_kernel_matrix(source_features, target_features, alphas))
return cost
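# Illustrative sketch: MMD between two samples drawn from the same
# distribution should be much smaller than MMD between shifted samples.
if __name__ == "__main__":
    torch.manual_seed(0)
    _a = torch.randn(128, 10)
    _b = torch.randn(128, 10)
    _c = torch.randn(128, 10) + 3.0
    assert mmd_loss_calc(_a, _b) < mmd_loss_calc(_a, _c)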
def mmd(y, c, n_conditions, beta, boundary):
"""Initializes Maximum Mean Discrepancy(MMD) between every different condition.
Parameters
----------
n_conditions: integer
Number of classes (conditions) the data contain.
beta: float
beta coefficient for MMD loss.
boundary: integer
If not 'None', mmd loss is only calculated on #new conditions.
y: torch.Tensor
Torch Tensor of computed latent data.
c: torch.Tensor
Torch Tensor of condition labels.
Returns
-------
Returns MMD loss.
"""
# partition separates y into num_cls subsets w.r.t. their labels c
conditions_mmd = partition(y, c, n_conditions)
loss = torch.tensor(0.0, device=y.device)
if boundary is not None:
for i in range(boundary):
for j in range(boundary, n_conditions):
if conditions_mmd[i].size(0) < 2 or conditions_mmd[j].size(0) < 2:
continue
loss += mmd_loss_calc(conditions_mmd[i], conditions_mmd[j])
else:
for i in range(len(conditions_mmd)):
if conditions_mmd[i].size(0) < 1:
continue
for j in range(i):
if conditions_mmd[j].size(0) < 1 or i == j:
continue
loss += mmd_loss_calc(conditions_mmd[i], conditions_mmd[j])
return beta * loss
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/trvae/losses.py
| 0.954764 | 0.853547 |
losses.py
|
pypi
|
import torch
import torch.nn as nn
from ._utils import one_hot_encoder
class CondLayers(nn.Module):
def __init__(
self,
n_in: int,
n_out: int,
n_cond: int,
bias: bool,
):
super().__init__()
self.n_cond = n_cond
self.expr_L = nn.Linear(n_in, n_out, bias=bias)
if self.n_cond != 0:
self.cond_L = nn.Linear(self.n_cond, n_out, bias=False)
def forward(self, x: torch.Tensor):
if self.n_cond == 0:
out = self.expr_L(x)
else:
expr, cond = torch.split(x, [x.shape[1] - self.n_cond, self.n_cond], dim=1)
out = self.expr_L(expr) + self.cond_L(cond)
return out
class Encoder(nn.Module):
"""ScArches Encoder class. Constructs the encoder sub-network of TRVAE and CVAE. It will transform primary space
input to means and log. variances of latent space with n_dimensions = z_dimension.
Parameters
----------
layer_sizes: List
List of first and hidden layer sizes
latent_dim: Integer
Bottleneck layer (z) size.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
    use_dr: Boolean
        If `True` dropout will be applied to layers.
    dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
num_classes: Integer
Number of classes (conditions) the data contain. if `None` the model will be a normal VAE instead of
conditional VAE.
"""
def __init__(self,
layer_sizes: list,
latent_dim: int,
use_bn: bool,
use_ln: bool,
use_dr: bool,
dr_rate: float,
num_classes: int = None):
super().__init__()
self.n_classes = 0
if num_classes is not None:
self.n_classes = num_classes
self.FC = None
if len(layer_sizes) > 1:
print("Encoder Architecture:")
self.FC = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
if i == 0:
print("\tInput Layer in, out and cond:", in_size, out_size, self.n_classes)
self.FC.add_module(name="L{:d}".format(i), module=CondLayers(in_size,
out_size,
self.n_classes,
bias=True))
else:
print("\tHidden Layer", i, "in/out:", in_size, out_size)
self.FC.add_module(name="L{:d}".format(i), module=nn.Linear(in_size, out_size, bias=True))
if use_bn:
self.FC.add_module("N{:d}".format(i), module=nn.BatchNorm1d(out_size, affine=True))
elif use_ln:
self.FC.add_module("N{:d}".format(i), module=nn.LayerNorm(out_size, elementwise_affine=False))
self.FC.add_module(name="A{:d}".format(i), module=nn.ReLU())
if use_dr:
self.FC.add_module(name="D{:d}".format(i), module=nn.Dropout(p=dr_rate))
print("\tMean/Var Layer in/out:", layer_sizes[-1], latent_dim)
self.mean_encoder = nn.Linear(layer_sizes[-1], latent_dim)
self.log_var_encoder = nn.Linear(layer_sizes[-1], latent_dim)
def forward(self, x, batch=None):
if batch is not None:
batch = one_hot_encoder(batch, n_cls=self.n_classes)
x = torch.cat((x, batch), dim=-1)
if self.FC is not None:
x = self.FC(x)
means = self.mean_encoder(x)
log_vars = self.log_var_encoder(x)
return means, log_vars
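# Minimal usage sketch (toy sizes, synthetic data). Assumption:
# `one_hot_encoder` accepts a 1-D tensor of integer condition ids,
# as it does when called from trVAE's forward pass.
if __name__ == "__main__":
    enc = Encoder(layer_sizes=[100, 32], latent_dim=10,
                  use_bn=False, use_ln=True, use_dr=True, dr_rate=0.1,
                  num_classes=3)
    _x = torch.randn(8, 100)
    _batch = torch.randint(0, 3, (8,))
    means, log_vars = enc(_x, _batch)
    assert means.shape == (8, 10) and log_vars.shape == (8, 10)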
class Decoder(nn.Module):
"""ScArches Decoder class. Constructs the decoder sub-network of TRVAE or CVAE networks. It will transform the
constructed latent space to the previous space of data with n_dimensions = x_dimension.
Parameters
----------
layer_sizes: List
List of hidden and last layer sizes
latent_dim: Integer
Bottleneck layer (z) size.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
    use_dr: Boolean
        If `True` dropout will be applied to layers.
    dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
num_classes: Integer
Number of classes (conditions) the data contain. if `None` the model will be a normal VAE instead of
conditional VAE.
"""
def __init__(self,
layer_sizes: list,
latent_dim: int,
recon_loss: str,
use_bn: bool,
use_ln: bool,
use_dr: bool,
dr_rate: float,
num_classes: int = None):
super().__init__()
self.use_dr = use_dr
self.recon_loss = recon_loss
self.n_classes = 0
if num_classes is not None:
self.n_classes = num_classes
layer_sizes = [latent_dim] + layer_sizes
print("Decoder Architecture:")
# Create first Decoder layer
self.FirstL = nn.Sequential()
print("\tFirst Layer in, out and cond: ", layer_sizes[0], layer_sizes[1], self.n_classes)
self.FirstL.add_module(name="L0", module=CondLayers(layer_sizes[0], layer_sizes[1], self.n_classes, bias=False))
if use_bn:
self.FirstL.add_module("N0", module=nn.BatchNorm1d(layer_sizes[1], affine=True))
elif use_ln:
self.FirstL.add_module("N0", module=nn.LayerNorm(layer_sizes[1], elementwise_affine=False))
self.FirstL.add_module(name="A0", module=nn.ReLU())
if self.use_dr:
self.FirstL.add_module(name="D0", module=nn.Dropout(p=dr_rate))
# Create all Decoder hidden layers
if len(layer_sizes) > 2:
self.HiddenL = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[1:-1], layer_sizes[2:])):
if i+3 < len(layer_sizes):
print("\tHidden Layer", i+1, "in/out:", in_size, out_size)
self.HiddenL.add_module(name="L{:d}".format(i+1), module=nn.Linear(in_size, out_size, bias=False))
if use_bn:
self.HiddenL.add_module("N{:d}".format(i+1), module=nn.BatchNorm1d(out_size, affine=True))
elif use_ln:
self.HiddenL.add_module("N{:d}".format(i + 1), module=nn.LayerNorm(out_size, elementwise_affine=False))
self.HiddenL.add_module(name="A{:d}".format(i+1), module=nn.ReLU())
if self.use_dr:
self.HiddenL.add_module(name="D{:d}".format(i+1), module=nn.Dropout(p=dr_rate))
else:
self.HiddenL = None
# Create Output Layers
print("\tOutput Layer in/out: ", layer_sizes[-2], layer_sizes[-1], "\n")
if self.recon_loss == "mse":
self.recon_decoder = nn.Sequential(nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.ReLU())
if self.recon_loss == "zinb":
# mean gamma
self.mean_decoder = nn.Sequential(nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Softmax(dim=-1))
# dropout
self.dropout_decoder = nn.Linear(layer_sizes[-2], layer_sizes[-1])
if self.recon_loss == "nb":
# mean gamma
self.mean_decoder = nn.Sequential(nn.Linear(layer_sizes[-2], layer_sizes[-1]), nn.Softmax(dim=-1))
def forward(self, z, batch=None):
# Add Condition Labels to Decoder Input
if batch is not None:
batch = one_hot_encoder(batch, n_cls=self.n_classes)
z_cat = torch.cat((z, batch), dim=-1)
dec_latent = self.FirstL(z_cat)
else:
dec_latent = self.FirstL(z)
# Compute Hidden Output
if self.HiddenL is not None:
x = self.HiddenL(dec_latent)
else:
x = dec_latent
# Compute Decoder Output
if self.recon_loss == "mse":
recon_x = self.recon_decoder(x)
return recon_x, dec_latent
elif self.recon_loss == "zinb":
dec_mean_gamma = self.mean_decoder(x)
dec_dropout = self.dropout_decoder(x)
return dec_mean_gamma, dec_dropout, dec_latent
elif self.recon_loss == "nb":
dec_mean_gamma = self.mean_decoder(x)
return dec_mean_gamma, dec_latent
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/trvae/modules.py
| 0.937139 | 0.472866 |
modules.py
|
pypi
|
import logging
import os
import numpy as np
import torch
import pickle
from typing import Optional, Union
from copy import deepcopy
from .trvae_model import TRVAE
logger = logging.getLogger(__name__)
class Adaptor:
"""Adaptor class for trVAE.
    Allows saving and loading trained conditional weights for trVAE models.
Parameters
----------
    trvae_model
        A TRVAE class object with a trained model or a path to a saved Adaptor object.
condition
Condition name to save in the adaptor.
"""
model_type = 'trVAE'
def __init__(
self,
trvae_model: Union[str, TRVAE],
condition: Optional[str] = None
):
if isinstance(trvae_model, str):
cond_params_path = os.path.join(trvae_model, "cond_params.pt")
adapt_params_path = os.path.join(trvae_model, "adapt_params.pkl")
self.cond_params = torch.load(cond_params_path)
with open(adapt_params_path, "rb") as handle:
self._adapt_params = pickle.load(handle)
self.condition = self._adapt_params['condition']
else:
self.cond_params = {}
self.condition = condition
cond_idx = trvae_model.conditions_.index(self.condition)
for name, p in trvae_model.model.state_dict().items():
if 'cond_L.weight' in name or 'theta' in name:
self.cond_params[name] = p[:, cond_idx].unsqueeze(-1)
self._adapt_params = {}
self._adapt_params['condition'] = self.condition
self._adapt_params['model_params'] = {}
self._adapt_params['model_params']['varnames'] = trvae_model.adata.var_names.tolist()
self._adapt_params['model_params']['hidden_layer_sizes'] = trvae_model.hidden_layer_sizes_
self._adapt_params['model_params']['latent_dim'] = trvae_model.latent_dim_
self._adapt_params['model_params']['recon_loss'] = trvae_model.recon_loss_
def _validate_params(self, varnames, init_params):
params = self._adapt_params['model_params'].copy()
adaptor_varnames = np.array(params.pop('varnames'), dtype=str)
if not np.array_equal(adaptor_varnames, varnames.astype(str)):
logger.warning(
"var_names for adata in the model does not match var_names of "
"adata used to train the model of the adaptor. For valid results, the vars "
"need to be the same and in the same order as the adata used to train the model."
)
for k in params:
if init_params[k] != params[k]:
raise ValueError(f'Parameter {k} in the adaptor isn\'t equal to {k} of the model.')
def save(
self,
dir_path: str,
overwrite: Optional[bool] = False
):
"""Save the state of the adaptor.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
"""
cond_params_path = os.path.join(dir_path, "cond_params.pt")
adapt_params_path = os.path.join(dir_path, "adapt_params.pkl")
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
f"{dir_path} already exists. Please provide an unexisting directory for saving."
)
torch.save(self.cond_params, cond_params_path)
with open(adapt_params_path, "wb") as f:
pickle.dump(self._adapt_params, f)
def attach_adaptors(
trvae_model: TRVAE,
adaptors: list,
only_new: bool = False
):
"""Attach the conditional weights from the adaptors to a trVAE model.
Attaches the conditional weights saved in the adaptors to a model,
expanding it to all conditions present in the adaptors.
Parameters
----------
trvae_model
A TRVAE class object. The object should have the same architecture
as the model which was used to save the conditional weights to the adaptors.
adaptors
List of adaptors to attach.
only_new
        Attach only conditional weights for new conditions.
Do not overwrite conditional weights for the conditions
which are already in the model (in `trvae_model.conditions_`).
"""
attr_dict = trvae_model._get_public_attributes()
init_params = deepcopy(TRVAE._get_init_params_from_dict(attr_dict))
adpt_conditions = []
cond_params = {}
for adaptor in adaptors:
if isinstance(adaptor, str):
adaptor = Adaptor(adaptor)
adaptor._validate_params(trvae_model.adata.var_names, init_params)
adpt_conditions.append(adaptor.condition)
for k, p in adaptor.cond_params.items():
if k not in cond_params:
cond_params[k] = p.clone()
else:
cond_params[k] = torch.cat([cond_params[k], p], dim=-1)
inds_exist, inds_old, inds_new = [], [], []
conditions = init_params['conditions']
for i, c in enumerate(adpt_conditions):
if c not in conditions:
inds_new.append(i)
else:
inds_exist.append(i)
inds_old.append(conditions.index(c))
init_params['conditions'] += [adpt_conditions[i] for i in inds_new]
new_model = TRVAE(trvae_model.adata, **init_params)
state_dict = trvae_model.model.state_dict().copy()
for k, ten in cond_params.items():
new_ten = state_dict[k]
if not only_new and len(inds_exist) > 0:
new_ten[:, inds_old] = ten[:, inds_exist]
if len(inds_new) > 0:
state_dict[k] = torch.cat([new_ten, ten[:, inds_new]], dim=-1)
new_model.model.load_state_dict(state_dict)
return new_model
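# Example workflow (a sketch; `ref_model`, `query_model` and the paths are
# hypothetical): save the conditional weights of one condition as an adaptor,
# then attach it to another model with the same architecture.
#
#   adaptor = Adaptor(ref_model, condition="batch_1")
#   adaptor.save("adaptors/batch_1", overwrite=True)
#   expanded_model = attach_adaptors(query_model, ["adaptors/batch_1"])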
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/trvae/adaptors.py
| 0.878079 | 0.365825 |
adaptors.py
|
pypi
|
from typing import Optional
import torch
import torch.nn as nn
from torch.distributions import Normal, kl_divergence
import torch.nn.functional as F
from .modules import Encoder, Decoder
from .losses import mse, mmd, zinb, nb
from ._utils import one_hot_encoder
from ..base._base import CVAELatentsModelMixin
class trVAE(nn.Module, CVAELatentsModelMixin):
"""ScArches model class. This class contains the implementation of Conditional Variational Auto-encoder.
Parameters
----------
input_dim: Integer
Number of input features (i.e. gene in case of scRNA-seq).
conditions: List
List of Condition names that the used data will contain to get the right encoding when used after reloading.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Bottleneck layer (z) size.
dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
use_mmd: Boolean
If 'True' an additional MMD loss will be calculated on the latent dim. 'z' or the first decoder layer 'y'.
mmd_on: String
Choose on which layer MMD loss will be calculated on if 'use_mmd=True': 'z' for latent dim or 'y' for first
decoder layer.
mmd_boundary: Integer or None
Choose on how many conditions the MMD loss should be calculated on. If 'None' MMD will be calculated on all
conditions.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
beta: Float
Scaling Factor for MMD loss. Higher beta values result in stronger batch-correction at a cost of worse biological variation.
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
"""
def __init__(self,
input_dim: int,
conditions: list,
hidden_layer_sizes: list = [256, 64],
latent_dim: int = 10,
dr_rate: float = 0.05,
use_mmd: bool = False,
mmd_on: str = 'z',
mmd_boundary: Optional[int] = None,
recon_loss: Optional[str] = 'nb',
beta: float = 1,
use_bn: bool = False,
use_ln: bool = True,
):
super().__init__()
assert isinstance(hidden_layer_sizes, list)
assert isinstance(latent_dim, int)
assert isinstance(conditions, list)
assert recon_loss in ["mse", "nb", "zinb"], "'recon_loss' must be 'mse', 'nb' or 'zinb'"
print("\nINITIALIZING NEW NETWORK..............")
self.input_dim = input_dim
self.latent_dim = latent_dim
self.n_conditions = len(conditions)
self.conditions = conditions
self.condition_encoder = {k: v for k, v in zip(conditions, range(len(conditions)))}
self.cell_type_encoder = None
self.recon_loss = recon_loss
self.mmd_boundary = mmd_boundary
self.use_mmd = use_mmd
self.freeze = False
self.beta = beta
self.use_bn = use_bn
self.use_ln = use_ln
self.mmd_on = mmd_on
self.dr_rate = dr_rate
if self.dr_rate > 0:
self.use_dr = True
else:
self.use_dr = False
if recon_loss in ["nb", "zinb"]:
self.theta = torch.nn.Parameter(torch.randn(self.input_dim, self.n_conditions))
else:
self.theta = None
self.hidden_layer_sizes = hidden_layer_sizes
encoder_layer_sizes = self.hidden_layer_sizes.copy()
encoder_layer_sizes.insert(0, self.input_dim)
decoder_layer_sizes = self.hidden_layer_sizes.copy()
decoder_layer_sizes.reverse()
decoder_layer_sizes.append(self.input_dim)
self.encoder = Encoder(encoder_layer_sizes,
self.latent_dim,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
self.n_conditions)
self.decoder = Decoder(decoder_layer_sizes,
self.latent_dim,
self.recon_loss,
self.use_bn,
self.use_ln,
self.use_dr,
self.dr_rate,
self.n_conditions)
def forward(self, x=None, batch=None, sizefactor=None, labeled=None):
x_log = torch.log(1 + x)
if self.recon_loss == 'mse':
x_log = x
z1_mean, z1_log_var = self.encoder(x_log, batch)
z1 = self.sampling(z1_mean, z1_log_var)
outputs = self.decoder(z1, batch)
if self.recon_loss == "mse":
recon_x, y1 = outputs
recon_loss = mse(recon_x, x_log).sum(dim=-1).mean()
elif self.recon_loss == "zinb":
dec_mean_gamma, dec_dropout, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(dec_mean_gamma.size(0), dec_mean_gamma.size(1))
dec_mean = dec_mean_gamma * size_factor_view
dispersion = F.linear(one_hot_encoder(batch, self.n_conditions), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = -zinb(x=x, mu=dec_mean, theta=dispersion, pi=dec_dropout).sum(dim=-1).mean()
elif self.recon_loss == "nb":
dec_mean_gamma, y1 = outputs
size_factor_view = sizefactor.unsqueeze(1).expand(dec_mean_gamma.size(0), dec_mean_gamma.size(1))
dec_mean = dec_mean_gamma * size_factor_view
dispersion = F.linear(one_hot_encoder(batch, self.n_conditions), self.theta)
dispersion = torch.exp(dispersion)
recon_loss = -nb(x=x, mu=dec_mean, theta=dispersion).sum(dim=-1).mean()
z1_var = torch.exp(z1_log_var) + 1e-4
kl_div = kl_divergence(
Normal(z1_mean, torch.sqrt(z1_var)),
Normal(torch.zeros_like(z1_mean), torch.ones_like(z1_var))
).sum(dim=1).mean()
mmd_loss = torch.tensor(0.0, device=z1.device)
if self.use_mmd:
if self.mmd_on == "z":
mmd_loss = mmd(z1, batch,self.n_conditions, self.beta, self.mmd_boundary)
else:
mmd_loss = mmd(y1, batch,self.n_conditions, self.beta, self.mmd_boundary)
return recon_loss, kl_div, mmd_loss
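# Smoke-test sketch with random counts (toy data). Assumptions: `sampling`
# is provided by CVAELatentsModelMixin, and `one_hot_encoder` accepts a 1-D
# tensor of condition ids, as it does when called from the trainer.
if __name__ == "__main__":
    model = trVAE(input_dim=100, conditions=["a", "b"], recon_loss="nb")
    _x = torch.randint(0, 20, (8, 100)).float()
    _batch = torch.randint(0, 2, (8,))
    recon_loss, kl_div, mmd_loss = model(_x, _batch, sizefactor=_x.sum(1))
    print(recon_loss.item(), kl_div.item(), mmd_loss.item())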
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/trvae/trvae.py
| 0.966418 | 0.595757 |
trvae.py
|
pypi
|
import inspect
import os
import torch
import pickle
import numpy as np
from anndata import AnnData, read
from copy import deepcopy
from typing import Optional, Union
from .trvae import trVAE
from ...trainers.trvae.unsupervised import trVAETrainer
from ..base._utils import _validate_var_names
from ..base._base import BaseMixin, SurgeryMixin, CVAELatentsMixin
class TRVAE(BaseMixin, SurgeryMixin, CVAELatentsMixin):
"""Model for scArches class. This class contains the implementation of Conditional Variational Auto-encoder.
Parameters
----------
adata: : `~anndata.AnnData`
Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log transformed data
for 'mse' loss.
condition_key: String
column name of conditions in `adata.obs` data frame.
conditions: List
List of Condition names that the used data will contain to get the right encoding when used after reloading.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Bottleneck layer (z) size.
dr_rate: Float
        Dropout rate applied to all layers; if `dr_rate` == 0 no dropout will be applied.
use_mmd: Boolean
If 'True' an additional MMD loss will be calculated on the latent dim. 'z' or the first decoder layer 'y'.
mmd_on: String
Choose on which layer MMD loss will be calculated on if 'use_mmd=True': 'z' for latent dim or 'y' for first
decoder layer.
mmd_boundary: Integer or None
Choose on how many conditions the MMD loss should be calculated on. If 'None' MMD will be calculated on all
conditions.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
beta: Float
Scaling Factor for MMD loss
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
"""
def __init__(
self,
adata: AnnData,
condition_key: str = None,
conditions: Optional[list] = None,
hidden_layer_sizes: list = [256, 64],
latent_dim: int = 10,
dr_rate: float = 0.05,
use_mmd: bool = True,
mmd_on: str = 'z',
mmd_boundary: Optional[int] = None,
recon_loss: Optional[str] = 'nb',
beta: float = 1,
use_bn: bool = False,
use_ln: bool = True,
):
self.adata = adata
self.condition_key_ = condition_key
if conditions is None:
if condition_key is not None:
self.conditions_ = adata.obs[condition_key].unique().tolist()
else:
self.conditions_ = []
else:
self.conditions_ = conditions
self.hidden_layer_sizes_ = hidden_layer_sizes
self.latent_dim_ = latent_dim
self.dr_rate_ = dr_rate
self.use_mmd_ = use_mmd
self.mmd_on_ = mmd_on
self.mmd_boundary_ = mmd_boundary
self.recon_loss_ = recon_loss
self.beta_ = beta
self.use_bn_ = use_bn
self.use_ln_ = use_ln
self.input_dim_ = adata.n_vars
self.model = trVAE(
self.input_dim_,
self.conditions_,
self.hidden_layer_sizes_,
self.latent_dim_,
self.dr_rate_,
self.use_mmd_,
self.mmd_on_,
self.mmd_boundary_,
self.recon_loss_,
self.beta_,
self.use_bn_,
self.use_ln_,
)
self.is_trained_ = False
self.trainer = None
def train(
self,
n_epochs: int = 400,
lr: float = 1e-3,
eps: float = 0.01,
**kwargs
):
"""Train the model.
Parameters
----------
n_epochs
Number of epochs for training the model.
lr
Learning rate for training the model.
eps
torch.optim.Adam eps parameter
kwargs
kwargs for the TrVAE trainer.
"""
self.trainer = trVAETrainer(
self.model,
self.adata,
condition_key=self.condition_key_,
**kwargs)
self.trainer.train(n_epochs, lr, eps)
self.is_trained_ = True
@classmethod
def _get_init_params_from_dict(cls, dct):
init_params = {
'condition_key': dct['condition_key_'],
'conditions': dct['conditions_'],
'hidden_layer_sizes': dct['hidden_layer_sizes_'],
'latent_dim': dct['latent_dim_'],
'dr_rate': dct['dr_rate_'],
'use_mmd': dct['use_mmd_'],
'mmd_on': dct['mmd_on_'],
'mmd_boundary': dct['mmd_boundary_'],
'recon_loss': dct['recon_loss_'],
'beta': dct['beta_'],
'use_bn': dct['use_bn_'],
'use_ln': dct['use_ln_'],
}
return init_params
@classmethod
def _validate_adata(cls, adata, dct):
if adata.n_vars != dct['input_dim_']:
raise ValueError("Incorrect var dimension")
adata_conditions = adata.obs[dct['condition_key_']].unique().tolist()
if not set(adata_conditions).issubset(dct['conditions_']):
raise ValueError("Incorrect conditions")
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/models/trvae/trvae_model.py
| 0.909601 | 0.488832 |
trvae_model.py
|
pypi
|
from typing import Tuple
import numpy as np
import torch
import anndata
from ..models.trvae.trvae import trVAE
from ..trainers.trvae.unsupervised import trVAETrainer
def trvae_operate(
network: trVAE,
data: anndata,
condition_key: str = None,
size_factor_key: str = None,
n_epochs: int = 20,
freeze: bool = True,
freeze_expression: bool = True,
remove_dropout: bool = True,
) -> Tuple[trVAE, trVAETrainer]:
"""Transfer Learning function for new data. Uses old trained Network and expands it for new conditions.
Parameters
----------
network: trVAE
        A trained trVAE model object.
data: Anndata
Query anndata object.
condition_key: String
Key where the conditions in the data can be found.
size_factor_key: String
Key where the size_factors in the data can be found.
n_epochs: Integer
Number of epochs for training the network on query data.
freeze: Boolean
If 'True' freezes every part of the network except the first layers of encoder/decoder.
freeze_expression: Boolean
If 'True' freeze every weight in first layers except the condition weights.
remove_dropout: Boolean
If 'True' remove Dropout for Transfer Learning.
Returns
-------
    new_network: trVAE
        New network trained on query data.
    new_trainer: trVAETrainer
        Trainer for the new network.
"""
conditions = network.conditions
new_conditions = []
data_conditions = data.obs[condition_key].unique().tolist()
# Check if new conditions are already known
for item in data_conditions:
if item not in conditions:
new_conditions.append(item)
n_new_conditions = len(new_conditions)
# Add new conditions to overall conditions
for condition in new_conditions:
conditions.append(condition)
# Update DR Rate
new_dr = network.dr_rate
if remove_dropout:
new_dr = 0.0
print("Surgery to get new Network...")
new_network = trVAE(
network.input_dim,
conditions=conditions,
hidden_layer_sizes=network.hidden_layer_sizes,
latent_dim=network.latent_dim,
dr_rate=new_dr,
use_mmd=network.use_mmd,
mmd_boundary=network.mmd_boundary,
recon_loss=network.recon_loss,
)
# Expand First Layer weights of encoder/decoder of old network by new conditions
encoder_input_weights = network.encoder.FC.L0.cond_L.weight
to_be_added_encoder_input_weights = np.random.randn(encoder_input_weights.size()[0], n_new_conditions) * np.sqrt(
2 / (encoder_input_weights.size()[0] + 1 + encoder_input_weights.size()[1]))
    device = next(network.parameters()).device
    to_be_added_encoder_input_weights = torch.from_numpy(to_be_added_encoder_input_weights).float().to(device)
network.encoder.FC.L0.cond_L.weight.data = torch.cat((encoder_input_weights,
to_be_added_encoder_input_weights), 1)
decoder_input_weights = network.decoder.FirstL.L0.cond_L.weight
to_be_added_decoder_input_weights = np.random.randn(decoder_input_weights.size()[0], n_new_conditions) * np.sqrt(
2 / (decoder_input_weights.size()[0] + 1 + decoder_input_weights.size()[1]))
    to_be_added_decoder_input_weights = torch.from_numpy(to_be_added_decoder_input_weights).float().to(device)
network.decoder.FirstL.L0.cond_L.weight.data = torch.cat((decoder_input_weights,
to_be_added_decoder_input_weights), 1)
# Set the weights of new network to old network weights
new_network.load_state_dict(network.state_dict())
# Freeze parts of the network
if freeze:
new_network.freeze = True
for name, p in new_network.named_parameters():
p.requires_grad = False
if freeze_expression:
if 'cond_L.weight' in name:
p.requires_grad = True
else:
if "L0" in name or "B0" in name:
p.requires_grad = True
new_trainer = trVAETrainer(
new_network,
data,
condition_key=condition_key,
size_factor_key=size_factor_key,
batch_size=1024,
n_samples=4096
)
new_trainer.train(
n_epochs=n_epochs,
lr=0.001
)
return new_network, new_trainer
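# Usage sketch (hypothetical objects): expand a pretrained reference model
# to the query batches and fine-tune only the conditional weights.
#
#   new_net, new_trainer = trvae_operate(
#       pretrained_trvae, query_adata,
#       condition_key="batch", size_factor_key="size_factors",
#       n_epochs=20, freeze=True, freeze_expression=True)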
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/surgery/trvae.py
| 0.808446 | 0.579162 |
trvae.py
|
pypi
|
import os
import requests
def download_file(link: str,
save_path: str = None,
make_dir: bool = False
):
"""Downloads the file in the ``link`` and saves it in ``save_path``.
Parameters
----------
link: str
Direct downloadable link.
save_path: str
Path with the name and extension of downloaded file.
make_dir: bool
Whether to make the ``save_path`` if it does not exist in the system.
Returns
-------
file_path: str
Full path with name and extension of downloaded file.
http_response: :class:`~http.client.HTTPMessage`
``HttpMessage`` object containing status code and information about the http request.
"""
from urllib.request import urlretrieve
if make_dir:
path = os.path.dirname(save_path)
os.makedirs(path, exist_ok=True)
    else:
        if not os.path.isdir(os.path.dirname(save_path)):
            raise ValueError("`save_path` is not a valid path. You may want to try setting `make_dir` to True.")
if not os.path.exists(save_path):
print(f"Downloading...", end="\t")
file_path, http_response = urlretrieve(link, save_path)
print(f"Finished! File has been successfully saved to {file_path}.")
else:
file_path, http_response = save_path, None
print("File already exists!")
return file_path, http_response
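# Usage sketch (the URL and path below are placeholders, not real assets):
#
#   path, _ = download_file(
#       "https://example.org/model.zip", "downloads/model.zip", make_dir=True)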
def upload_file(file_path: str,
deposition_id: str,
access_token: str):
"""Downloads the file in the ``link`` and saves it in ``save_path``.
Parameters
----------
file_path: str
Full path with the name and extension of the file you want to upload.
deposition_id: str
ID of a deposition in your Zenodo account.
access_token: str
Your Zenodo Access token.
Returns
-------
    download_link: str
        Direct download link for the uploaded file. Note that the link is usable only after you have
        published your deposition.
"""
file_name = file_path.split("/")[-1]
data = {
'filename': file_name,
}
    with open(file_path, 'rb') as f:
        r = requests.post(f'https://zenodo.org/api/deposit/depositions/{deposition_id}/files',
                          params={'access_token': access_token}, data=data, files={'file': f})
r_dict = r.json()
if r.status_code != 201:
raise Exception(r_dict['message'])
filename = r_dict['filename']
download_link = f"https://zenodo.org/record/{deposition_id}/files/{filename}?download=1"
return download_link
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/zenodo/file.py
| 0.628179 | 0.166438 |
file.py
|
pypi
|
from typing import Union
from ..models import TRVAE, SCVI, SCANVI, TOTALVI
from .file import *
from .deposition import *
from .zip import *
def upload_model(model: Union[TRVAE, SCVI, SCANVI, TOTALVI, str],
deposition_id: str,
access_token: str,
model_name: str = None):
"""Uploads trained ``model`` to Zenodo.
Parameters
----------
model: :class:`~scarches.models.TRVAE`, :class:`~scarches.models.SCVI`, :class:`~scarches.models.SCANVI`, :class:`~scarches.models.TOTALVI`, str
An instance of one of classes defined in ``scarches.models`` module or a path to a saved model.
deposition_id: str
ID of a deposition in your Zenodo account.
access_token: str
Your Zenodo access token.
model_name: str
An optional name of the model to upload
Returns
-------
download_link: str
        Generated direct download link for the uploaded model in the deposition. Please **note** that the link is usable only **after** you have published your deposition.
"""
    if isinstance(model, str):
        model_path = model
        if model_name is None:
            model_name = os.path.basename(os.path.normpath(model_path))
    else:
        if model_name is None:
            model_name = type(model).__name__
        model_path = f"tmp_{model_name}"
        model.save(model_path)
output_base_name = f"./tmp/scarches-{model_name}"
output_path = output_base_name + ".zip"
zip_model_directory(output_path=output_base_name, directory=model_path)
download_link = upload_file(file_path=output_path,
deposition_id=deposition_id,
access_token=access_token)
print("Model has been successfully uploaded")
return download_link
def download_model(download_link: str,
save_path: str = './',
make_dir: bool = False):
"""Downloads the zip file of the model in the ``link`` and saves it in ``save_path`` and extracts.
Parameters
----------
    download_link: str
        Direct downloadable link.
save_path: str
Directory path for downloaded file
make_dir: bool
Whether to make the ``save_path`` if it does not exist in the system.
Returns
-------
extract_dir: str
Full path to the folder of the model.
"""
if not save_path.endswith("/"):
save_path += "/"
if download_link != '':
file_path, response = download_file(download_link, f'{save_path}downloaded_model.zip', make_dir)
else:
raise Exception("Download link does not exist for the specified task")
if os.path.exists(file_path) and file_path.endswith(".zip"):
extract_dir = os.path.dirname(file_path)
unzip_model_directory(file_path, extract_dir=extract_dir)
else:
raise Exception("The model should be in zip archive")
return extract_dir
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/zenodo/__init__.py
| 0.836521 | 0.16492 |
__init__.py
|
pypi
|
import json
import requests
def create_deposition(access_token: str,
upload_type: str,
title: str,
description: str,
**kwargs):
"""Creates a deposition in your Zenodo account.
Parameters
----------
access_token: str
Your Zenodo access token.
    upload_type: str
        Zenodo upload type, e.g. 'software' or 'dataset'.
    title: str
        Title of the deposition.
    description: str
        Description of the deposition.
    kwargs:
        Additional metadata fields accepted by the Zenodo deposition API.
Returns
-------
deposition_id: str
ID of the created deposition.
"""
url = "https://zenodo.org/api/deposit/depositions"
headers = {"Content-Type": "application/json"}
data = {"metadata": {"upload_type": upload_type,
'title': title,
'description': description,
**kwargs}}
r = requests.post(url,
params={'access_token': access_token},
data=json.dumps(data),
headers=headers)
if r.status_code == 201:
print("New Deposition has been successfully created!")
return str(r.json()['id'])
else:
raise Exception(r.json()['message'])
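# End-to-end sketch (requires a real Zenodo access token; all values below
# are placeholders; `upload_file` lives in the sibling `file` module):
#
#   dep_id = create_deposition(ACCESS_TOKEN, "software",
#                              "My model", "scArches reference model")
#   link = upload_file("model.zip", dep_id, ACCESS_TOKEN)
#   publish_deposition(dep_id, ACCESS_TOKEN)   # link becomes usable here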
def update_deposition(deposition_id: str,
access_token: str,
metadata: dict):
"""Updates the existing deposition with ``deposition_id`` in your Zenodo account.
Parameters
----------
deposition_id: str
ID of a deposition in your Zenodo account.
access_token: str
Your Zenodo access token.
metadata: dict
"""
url = f"https://zenodo.org/api/deposit/depositions/{deposition_id}?access_token={access_token}"
headers = {"Content-Type": "application/json"}
r = requests.put(url, data=json.dumps(metadata), headers=headers)
if r.status_code == 200:
print("Deposition has been successfully updated!")
else:
raise Exception(r.json()['message'])
def delete_deposition(deposition_id: str,
access_token: str):
"""Deletes the existing deposition with ``deposition_id`` in your Zenodo account.
Parameters
----------
deposition_id: str
ID of a deposition in your Zenodo account.
access_token: str
Your Zenodo Access token.
"""
r = requests.delete(f'https://zenodo.org/api/deposit/depositions/{deposition_id}',
params={'access_token': access_token})
if r.status_code == 201:
print(f"Deposition with id = {deposition_id} has been successfullu deleted!")
else:
raise Exception(r.json()['message'])
def publish_deposition(deposition_id: str,
access_token: str):
"""Publishes the existing deposition with ``deposition_id`` in your Zenodo account.
Parameters
----------
deposition_id: str
ID of a deposition in your Zenodo account.
access_token: str
Your Zenodo access token.
"""
r = requests.post(f'https://zenodo.org/api/deposit/depositions/{deposition_id}/actions/publish',
params={'access_token': access_token})
if r.status_code == 202:
print(f"Deposition with id = {deposition_id} has been successfully published!")
else:
raise Exception(r.json()['message'])
def get_all_deposition_ids(access_token: str):
"""Gets list of all of deposition IDs existed in your Zenodo account.
Parameters
----------
access_token: str
Your Zenodo access token.
Returns
-------
deposition_ids: list
List of deposition IDs.
"""
r = requests.get('https://zenodo.org/api/deposit/depositions',
params={'access_token': access_token})
if r.status_code != 200:
raise Exception(r.json()['message'])
deposition_ids = []
for deposition_dict in r.json():
deposition_ids.append(str(deposition_dict['id']))
return deposition_ids
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/zenodo/deposition.py
| 0.628863 | 0.174551 |
deposition.py
|
pypi
|
import numpy as np
class EarlyStopping(object):
"""Class for EarlyStopping. This class contains the implementation of early stopping for TRVAE/CVAE training.
This early stopping class was inspired by:
Title: scvi-tools
Authors: Romain Lopez <[email protected]>,
Adam Gayoso <[email protected]>,
Galen Xing <[email protected]>
Date: 24th December 2020
Code version: 0.8.1
Availability: https://github.com/YosefLab/scvi-tools/blob/8f5a9cc362325abbb7be1e07f9523cfcf7e55ec0/scvi/core/trainers/trainer.py
Parameters
----------
    early_stopping_metric: String
        The metric/loss which the early stopping criterion gets calculated on.
    mode: String
        Either 'min' or 'max': whether an improvement means a decrease or an increase of the metric.
threshold: Float
The minimum value which counts as improvement.
patience: Integer
Number of epochs which are allowed to have no improvement until the training is stopped.
reduce_lr: Boolean
If 'True', the learning rate gets adjusted by 'lr_factor' after a given number of epochs with no
improvement.
lr_patience: Integer
Number of epochs which are allowed to have no improvement until the learning rate is adjusted.
lr_factor: Float
Scaling factor for adjusting the learning rate.
"""
def __init__(self,
early_stopping_metric: str = "val_unweighted_loss",
mode: str = "min",
threshold: float = 0,
patience: int = 20,
reduce_lr: bool = True,
lr_patience: int = 13,
lr_factor: float = 0.1):
self.early_stopping_metric = early_stopping_metric
self.mode = mode
self.threshold = threshold
self.patience = patience
self.reduce_lr = reduce_lr
self.lr_patience = lr_patience
self.lr_factor = lr_factor
self.epoch = 0
self.wait = 0
self.wait_lr = 0
self.current_performance = np.inf
if self.mode == "min":
self.best_performance = np.inf
self.best_performance_state = np.inf
else:
self.best_performance = -np.inf
self.best_performance_state = -np.inf
if patience == 0:
self.is_better = lambda a, b: True
self.step = lambda a: False
def step(self, scalar):
self.epoch += 1
if self.epoch < self.patience:
continue_training = True
lr_update = False
elif self.wait >= self.patience:
continue_training = False
lr_update = False
else:
if not self.reduce_lr:
lr_update = False
elif self.wait_lr >= self.lr_patience:
lr_update = True
self.wait_lr = 0
else:
lr_update = False
# Shift
self.current_performance = scalar
if self.mode == "min":
improvement = self.best_performance - self.current_performance
else:
improvement = self.current_performance - self.best_performance
# updating best performance
if improvement > 0:
self.best_performance = self.current_performance
if improvement < self.threshold:
self.wait += 1
self.wait_lr += 1
else:
self.wait = 0
self.wait_lr = 0
continue_training = True
if not continue_training:
print("\nStopping early: no improvement of more than " + str(self.threshold) +
" nats in " + str(self.patience) + " epochs")
print("If the early stopping criterion is too strong, "
"please instantiate it with different parameters in the train method.")
return continue_training, lr_update
def update_state(self, scalar):
if self.mode == "min":
improved = (self.best_performance_state - scalar) > 0
else:
improved = (scalar - self.best_performance_state) > 0
if improved:
self.best_performance_state = scalar
return improved
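if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): feed a validation
    # loss to the stopper once per epoch and react to its two flags. The loss
    # values below are synthetic.
    stopper = EarlyStopping(early_stopping_metric="val_loss", mode="min",
                            patience=3, lr_patience=2, lr_factor=0.1)
    lr = 1e-3
    for epoch, val_loss in enumerate([1.0, 0.8, 0.85, 0.9, 0.95, 1.0, 1.1]):
        if stopper.update_state(val_loss):
            pass  # here one would snapshot the best model weights
        continue_training, reduce_lr = stopper.step(val_loss)
        if reduce_lr:
            lr *= stopper.lr_factor
            print(f"epoch {epoch}: reducing learning rate to {lr}")
        if not continue_training:
            break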
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/utils/monitor.py
| 0.907661 | 0.534612 |
monitor.py
|
pypi
|
import scanpy as sc
import pandas as pd
import os
import numpy as np
import anndata
from sklearn.neighbors import KNeighborsTransformer
# These functions were created by Lisa Sikemma
def weighted_knn_trainer(train_adata, train_adata_emb, n_neighbors=50):
"""Trains a weighted KNN classifier on ``train_adata``.
Parameters
----------
train_adata: :class:`~anndata.AnnData`
        Annotated dataset to be used to train the KNN classifier.
train_adata_emb: str
Name of the obsm layer to be used for calculation of neighbors. If set to "X", anndata.X will be
used
n_neighbors: int
Number of nearest neighbors in KNN classifier.
"""
print(
f"Weighted KNN with n_neighbors = {n_neighbors} ... ",
end="",
)
k_neighbors_transformer = KNeighborsTransformer(
n_neighbors=n_neighbors,
mode="distance",
algorithm="brute",
metric="euclidean",
n_jobs=-1,
)
if train_adata_emb == "X":
train_emb = train_adata.X
elif train_adata_emb in train_adata.obsm.keys():
train_emb = train_adata.obsm[train_adata_emb]
else:
raise ValueError(
"train_adata_emb should be set to either 'X' or the name of the obsm layer to be used!"
)
k_neighbors_transformer.fit(train_emb)
return k_neighbors_transformer
def weighted_knn_transfer(
query_adata,
query_adata_emb,
ref_adata_obs,
label_keys,
knn_model,
threshold=1,
pred_unknown=False,
mode="package",
):
"""Annotates ``query_adata`` cells with an input trained weighted KNN classifier.
Parameters
----------
query_adata: :class:`~anndata.AnnData`
        Annotated query dataset whose cells will be annotated by the KNN classifier.
query_adata_emb: str
Name of the obsm layer to be used for label transfer. If set to "X",
query_adata.X will be used
ref_adata_obs: :class:`pd.DataFrame`
obs of ref Anndata
label_keys: str
Names of the columns to be used as target variables (e.g. cell_type) in ``query_adata``.
knn_model: :class:`~sklearn.neighbors._graph.KNeighborsTransformer`
knn model trained on reference adata with weighted_knn_trainer function
threshold: float
        Threshold of uncertainty used for annotating cells as "Unknown". Cells with
        uncertainties higher than this value will be annotated as "Unknown".
Set to 1 to keep all predictions. This enables one to later on play
with thresholds.
pred_unknown: bool
``False`` by default. Whether to annotate any cell as "unknown" or not.
If `False`, ``threshold`` will not be used and each cell will be annotated
with the label which is the most common in its ``n_neighbors`` nearest cells.
mode: str
Has to be one of "paper" or "package". If mode is set to "package",
uncertainties will be 1 - P(pred_label), otherwise it will be 1 - P(true_label).
"""
if not type(knn_model) == KNeighborsTransformer:
raise ValueError(
"knn_model should be of type sklearn.neighbors._graph.KNeighborsTransformer!"
)
if query_adata_emb == "X":
query_emb = query_adata.X
elif query_adata_emb in query_adata.obsm.keys():
query_emb = query_adata.obsm[query_adata_emb]
else:
raise ValueError(
"query_adata_emb should be set to either 'X' or the name of the obsm layer to be used!"
)
top_k_distances, top_k_indices = knn_model.kneighbors(X=query_emb)
stds = np.std(top_k_distances, axis=1)
stds = (2.0 / stds) ** 2
stds = stds.reshape(-1, 1)
top_k_distances_tilda = np.exp(-np.true_divide(top_k_distances, stds))
weights = top_k_distances_tilda / np.sum(
top_k_distances_tilda, axis=1, keepdims=True
)
cols = ref_adata_obs.columns[ref_adata_obs.columns.str.startswith(label_keys)]
uncertainties = pd.DataFrame(columns=cols, index=query_adata.obs_names)
pred_labels = pd.DataFrame(columns=cols, index=query_adata.obs_names)
for i in range(len(weights)):
for j in cols:
y_train_labels = ref_adata_obs[j].values
unique_labels = np.unique(y_train_labels[top_k_indices[i]])
best_label, best_prob = None, 0.0
for candidate_label in unique_labels:
candidate_prob = weights[
i, y_train_labels[top_k_indices[i]] == candidate_label
].sum()
if best_prob < candidate_prob:
best_prob = candidate_prob
best_label = candidate_label
if pred_unknown:
if best_prob >= threshold:
pred_label = best_label
else:
pred_label = "Unknown"
else:
pred_label = best_label
if mode == "package":
uncertainties.iloc[i][j] = (max(1 - best_prob, 0))
else:
raise Exception("Inquery Mode!")
pred_labels.iloc[i][j] = (pred_label)
print("finished!")
return pred_labels, uncertainties
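if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). `ref` and `query`
    # are synthetic AnnData objects sharing a latent embedding in
    # .obsm["X_latent"]; in practice this embedding comes from a trained model.
    ref = anndata.AnnData(np.random.rand(200, 10).astype(np.float32))
    ref.obsm["X_latent"] = ref.X
    ref.obs["cell_type"] = np.random.choice(["B", "T"], size=200)
    query = anndata.AnnData(np.random.rand(50, 10).astype(np.float32))
    query.obsm["X_latent"] = query.X
    knn_model = weighted_knn_trainer(ref, "X_latent", n_neighbors=15)
    pred_labels, uncertainties = weighted_knn_transfer(
        query, "X_latent", ref.obs, "cell_type", knn_model
    )
    print(pred_labels.head())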
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/utils/knn.py
| 0.868255 | 0.579609 |
knn.py
|
pypi
|
import scanpy as sc
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from ..dataset import remove_sparsity
def opt_louvain(adata, label_key, cluster_key, function=None, resolutions=None,
inplace=True, plot=False, verbose=True, **kwargs):
"""
This Louvain Clustering method was taken from scIB:
Title: scIB
Authors: Malte Luecken,
Maren Buettner,
Daniel Strobl,
Michaela Mueller
Date: 4th October 2020
Code version: 0.2.0
Availability: https://github.com/theislab/scib/blob/master/scIB/clustering.py
params:
label_key: name of column in adata.obs containing biological labels to be
optimised against
cluster_key: name of column to be added to adata.obs during clustering.
Will be overwritten if exists and `force=True`
function: function that computes the cost to be optimised over. Must take as
arguments (adata, group1, group2, **kwargs) and returns a number for maximising
        resolutions: list of resolutions to be optimised over. If `resolutions=None`,
default resolutions of 20 values ranging between 0.1 and 2 will be used
returns:
res_max: resolution of maximum score
score_max: maximum score
score_all: `pd.DataFrame` containing all scores at resolutions. Can be used to plot the score profile.
clustering: only if `inplace=False`, return cluster assignment as `pd.Series`
plot: if `plot=True` plot the score profile over resolution
"""
adata = remove_sparsity(adata)
if resolutions is None:
n = 20
resolutions = [2 * x / n for x in range(1, n + 1)]
score_max = 0
res_max = resolutions[0]
clustering = None
score_all = []
# maren's edit - recompute neighbors if not existing
try:
adata.uns['neighbors']
except KeyError:
if verbose:
            print('computing neighbours for opt_cluster')
sc.pp.neighbors(adata)
for res in resolutions:
sc.tl.louvain(adata, resolution=res, key_added=cluster_key)
score = function(adata, label_key, cluster_key, **kwargs)
score_all.append(score)
if score_max < score:
score_max = score
res_max = res
clustering = adata.obs[cluster_key]
del adata.obs[cluster_key]
if verbose:
print(f'optimised clustering against {label_key}')
print(f'optimal cluster resolution: {res_max}')
print(f'optimal score: {score_max}')
score_all = pd.DataFrame(zip(resolutions, score_all), columns=('resolution', 'score'))
if plot:
# score vs. resolution profile
sns.lineplot(data=score_all, x='resolution', y='score').set_title('Optimal cluster resolution profile')
plt.show()
if inplace:
adata.obs[cluster_key] = clustering
return res_max, score_max, score_all
else:
return res_max, score_max, score_all, clustering
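if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Uses a synthetic
    # AnnData and nmi_helper from scarches.metrics.metrics as the score to be
    # optimised; requires the `louvain` package for sc.tl.louvain.
    import numpy as np
    from scarches.metrics.metrics import nmi_helper

    adata = sc.AnnData(np.random.rand(300, 50).astype(np.float32))
    adata.obs["cell_type"] = np.random.choice(["A", "B", "C"], size=300)
    res_max, score_max, score_all = opt_louvain(
        adata, label_key="cell_type", cluster_key="louvain_opt",
        function=nmi_helper, plot=True, verbose=True, inplace=True,
    )
    print(score_all)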
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/metrics/clustering.py
| 0.788176 | 0.490907 |
clustering.py
|
pypi
|
import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.metrics import silhouette_score, normalized_mutual_info_score, silhouette_samples
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from ..dataset import remove_sparsity
from .clustering import opt_louvain
def entropy_batch_mixing(adata, label_key='batch',
n_neighbors=50, n_pools=50, n_samples_per_pool=100):
"""Computes Entory of Batch mixing metric for ``adata`` given the batch column name.
Parameters
----------
adata: :class:`~anndata.AnnData`
Annotated dataset.
label_key: str
Name of the column which contains information about different studies in ``adata.obs`` data frame.
n_neighbors: int
Number of nearest neighbors.
n_pools: int
Number of EBM computation which will be averaged.
n_samples_per_pool: int
Number of samples to be used in each pool of execution.
Returns
-------
score: float
EBM score. A float between zero and one.
"""
adata = remove_sparsity(adata)
n_cat = len(adata.obs[label_key].unique().tolist())
print(f'Calculating EBM with n_cat = {n_cat}')
neighbors = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(adata.X)
indices = neighbors.kneighbors(adata.X, return_distance=False)[:, 1:]
batch_indices = np.vectorize(lambda i: adata.obs[label_key].values[i])(indices)
entropies = np.apply_along_axis(__entropy_from_indices, axis=1, arr=batch_indices, n_cat=n_cat)
# average n_pools entropy results where each result is an average of n_samples_per_pool random samples.
if n_pools == 1:
score = np.mean(entropies)
else:
score = np.mean([
np.mean(entropies[np.random.choice(len(entropies), size=n_samples_per_pool)])
for _ in range(n_pools)
])
return score
def nmi(adata, label_key, verbose=False, nmi_method='arithmetic'):
cluster_key = 'cluster'
opt_louvain(adata, label_key=label_key, cluster_key=cluster_key, function=nmi_helper,
plot=False, verbose=verbose, inplace=True)
print('NMI...')
nmi_score = nmi_helper(adata, group1=cluster_key, group2=label_key, method=nmi_method)
return nmi_score
def asw(adata, label_key, batch_key):
print('silhouette score...')
sil_global = silhouette(adata, group_key=label_key, metric='euclidean')
_, sil_clus = silhouette_batch(adata, batch_key=batch_key, group_key=label_key,
metric='euclidean', verbose=False)
sil_clus = sil_clus['silhouette_score'].mean()
return sil_clus, sil_global
def knn_purity(adata, label_key, n_neighbors=30):
"""Computes KNN Purity metric for ``adata`` given the batch column name.
Parameters
----------
adata: :class:`~anndata.AnnData`
Annotated dataset.
label_key: str
Name of the column which contains information about different studies in ``adata.obs`` data frame.
n_neighbors: int
Number of nearest neighbors.
Returns
-------
score: float
KNN purity score. A float between 0 and 1.
"""
adata = remove_sparsity(adata)
labels = LabelEncoder().fit_transform(adata.obs[label_key].to_numpy())
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(adata.X)
indices = nbrs.kneighbors(adata.X, return_distance=False)[:, 1:]
neighbors_labels = np.vectorize(lambda i: labels[i])(indices)
    # per-cell purity scores
scores = ((neighbors_labels - labels.reshape(-1, 1)) == 0).mean(axis=1)
res = [
np.mean(scores[labels == i]) for i in np.unique(labels)
] # per cell-type purity
return np.mean(res)
def __entropy_from_indices(indices, n_cat):
return entropy(np.unique(indices, return_counts=True)[1].astype(np.int32), base=n_cat)
def nmi_helper(adata, group1, group2, method="arithmetic"):
"""
This NMI function was taken from scIB:
Title: scIB
Authors: Malte Luecken,
Maren Buettner,
Daniel Strobl,
Michaela Mueller
Date: 4th October 2020
Code version: 0.2.0
Availability: https://github.com/theislab/scib/blob/master/scIB/metrics.py
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
return:
normalized mutual information (NMI)
"""
adata = remove_sparsity(adata)
if isinstance(group1, str):
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
labels = adata.obs[group2].values
labels_encoded = LabelEncoder().fit_transform(labels)
group2 = labels_encoded
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = normalized_mutual_info_score(group1, group2, average_method=method)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
def silhouette(adata, group_key, metric='euclidean', scale=True):
"""
This ASW function was taken from scIB:
Title: scIB
Authors: Malte Luecken,
Maren Buettner,
Daniel Strobl,
Michaela Mueller
Date: 4th October 2020
Code version: 0.2.0
Availability: https://github.com/theislab/scib/blob/master/scIB/metrics.py
    Wrapper for the sklearn silhouette function. Values range from [-1, 1] with 1 being an
    ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells.
"""
adata = remove_sparsity(adata)
labels = adata.obs[group_key].values
labels_encoded = LabelEncoder().fit_transform(labels)
asw = silhouette_score(adata.X, labels_encoded, metric=metric)
if scale:
asw = (asw + 1)/2
return asw
def silhouette_batch(adata, batch_key, group_key, metric='euclidean', verbose=True, scale=True):
"""
This ASW function was taken from scIB:
Title: scIB
Authors: Malte Luecken,
Maren Buettner,
Daniel Strobl,
Michaela Mueller
Date: 4th October 2020
Code version: 0.2.0
Availability: https://github.com/theislab/scib/blob/master/scIB/metrics.py
Silhouette score of batch labels subsetted for each group.
params:
batch_key: batches to be compared against
group_key: group labels to be subsetted by e.g. cell type
        metric: see sklearn silhouette score
    returns:
        all scores: absolute silhouette scores per group label
        group means: mean silhouette score per group
"""
adata = remove_sparsity(adata)
glob_batches = adata.obs[batch_key].values
batch_enc = LabelEncoder()
batch_enc.fit(glob_batches)
sil_all = pd.DataFrame(columns=['group', 'silhouette_score'])
for group in adata.obs[group_key].unique():
adata_group = adata[adata.obs[group_key] == group]
if adata_group.obs[batch_key].nunique() == 1:
continue
batches = batch_enc.transform(adata_group.obs[batch_key])
sil_per_group = silhouette_samples(adata_group.X, batches, metric=metric)
# take only absolute value
sil_per_group = [abs(i) for i in sil_per_group]
if scale:
# scale s.t. highest number is optimal
sil_per_group = [1 - i for i in sil_per_group]
d = pd.DataFrame({'group': [group] * len(sil_per_group), 'silhouette_score': sil_per_group})
        sil_all = pd.concat([sil_all, d])  # DataFrame.append was removed in pandas 2.0
sil_all = sil_all.reset_index(drop=True)
sil_means = sil_all.groupby('group').mean()
if verbose:
print(f'mean silhouette per cell: {sil_means}')
return sil_all, sil_means
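if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): score a synthetic
    # latent space. In practice the AnnData would hold a model's latent
    # representation with real batch and cell-type annotations.
    import scanpy as sc

    latent = sc.AnnData(np.random.rand(300, 10).astype(np.float32))
    latent.obs['batch'] = np.random.choice(['batch1', 'batch2'], size=300)
    latent.obs['cell_type'] = np.random.choice(['A', 'B', 'C'], size=300)
    print('EBM:', entropy_batch_mixing(latent, label_key='batch', n_neighbors=15))
    print('KNN purity:', knn_purity(latent, label_key='cell_type', n_neighbors=15))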
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/metrics/metrics.py
| 0.92211 | 0.646976 |
metrics.py
|
pypi
|
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from typing import Union
def plot_abs_bfs_key(scores, terms, key, n_points=30, lim_val=2.3, fontsize=8, scale_y=2, yt_step=0.3,
title=None, ax=None):
txt_args = dict(
rotation='vertical',
verticalalignment='bottom',
horizontalalignment='center',
fontsize=fontsize,
)
ax = ax if ax is not None else plt.axes()
ax.grid(False)
bfs = np.abs(scores[key]['bf'])
srt = np.argsort(bfs)[::-1][:n_points]
top = bfs.max()
ax.set_ylim(top=top * scale_y)
yt = np.arange(0, top * 1.1, yt_step)
ax.set_yticks(yt)
ax.set_xlim(0.1, n_points + 0.9)
xt = np.arange(0, n_points + 1, 5)
xt[0] = 1
ax.set_xticks(xt)
for i, (bf, term) in enumerate(zip(bfs[srt], terms[srt])):
ax.text(i+1, bf, term, **txt_args)
ax.axhline(y=lim_val, color='red', linestyle='--', label='')
ax.set_xlabel("Rank")
ax.set_ylabel("Absolute log bayes factors")
ax.set_title(key if title is None else title)
return ax.figure
def plot_abs_bfs(adata, scores_key="bf_scores", terms: Union[str, list]="terms",
keys=None, n_cols=3, **kwargs):
"""\
Plot the absolute bayes scores rankings.
"""
scores = adata.uns[scores_key]
if isinstance(terms, str):
terms = np.asarray(adata.uns[terms])
else:
terms = np.asarray(terms)
if len(terms) != len(next(iter(scores.values()))["bf"]):
raise ValueError('Incorrect length of terms.')
if keys is None:
keys = list(scores.keys())
if len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return plot_abs_bfs_key(scores, terms, keys, **kwargs)
n_keys = len(keys)
if n_keys <= n_cols:
n_cols = n_keys
n_rows = 1
else:
n_rows = int(np.ceil(n_keys / n_cols))
fig, axs = plt.subplots(n_rows, n_cols)
for key, ix in zip(keys, product(range(n_rows), range(n_cols))):
if n_rows == 1:
ix = ix[1]
elif n_cols == 1:
ix = ix[0]
plot_abs_bfs_key(scores, terms, key, ax=axs[ix], **kwargs)
n_inactive = n_rows * n_cols - n_keys
if n_inactive > 0:
for i in range(n_inactive):
axs[n_rows-1, -(i+1)].axis('off')
return fig
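if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Builds a synthetic
    # AnnData carrying Bayes-factor scores in .uns with the structure this module
    # expects; with real data these fields are produced by the model.
    import anndata

    adata = anndata.AnnData(np.random.rand(10, 5).astype(np.float32))
    adata.uns["terms"] = [f"term_{i}" for i in range(40)]
    adata.uns["bf_scores"] = {
        "groupA": {"bf": np.random.randn(40) * 3},
        "groupB": {"bf": np.random.randn(40) * 3},
    }
    fig = plot_abs_bfs(adata, n_points=20)
    plt.show()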
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/plotting/terms_scores.py
| 0.859103 | 0.359378 |
terms_scores.py
|
pypi
|
from ..metrics.metrics import entropy_batch_mixing, knn_purity, asw, nmi
from ..models import SCVI, SCANVI, TOTALVI
from scipy.sparse import issparse
import numpy as np
import scanpy as sc
import torch
from typing import Union, Optional
from sklearn.metrics import f1_score
import anndata
import matplotlib.pyplot as plt
sc.settings.set_figure_params(dpi=200, frameon=False)
torch.set_printoptions(precision=3, sci_mode=False, edgeitems=7)
np.set_printoptions(precision=2, edgeitems=7)
class SCVI_EVAL:
def __init__(
self,
model: Union[SCVI, SCANVI, TOTALVI],
adata: anndata.AnnData,
trainer: Optional['Trainer'] = None,
cell_type_key: str = None,
batch_key: str = None,
):
from scvi.data import get_from_registry
self.outer_model = model
self.model = model.model
self.model.eval()
if trainer is None:
self.trainer = model.trainer
else:
self.trainer = trainer
self.adata = adata
self.modified = getattr(model.model, 'encode_covariates', True)
self.annotated = type(model) is SCANVI
self.predictions = None
self.certainty = None
self.prediction_names = None
self.class_check = None
self.post_adata_2 = None
if trainer is not None:
if self.trainer.use_cuda:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
else:
self.device = next(self.model.parameters()).get_device()
if issparse(self.adata.X):
X = self.adata.X.toarray()
else:
X = self.adata.X
self.x_tensor = torch.tensor(X, device=self.device)
self.labels = None
self.label_tensor = None
if self.annotated:
self.labels = get_from_registry(self.adata, "labels").astype(np.int8)
self.label_tensor = torch.tensor(self.labels, device=self.device)
self.cell_types = self.adata.obs[cell_type_key].tolist()
self.batch_indices = get_from_registry(self.adata, "batch_indices").astype(np.int8)
self.batch_tensor = torch.tensor(self.batch_indices, device=self.device)
self.batch_names = self.adata.obs[batch_key].tolist()
self.celltype_enc = [0]*len(self.adata.obs[cell_type_key].unique().tolist())
for i, cell_type in enumerate(self.adata.obs[cell_type_key].unique().tolist()):
label = self.adata.obs['_scvi_labels'].unique().tolist()[i]
self.celltype_enc[label] = cell_type
self.post_adata = self.latent_as_anndata()
def latent_as_anndata(self):
if type(self.outer_model) is TOTALVI:
latent = self.outer_model.get_latent_representation(self.adata)
else:
if self.modified:
latents = self.model.sample_from_posterior_z(
self.x_tensor,
y=self.label_tensor,
batch_index=self.batch_tensor
)
else:
latents = self.model.sample_from_posterior_z(
self.x_tensor,
y=self.label_tensor,
)
if self.annotated:
latent = latents.cpu().detach().numpy()
latent2, _, _ = self.model.encoder_z2_z1(latents, self.label_tensor)
latent2 = latent2.cpu().detach().numpy()
post_adata_2 = sc.AnnData(latent2)
post_adata_2.obs['cell_type'] = self.cell_types
post_adata_2.obs['batch'] = self.batch_names
self.post_adata_2 = post_adata_2
else:
latent = latents.cpu().detach().numpy()
post_adata = sc.AnnData(latent)
post_adata.obs['cell_type'] = self.cell_types
post_adata.obs['batch'] = self.batch_names
return post_adata
def get_model_arch(self):
for name, p in self.model.named_parameters():
print(name, " - ", p.size(0), p.size(-1))
def plot_latent(self,
show=True,
save=False,
dir_path=None,
n_neighbors=8,
predictions=False,
in_one=False,
colors=None):
"""
if save:
if dir_path is None:
name = 'scanvi_latent.png'
else:
name = f'{dir_path}.png'
else:
name = False
"""
if self.model is None:
print("Not possible if no model is provided")
return
if save:
show = False
if dir_path is None:
dir_path = 'scanvi_latent'
sc.pp.neighbors(self.post_adata, n_neighbors=n_neighbors)
sc.tl.umap(self.post_adata)
if in_one:
color = ['cell_type', 'batch']
if predictions:
                color.extend(['certainty', 'predictions', 'type_check'])
sc.pl.umap(self.post_adata,
color=color,
ncols=2,
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_complete.png' if dir_path else None)
else:
sc.pl.umap(self.post_adata,
color=['cell_type'],
frameon=False,
wspace=0.6,
show=show,
palette=colors,
save=f'{dir_path}_celltypes.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['batch'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_batch.png' if dir_path else None)
if predictions:
sc.pl.umap(self.post_adata,
color=['predictions'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_predictions.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['certainty'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_certainty.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['type_check'],
ncols=2,
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_type_check.png' if dir_path else None)
def plot_history(self, show=True, save=False, dir_path=None):
if self.trainer is None:
print("Not possible if no trainer is provided")
return
if self.annotated:
fig, axs = plt.subplots(2, 1)
elbo_full = self.trainer.history["elbo_full_dataset"]
x_1 = np.linspace(0, len(elbo_full), len(elbo_full))
axs[0].plot(x_1, elbo_full, label="Full")
accuracy_labelled_set = self.trainer.history["accuracy_labelled_set"]
accuracy_unlabelled_set = self.trainer.history["accuracy_unlabelled_set"]
if len(accuracy_labelled_set) != 0:
x_2 = np.linspace(0, len(accuracy_labelled_set), (len(accuracy_labelled_set)))
axs[1].plot(x_2, accuracy_labelled_set, label="accuracy labelled")
if len(accuracy_unlabelled_set) != 0:
x_3 = np.linspace(0, len(accuracy_unlabelled_set), (len(accuracy_unlabelled_set)))
axs[1].plot(x_3, accuracy_unlabelled_set, label="accuracy unlabelled")
axs[0].set_xlabel('Epochs')
axs[0].set_ylabel('ELBO')
axs[1].set_xlabel('Epochs')
axs[1].set_ylabel('Accuracy')
axs[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
axs[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
if save:
if dir_path is None:
plt.savefig('scanvi_history.png', bbox_inches='tight')
else:
plt.savefig(f'{dir_path}.png', bbox_inches='tight')
if show:
plt.show()
else:
fig = plt.figure()
elbo_train = self.trainer.history["elbo_train_set"]
elbo_test = self.trainer.history["elbo_test_set"]
x = np.linspace(0, len(elbo_train), len(elbo_train))
plt.plot(x, elbo_train, label="train")
plt.plot(x, elbo_test, label="test")
plt.ylim(min(elbo_train) - 50, min(elbo_train) + 1000)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
if save:
if dir_path is None:
plt.savefig('scvi_history.png', bbox_inches='tight')
else:
plt.savefig(f'{dir_path}.png', bbox_inches='tight')
if show:
plt.show()
def get_ebm(self, n_neighbors=50, n_pools=50, n_samples_per_pool=100, verbose=True):
ebm_score = entropy_batch_mixing(
adata=self.post_adata,
label_key='batch',
n_neighbors=n_neighbors,
n_pools=n_pools,
n_samples_per_pool=n_samples_per_pool
)
if verbose:
print("Entropy of Batchmixing-Score:", ebm_score)
return ebm_score
def get_knn_purity(self, n_neighbors=50, verbose=True):
knn_score = knn_purity(
adata=self.post_adata,
label_key='cell_type',
n_neighbors=n_neighbors
)
if verbose:
print("KNN Purity-Score:", knn_score)
return knn_score
def get_asw(self):
asw_score_batch, asw_score_cell_types = asw(adata=self.post_adata, label_key='cell_type',batch_key='batch')
print("ASW on batch:", asw_score_batch)
print("ASW on celltypes:", asw_score_cell_types)
return asw_score_batch, asw_score_cell_types
def get_nmi(self):
nmi_score = nmi(adata=self.post_adata, label_key='cell_type')
print("NMI score:", nmi_score)
return nmi_score
def get_latent_score(self):
ebm = self.get_ebm(verbose=False)
knn = self.get_knn_purity(verbose=False)
score = ebm + knn
print("Latent-Space Score (KNN + EBM):", score)
return score
def get_classification_accuracy(self):
if self.annotated:
if self.modified:
softmax = self.model.classify(self.x_tensor, batch_index=self.batch_tensor)
else:
softmax = self.model.classify(self.x_tensor)
softmax = softmax.cpu().detach().numpy()
self.predictions = np.argmax(softmax, axis=1)
self.certainty = np.max(softmax, axis=1)
self.prediction_names = [0]*self.predictions.shape[0]
for index, label in np.ndenumerate(self.predictions):
self.prediction_names[index[0]] = self.celltype_enc[label]
self.class_check = np.array(np.expand_dims(self.predictions, axis=1) == self.labels)
class_check_labels = [0] * self.class_check.shape[0]
for index, check in np.ndenumerate(self.class_check):
class_check_labels[index[0]] = 'Correct' if check else 'Incorrect'
accuracy = np.sum(self.class_check) / self.class_check.shape[0]
self.post_adata.obs['certainty'] = self.certainty
self.post_adata.obs['type_check'] = class_check_labels
self.post_adata.obs['predictions'] = self.prediction_names
print("Classification Accuracy: %0.2f" % accuracy)
return accuracy
else:
print("Classification ratio not available for scVI models")
def get_f1_score(self):
if self.annotated:
if self.modified:
predictions = self.model.classify(self.x_tensor, batch_index=self.batch_tensor)
else:
predictions = self.model.classify(self.x_tensor)
self.predictions = predictions.cpu().detach().numpy()
self.predictions = np.expand_dims(np.argmax(self.predictions, axis=1), axis=1)
score = f1_score(self.labels, self.predictions, average='macro')
print("F1 Score: %0.2f" % score)
return score
else:
print("F1 Score not available for scVI models")
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/plotting/scvi_eval.py
| 0.80502 | 0.237985 |
scvi_eval.py
|
pypi
|
import numpy as np
import scanpy as sc
import torch
import anndata
import matplotlib.pyplot as plt
from typing import Union
from ..dataset.trvae._utils import label_encoder
from ..metrics.metrics import entropy_batch_mixing, knn_purity, asw, nmi
from ..models import trVAE, TRVAE
from ..trainers import trVAETrainer
sc.settings.set_figure_params(dpi=200, frameon=False)
sc.set_figure_params(dpi=200)
torch.set_printoptions(precision=3, sci_mode=False, edgeitems=7)
np.set_printoptions(precision=2, edgeitems=7)
class TRVAE_EVAL:
def __init__(
self,
model: Union[trVAE, TRVAE],
adata: anndata.AnnData,
trainer: trVAETrainer = None,
condition_key: str = None,
cell_type_key: str = None
):
if type(model) is TRVAE:
trainer = model.trainer
model = model.model
self.model = model
self.trainer = trainer
self.adata = adata
self.device = model.device
self.conditions, _ = label_encoder(
self.adata,
encoder=model.condition_encoder,
condition_key=condition_key,
)
self.cell_type_names = None
self.batch_names = None
if cell_type_key is not None:
self.cell_type_names = adata.obs[cell_type_key].tolist()
if condition_key is not None:
self.batch_names = adata.obs[condition_key].tolist()
self.adata_latent = self.latent_as_anndata()
def latent_as_anndata(self):
        if self.model.calculate_mmd == 'z' or not self.model.use_mmd:
latent = self.model.get_latent(
self.adata.X,
c=self.conditions,
)
else:
latent = self.model.get_y(
self.adata.X,
c=self.conditions
)
adata_latent = sc.AnnData(latent)
if self.cell_type_names is not None:
adata_latent.obs['cell_type'] = self.cell_type_names
if self.batch_names is not None:
adata_latent.obs['batch'] = self.batch_names
return adata_latent
def get_model_arch(self):
for name, p in self.model.named_parameters():
print(name, " - ", p.size(0), p.size(-1))
def plot_latent(self,
show=True,
save=False,
dir_path=None,
n_neighbors=8,
):
if save:
            show = False
if dir_path is None:
save = False
sc.pp.neighbors(self.adata_latent, n_neighbors=n_neighbors)
sc.tl.umap(self.adata_latent)
color = [
'cell_type' if self.cell_type_names is not None else None,
'batch' if self.batch_names is not None else None,
]
sc.pl.umap(self.adata_latent,
color=color,
frameon=False,
wspace=0.6,
show=show)
if save:
plt.savefig(f'{dir_path}_batch.png', bbox_inches='tight')
def plot_history(self, show=True, save=False, dir_path=None):
if save:
show = False
if dir_path is None:
save = False
if self.trainer is None:
print("Not possible if no trainer is provided")
return
fig = plt.figure()
elbo_train = self.trainer.logs["epoch_loss"]
elbo_test = self.trainer.logs["val_loss"]
x = np.linspace(0, len(elbo_train), num=len(elbo_train))
plt.plot(x, elbo_train, label="Train")
plt.plot(x, elbo_test, label="Validate")
plt.ylim(min(elbo_test) - 50, max(elbo_test) + 50)
plt.legend()
if save:
plt.savefig(f'{dir_path}.png', bbox_inches='tight')
if show:
plt.show()
plt.clf()
def get_ebm(self, n_neighbors=50, n_pools=50, n_samples_per_pool=100, verbose=True):
ebm_score = entropy_batch_mixing(
adata=self.adata_latent,
label_key='batch',
n_neighbors=n_neighbors,
n_pools=n_pools,
n_samples_per_pool=n_samples_per_pool
)
if verbose:
print("Entropy of Batchmixing-Score: %0.2f" % ebm_score)
return ebm_score
def get_knn_purity(self, n_neighbors=50, verbose=True):
knn_score = knn_purity(
adata=self.adata_latent,
label_key='cell_type',
n_neighbors=n_neighbors
)
if verbose:
print("KNN Purity-Score: %0.2f" % knn_score)
return knn_score
def get_asw(self):
asw_score_batch, asw_score_cell_types = asw(adata=self.adata_latent, label_key='cell_type', batch_key='batch')
print("ASW on batch:", asw_score_batch)
print("ASW on celltypes:", asw_score_cell_types)
return asw_score_batch, asw_score_cell_types
def get_nmi(self):
nmi_score = nmi(adata=self.adata_latent, label_key='cell_type')
print("NMI score:", nmi_score)
return nmi_score
def get_latent_score(self):
ebm = self.get_ebm(verbose=False)
knn = self.get_knn_purity(verbose=False)
score = ebm + knn
print("Latent-Space Score EBM+KNN, EBM, KNN: %0.2f, %0.2f, %0.2f" % (score, ebm, knn))
return score
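if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). `model` and `adata`
    # are assumed to be a trained scArches TRVAE model and its training AnnData.
    evaluator = TRVAE_EVAL(model, adata, condition_key='batch', cell_type_key='cell_type')
    evaluator.plot_latent()
    evaluator.get_latent_score()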
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/plotting/trvae_eval.py
| 0.789599 | 0.330674 |
trvae_eval.py
|
pypi
|
from ..trvae.unsupervised import trVAETrainer
import torch
import sys
class ProxGroupLasso:
def __init__(self, alpha, omega=None, inplace=True):
# omega - vector of coefficients with size
# equal to the number of groups
if omega is None:
self._group_coeff = alpha
else:
self._group_coeff = (omega*alpha).view(-1)
# to check for update
self._alpha = alpha
self._inplace = inplace
def __call__(self, W):
if not self._inplace:
W = W.clone()
norm_vect = W.norm(p=2, dim=0)
norm_g_gr_vect = norm_vect>self._group_coeff
scaled_norm_vector = norm_vect/self._group_coeff
scaled_norm_vector+=(~(scaled_norm_vector>0)).float()
W-=W/scaled_norm_vector
W*=norm_g_gr_vect.float()
return W
class ProxL1:
def __init__(self, alpha, I=None, inplace=True):
self._I = ~I.bool() if I is not None else None
self._alpha=alpha
self._inplace=inplace
def __call__(self, W):
if not self._inplace:
W = W.clone()
W_geq_alpha = W>=self._alpha
W_leq_neg_alpha = W<=-self._alpha
W_cond_joint = ~W_geq_alpha&~W_leq_neg_alpha
if self._I is not None:
W_geq_alpha &= self._I
W_leq_neg_alpha &= self._I
W_cond_joint &= self._I
W -= W_geq_alpha.float()*self._alpha
W += W_leq_neg_alpha.float()*self._alpha
W -= W_cond_joint.float()*W
return W
class expiMapTrainer(trVAETrainer):
"""ScArches Unsupervised Trainer class. This class contains the implementation of the unsupervised CVAE/TRVAE
Trainer.
Parameters
----------
model: trVAE
        The trVAE/expiMap model to be trained.
adata: : `~anndata.AnnData`
Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log transformed data
for 'mse' loss.
alpha: Float
Group Lasso regularization coefficient
omega: Tensor or None
If not 'None', vector of coefficients for each group
condition_key: String
column name of conditions in `adata.obs` data frame.
cell_type_key: String
column name of celltypes in `adata.obs` data frame.
train_frac: Float
Defines the fraction of data that is used for training and data that is used for validation.
batch_size: Integer
Defines the batch size that is used during each Iteration
n_samples: Integer or None
Defines how many samples are being used during each epoch. This should only be used if hardware resources
are limited.
clip_value: Float
        If the value is greater than 0, all gradients with a higher value will be clipped during training.
weight_decay: Float
Defines the scaling factor for weight decay in the Adam optimizer.
alpha_iter_anneal: Integer or None
If not 'None', the KL Loss scaling factor will be annealed from 0 to 1 every iteration until the input
integer is reached.
alpha_epoch_anneal: Integer or None
If not 'None', the KL Loss scaling factor will be annealed from 0 to 1 every epoch until the input
integer is reached.
use_early_stopping: Boolean
If 'True' the EarlyStopping class is being used for training to prevent overfitting.
early_stopping_kwargs: Dict
Passes custom Earlystopping parameters.
use_stratified_sampling: Boolean
If 'True', the sampler tries to load equally distributed batches concerning the conditions in every
iteration.
use_stratified_split: Boolean
If `True`, the train and validation data will be constructed in such a way that both have same distribution
of conditions in the data.
monitor: Boolean
If `True', the progress of the training will be printed after each epoch.
n_workers: Integer
Passes the 'n_workers' parameter for the torch.utils.data.DataLoader class.
seed: Integer
        Define a specific random seed to get reproducible results.
alpha_l1: Float
L1 regularization coefficient for the soft mask of reference and new constrained terms.
        Specifies the strength for deactivating the genes which are not in the corresponding annotations/groups
in the mask.
alpha_l1_epoch_anneal: Integer
If not 'None', the alpha_l1 scaling factor will be annealed from 0 to 1 every 'alpha_l1_anneal_each' epochs
until the input integer is reached.
alpha_l1_anneal_each: Integer
Anneal alpha_l1 every alpha_l1_anneal_each'th epoch, i.e. for 5 (default)
do annealing every 5th epoch.
gamma_ext: Float
L1 regularization coefficient for the new unconstrained terms. Specifies the strength of
        sparsity enforcement.
gamma_epoch_anneal: Integer
If not 'None', the gamma_ext scaling factor will be annealed from 0 to 1 every 'gamma_anneal_each' epochs
until the input integer is reached.
gamma_anneal_each: Integer
Anneal gamma_ext every gamma_anneal_each'th epoch, i.e. for 5 (default)
do annealing every 5th epoch.
beta: Float
HSIC regularization coefficient for the unconstrained terms.
Multiplies the HSIC loss terms if not 'None'.
"""
def __init__(
self,
model,
adata,
alpha,
omega=None,
alpha_l1=None,
alpha_l1_epoch_anneal=None,
alpha_l1_anneal_each=5,
gamma_ext=None,
gamma_epoch_anneal=None,
gamma_anneal_each=5,
beta=1.,
print_stats=False,
**kwargs
):
super().__init__(model, adata, **kwargs)
self.print_stats = print_stats
self.alpha = alpha
self.omega = omega
if self.omega is not None:
self.omega = self.omega.to(self.device)
self.gamma_ext = gamma_ext
self.gamma_epoch_anneal = gamma_epoch_anneal
self.gamma_anneal_each = gamma_anneal_each
self.alpha_l1 = alpha_l1
self.alpha_l1_epoch_anneal = alpha_l1_epoch_anneal
self.alpha_l1_anneal_each = alpha_l1_anneal_each
if self.model.use_hsic:
self.beta = beta
else:
self.beta = None
self.watch_lr = None
self.use_prox_ops = self.check_prox_ops()
self.prox_ops = {}
self.corr_coeffs = self.init_anneal()
def check_prox_ops(self):
use_prox_ops = {}
use_main = self.model.decoder.L0.expr_L.weight.requires_grad
use_prox_ops['main_group_lasso'] = use_main and self.alpha is not None
use_mask = use_main and self.model.mask is not None
use_prox_ops['main_soft_mask'] = use_mask and self.alpha_l1 is not None
use_ext = self.model.n_ext_decoder > 0 and self.gamma_ext is not None
use_ext = use_ext and self.model.decoder.L0.ext_L.weight.requires_grad
use_prox_ops['ext_unannot_l1'] = use_ext
use_ext_m = self.model.n_ext_m_decoder > 0 and self.alpha_l1 is not None
use_ext_m = use_ext_m and self.model.decoder.L0.ext_L_m.weight.requires_grad
use_prox_ops['ext_soft_mask'] = use_ext_m and self.model.ext_mask is not None
return use_prox_ops
def init_anneal(self):
corr_coeffs = {}
use_soft_mask = self.use_prox_ops['main_soft_mask'] or self.use_prox_ops['ext_soft_mask']
if use_soft_mask and self.alpha_l1_epoch_anneal is not None:
corr_coeffs['alpha_l1'] = 1. / self.alpha_l1_epoch_anneal
else:
corr_coeffs['alpha_l1'] = 1.
if self.use_prox_ops['ext_unannot_l1'] and self.gamma_epoch_anneal is not None:
corr_coeffs['gamma_ext'] = 1. / self.gamma_epoch_anneal
else:
corr_coeffs['gamma_ext'] = 1.
return corr_coeffs
def anneal(self):
any_change = False
if self.corr_coeffs['gamma_ext'] < 1.:
any_change = True
time_to_anneal = self.epoch > 0 and self.epoch % self.gamma_anneal_each == 0
if time_to_anneal:
self.corr_coeffs['gamma_ext'] = min(self.epoch / self.gamma_epoch_anneal, 1.)
if self.print_stats:
print('New gamma_ext anneal coefficient:', self.corr_coeffs['gamma_ext'])
if self.corr_coeffs['alpha_l1'] < 1.:
any_change = True
            time_to_anneal = self.epoch > 0 and self.epoch % self.alpha_l1_anneal_each == 0
if time_to_anneal:
self.corr_coeffs['alpha_l1'] = min(self.epoch / self.alpha_l1_epoch_anneal, 1.)
if self.print_stats:
print('New alpha_l1 anneal coefficient:', self.corr_coeffs['alpha_l1'])
return any_change
def init_prox_ops(self):
if any(self.use_prox_ops.values()) and self.watch_lr is None:
self.watch_lr = self.optimizer.param_groups[0]['lr']
if 'main_group_lasso' not in self.prox_ops and self.use_prox_ops['main_group_lasso']:
print('Init the group lasso proximal operator for the main terms.')
alpha_corr = self.alpha * self.watch_lr
self.prox_ops['main_group_lasso'] = ProxGroupLasso(alpha_corr, self.omega)
if 'main_soft_mask' not in self.prox_ops and self.use_prox_ops['main_soft_mask']:
print('Init the soft mask proximal operator for the main terms.')
main_mask = self.model.mask.to(self.device)
alpha_l1_corr = self.alpha_l1 * self.watch_lr * self.corr_coeffs['alpha_l1']
self.prox_ops['main_soft_mask'] = ProxL1(alpha_l1_corr, main_mask)
if 'ext_unannot_l1' not in self.prox_ops and self.use_prox_ops['ext_unannot_l1']:
print('Init the L1 proximal operator for the unannotated extension.')
gamma_ext_corr = self.gamma_ext * self.watch_lr * self.corr_coeffs['gamma_ext']
self.prox_ops['ext_unannot_l1'] = ProxL1(gamma_ext_corr)
if 'ext_soft_mask' not in self.prox_ops and self.use_prox_ops['ext_soft_mask']:
print('Init the soft mask proximal operator for the annotated extension.')
ext_mask = self.model.ext_mask.to(self.device)
alpha_l1_corr = self.alpha_l1 * self.watch_lr * self.corr_coeffs['alpha_l1']
self.prox_ops['ext_soft_mask'] = ProxL1(alpha_l1_corr, ext_mask)
def update_prox_ops(self):
if 'main_group_lasso' in self.prox_ops:
alpha_corr = self.alpha * self.watch_lr
if self.prox_ops['main_group_lasso']._alpha != alpha_corr:
self.prox_ops['main_group_lasso'] = ProxGroupLasso(alpha_corr, self.omega)
if 'ext_unannot_l1' in self.prox_ops:
gamma_ext_corr = self.gamma_ext * self.watch_lr * self.corr_coeffs['gamma_ext']
if self.prox_ops['ext_unannot_l1']._alpha != gamma_ext_corr:
self.prox_ops['ext_unannot_l1']._alpha = gamma_ext_corr
for mask_key in ('main_soft_mask', 'ext_soft_mask'):
if mask_key in self.prox_ops:
alpha_l1_corr = self.alpha_l1 * self.watch_lr * self.corr_coeffs['alpha_l1']
if self.prox_ops[mask_key]._alpha != alpha_l1_corr:
self.prox_ops[mask_key]._alpha = alpha_l1_corr
def apply_prox_ops(self):
if 'main_soft_mask' in self.prox_ops:
self.prox_ops['main_soft_mask'](self.model.decoder.L0.expr_L.weight.data)
if 'main_group_lasso' in self.prox_ops:
self.prox_ops['main_group_lasso'](self.model.decoder.L0.expr_L.weight.data)
if 'ext_unannot_l1' in self.prox_ops:
self.prox_ops['ext_unannot_l1'](self.model.decoder.L0.ext_L.weight.data)
if 'ext_soft_mask' in self.prox_ops:
self.prox_ops['ext_soft_mask'](self.model.decoder.L0.ext_L_m.weight.data)
def on_iteration(self, batch_data):
self.init_prox_ops()
super().on_iteration(batch_data)
self.apply_prox_ops()
def check_early_stop(self):
continue_training = super().check_early_stop()
if continue_training:
new_lr = self.optimizer.param_groups[0]['lr']
if self.watch_lr is not None and self.watch_lr != new_lr:
self.watch_lr = new_lr
self.update_prox_ops()
return continue_training
def on_epoch_end(self):
if self.print_stats:
if self.use_prox_ops['main_group_lasso']:
n_deact_terms = self.model.decoder.n_inactive_terms()
msg = f'Number of deactivated terms: {n_deact_terms}'
if self.epoch > 0:
msg = '\n' + msg
print(msg)
print('-------------------')
if self.use_prox_ops['main_soft_mask']:
main_mask = self.prox_ops['main_soft_mask']._I
share_deact_genes = (self.model.decoder.L0.expr_L.weight.data.abs()==0) & main_mask
share_deact_genes = share_deact_genes.float().sum().cpu().numpy() / self.model.n_inact_genes
print('Share of deactivated inactive genes: %.4f' % share_deact_genes)
print('-------------------')
if self.use_prox_ops['ext_soft_mask']:
ext_mask = self.prox_ops['ext_soft_mask']._I
share_deact_ext_genes = (self.model.decoder.L0.ext_L_m.weight.data.abs()==0) & ext_mask
share_deact_ext_genes = share_deact_ext_genes.float().sum().cpu().numpy() / self.model.n_inact_ext_genes
print('Share of deactivated inactive genes in extension terms: %.4f' % share_deact_ext_genes)
print('-------------------')
if self.use_prox_ops['ext_unannot_l1']:
active_genes = (self.model.decoder.L0.ext_L.weight.data.abs().cpu().numpy()>0).sum(0)
print('Active genes in unannotated extension terms:', active_genes)
sparse_share = 1. - active_genes / self.model.input_dim
                print('Sparsity share in unannotated extension terms:', sparse_share)
print('-------------------')
any_change = self.anneal()
if any_change:
self.update_prox_ops()
super().on_epoch_end()
def loss(self, total_batch=None):
recon_loss, kl_loss, hsic_loss = self.model(**total_batch)
if self.beta is not None and self.model.use_hsic:
weighted_hsic = self.beta * hsic_loss
self.iter_logs["hsic_loss"].append(hsic_loss.item())
else:
weighted_hsic = 0.
loss = recon_loss + self.calc_alpha_coeff()*kl_loss + weighted_hsic
self.iter_logs["loss"].append(loss.item())
self.iter_logs["unweighted_loss"].append((recon_loss + kl_loss + hsic_loss).item())
self.iter_logs["recon_loss"].append(recon_loss.item())
self.iter_logs["kl_loss"].append(kl_loss.item())
return loss
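if __name__ == "__main__":
    # Quick numeric check (not part of the original module). ProxL1 is the
    # soft-thresholding operator applied after each gradient step: with
    # alpha=0.1, entries with |w| >= 0.1 are shrunk towards zero by 0.1 and
    # smaller entries are zeroed out.
    W = torch.tensor([[0.5, -0.05, 1.2],
                      [-0.8, 0.02, 0.3]])
    ProxL1(alpha=0.1)(W)  # operates in place by default
    print(W)  # approximately [[0.4, 0.0, 1.1], [-0.7, 0.0, 0.2]]
    # ProxGroupLasso shrinks whole columns and zeroes those whose L2 norm falls
    # below the group coefficient; this is how entire latent terms get deactivated.
    ProxGroupLasso(alpha=0.5)(W)
    print(W)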
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/trainers/expimap/regularized.py
| 0.716219 | 0.554531 |
regularized.py
|
pypi
|
import torch
from scipy import sparse
from anndata import AnnData
from collections import defaultdict
from ._utils import shuffle_adata, print_progress
from ...utils.monitor import EarlyStopping
class vaeArithTrainer:
"""
This class contains the implementation of the VAEARITH Trainer
Parameters
----------
model: vaeArith
adata: : `~anndata.AnnData`
Annotated Data Matrix for training VAE network.
n_epochs: int
Number of epochs to iterate and optimize network weights
train_frac: Float
Defines the fraction of data that is used for training and data that is used for validation.
batch_size: integer
size of each batch of training dataset to be fed to network while training.
    patience: int
        Number of consecutive epochs in which the network loss does not improve.
        After this limit, the network will stop training.
    threshold: float
        Threshold for the difference between consecutive validation loss values;
        if the improvement is smaller than this `threshold`, the epoch will not be
        counted as an improvement for early stopping.
shuffle: bool
if `True` shuffles the training dataset
early_stopping_kwargs: Dict
Passes custom Earlystopping parameters
"""
def __init__(self, model, adata, train_frac: float = 0.9, batch_size = 32, shuffle=True, early_stopping_kwargs: dict = {
"early_stopping_metric": "val_loss",
"threshold": 0,
"patience": 20,
"reduce_lr": True,
"lr_patience": 13,
"lr_factor": 0.1}, **kwargs): # maybe add more parameters
self.model = model
self.seed = kwargs.get("seed", 2021)
torch.manual_seed(self.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(self.seed)
self.model.cuda() # put model to cuda(gpu)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.adata = adata
self.train_frac = train_frac
self.shuffle = shuffle
self.batch_size = batch_size
early_stopping_kwargs = (early_stopping_kwargs if early_stopping_kwargs else dict())
self.early_stopping = EarlyStopping(**early_stopping_kwargs)
self.monitor = kwargs.pop("monitor", True)
# Optimization attributes
self.optim = None
# self.weight_decay = weight_decay
self.epoch = -1 # epoch = self.epoch + 1 in compute metrics
# self.training_time = 0
# self.n_iter = 0
self.best_epoch = None
self.best_state_dict = None
self.logs = defaultdict(list)
@staticmethod
def _anndataToTensor(adata: AnnData) -> torch.Tensor:
data_ndarray = adata.X.A
data_tensor = torch.from_numpy(data_ndarray)
return data_tensor
def train_valid_split(self, adata: AnnData, train_frac = 0.9):
if train_frac == 1:
return adata, None
n_obs = adata.shape[0]
shuffled = shuffle_adata(adata)
train_adata = shuffled[:int(train_frac * n_obs)] # maybe not the best way to round
valid_adata = shuffled[int(train_frac * n_obs):]
return train_adata, valid_adata
def train(self, n_epochs = 100, lr = 0.001, eps = 1e-8, **extras_kwargs):
self.n_epochs = n_epochs
params = filter(lambda p: p.requires_grad, self.model.parameters())
self.optim = torch.optim.Adam(
params, lr=lr, eps=eps) # consider changing the param. like weight_decay, eps, etc.
train_data, valid_data = self.train_valid_split(self.adata) # possible bad of using static method this way. Think about adding static methods to util.py
        if self.shuffle:
            train_adata = shuffle_adata(train_data)
            valid_adata = shuffle_adata(valid_data)
        else:
            # without this branch, train_adata/valid_adata would be undefined when shuffle=False
            train_adata, valid_adata = train_data, valid_data
loss_hist = []
for self.epoch in range(self.n_epochs):
self.model.train()
self.iter_logs = defaultdict(list)
train_loss = 0
loss_hist.append(0)
for lower in range(0, train_adata.shape[0], self.batch_size):
upper = min(lower + self.batch_size, train_adata.shape[0])
if sparse.issparse(train_adata.X):
x_mb = torch.from_numpy(train_adata[lower:upper, :].X.A)
else:
x_mb = torch.from_numpy(train_adata[lower:upper, :].X)
if upper - lower > 1:
x_mb = x_mb.to(self.device) #to cuda or cpu
reconstructions, mu, logvar = self.model(x_mb)
loss = self.model._loss_function(x_mb, reconstructions, mu, logvar)
self.optim.zero_grad()
loss.backward()
self.optim.step()
self.iter_logs["loss"].append(loss.item())
train_loss += loss.item() # loss.item() contains the loss of entire mini-batch divided by the batch size
self.on_epoch_end()
valid_loss = 0
train_loss_end_epoch = 0
self.iter_logs = defaultdict(list)
with torch.no_grad(): # disables the gradient calculation
self.model.eval()
for lower in range(0, train_adata.shape[0], self.batch_size):
upper = min(lower + self.batch_size, train_adata.shape[0])
if sparse.issparse(train_adata.X):
x_mb = torch.from_numpy(train_adata[lower:upper, :].X.A)
else:
x_mb = torch.from_numpy(train_adata[lower:upper, :].X)
if upper - lower > 1:
x_mb = x_mb.to(self.device)
reconstructions, mu, logvar = self.model(x_mb)
loss = self.model._loss_function(x_mb, reconstructions, mu, logvar)
train_loss_end_epoch += loss.item()
for lower in range(0, valid_adata.shape[0], self.batch_size):
upper = min(lower + self.batch_size, valid_adata.shape[0])
if sparse.issparse(valid_adata.X):
x_mb = torch.from_numpy(valid_adata[lower:upper, :].X.A)
else:
x_mb = torch.from_numpy(valid_adata[lower:upper, :].X)
if upper - lower > 1:
x_mb = x_mb.to(self.device)
reconstructions, mu, logvar = self.model(x_mb)
loss = self.model._loss_function(x_mb, reconstructions, mu, logvar)
self.iter_logs["loss"].append(loss.item())
valid_loss += loss.item() # loss.item() contains the loss of entire mini-batch divided by the batch size
# Get Validation Logs
for key in self.iter_logs:
if "loss" in key:
self.logs["val_" + key].append(
sum(self.iter_logs[key][:]) / len(self.iter_logs[key][:]))
# Monitor Logs
if self.monitor:
print_progress(self.epoch, self.logs, self.n_epochs)
if not self.check_early_stop():
break
if self.best_state_dict is not None:
print("Saving best state of network...")
print("Best State was in Epoch", self.best_epoch)
self.model.load_state_dict(self.best_state_dict)
def on_epoch_end(self):
# Get Train Epoch Logs
for key in self.iter_logs:
if "loss" in key:
self.logs["epoch_" + key].append(
sum(self.iter_logs[key][:]) / len(self.iter_logs[key][:]))
def check_early_stop(self):
# Calculate Early Stopping and best state
early_stopping_metric = self.early_stopping.early_stopping_metric
if self.early_stopping.update_state(self.logs[early_stopping_metric][-1]):
self.best_state_dict = self.model.state_dict()
self.best_epoch = self.epoch
continue_training, update_lr = self.early_stopping.step(self.logs[early_stopping_metric][-1])
if update_lr:
            print('\nADJUSTED LR')
for param_group in self.optim.param_groups:
param_group["lr"] *= self.early_stopping.lr_factor
return continue_training
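if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). `model` is assumed
    # to be a scArches vaeArith model and `adata` an AnnData whose .X matches the
    # model's input dimension.
    trainer = vaeArithTrainer(model, adata, train_frac=0.9, batch_size=32)
    trainer.train(n_epochs=100, lr=1e-3)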
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/trainers/scgen/trainer.py
| 0.857201 | 0.621656 |
trainer.py
|
pypi
|
from random import shuffle
import sys
import anndata
import numpy as np
from scipy import sparse
from sklearn import preprocessing
def print_progress(epoch, logs, n_epochs=10000):
"""Creates Message for '_print_progress_bar'.
Parameters
----------
epoch: Integer
Current epoch iteration.
logs: dict
Dictionary of all current losses.
n_epochs: Integer
Maximum value of epochs.
Returns
-------
"""
message = ""
for key in logs:
if "loss" in key and ("epoch_" in key or "val_" in key) and "unweighted" not in key:
message += f" - {key:s}: {logs[key][-1]:7.10f}"
_print_progress_bar(epoch + 1, n_epochs, prefix='', suffix=message, decimals=1, length=20)
def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""Prints out message with a progress bar.
Parameters
----------
iteration: Integer
Current epoch.
total: Integer
Maximum value of epochs.
prefix: String
String before the progress bar.
suffix: String
String after the progress bar.
decimals: Integer
Digits after comma for all the losses.
length: Integer
Length of the progress bar.
fill: String
Symbol for filling the bar.
Returns
-------
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_len = int(length * iteration // total)
bar = fill * filled_len + '-' * (length - filled_len)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def extractor(data, cell_type, conditions, cell_type_key="cell_type", condition_key="condition"):
"""
Returns a list of `data` files while filtering for a specific `cell_type`.
Parameters
----------
data: `~anndata.AnnData`
Annotated data matrix
cell_type: basestring
specific cell type to be extracted from `data`.
conditions: dict
dictionary of stimulated/control of `data`.
Returns
-------
list of `data` files while filtering for a specific `cell_type`.
"""
    cell_with_both_condition = data[data.obs[cell_type_key] == cell_type]
    condition_1 = data[(data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["ctrl"])]
    condition_2 = data[(data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["stim"])]
    training = data[~((data.obs[cell_type_key] == cell_type) & (data.obs[condition_key] == conditions["stim"]))]
    return [training, condition_1, condition_2, cell_with_both_condition]
def balancer(adata, cell_type_key="cell_type", condition_key="condition"):
"""
Makes cell type population equal.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
balanced_data: `~anndata.AnnData`
Equal cell type population Annotated data matrix.
"""
class_names = np.unique(adata.obs[cell_type_key])
class_pop = {}
for cls in class_names:
class_pop[cls] = adata.copy()[adata.obs[cell_type_key] == cls].shape[0]
max_number = np.max(list(class_pop.values()))
all_data_x = []
all_data_label = []
all_data_condition = []
for cls in class_names:
temp = adata.copy()[adata.obs[cell_type_key] == cls]
index = np.random.choice(range(len(temp)), max_number)
if sparse.issparse(temp.X):
temp_x = temp.X.A[index]
else:
temp_x = temp.X[index]
all_data_x.append(temp_x)
temp_ct = np.repeat(cls, max_number)
all_data_label.append(temp_ct)
        temp_cc = temp.obs[condition_key].values[index]  # keep each resampled cell's own condition
        all_data_condition.append(temp_cc)
balanced_data = anndata.AnnData(np.concatenate(all_data_x))
balanced_data.obs[cell_type_key] = np.concatenate(all_data_label)
    balanced_data.obs[condition_key] = np.concatenate(all_data_condition)
class_names = np.unique(balanced_data.obs[cell_type_key])
class_pop = {}
for cls in class_names:
class_pop[cls] = len(balanced_data[balanced_data.obs[cell_type_key] == cls])
return balanced_data
def data_remover(adata, remain_list, remove_list, cell_type_key, condition_key):
"""
    Removes a specific cell type in the stimulated condition from `adata`.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix
remain_list: list
list of cell types which are going to be remained in `adata`.
remove_list: list
list of cell types which are going to be removed from `adata`.
Returns
-------
merged_data: list
returns array of specified cell types in stimulated condition
"""
source_data = []
for i in remain_list:
source_data.append(extractor(adata, i, conditions={"ctrl": "control", "stim": "stimulated"},
cell_type_key=cell_type_key, condition_key=condition_key)[3])
target_data = []
for i in remove_list:
target_data.append(extractor(adata, i, conditions={"ctrl": "control", "stim": "stimulated"},
cell_type_key=cell_type_key, condition_key=condition_key)[1])
merged_data = training_data_provider(source_data, target_data)
merged_data.var_names = adata.var_names
return merged_data
def training_data_provider(train_s, train_t):
"""
Concatenates two lists containing adata files
Parameters
----------
train_s: `~anndata.AnnData`
Annotated data matrix.
train_t: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
Concatenated Annotated data matrix.
"""
train_s_X = []
train_s_diet = []
train_s_groups = []
for i in train_s:
train_s_X.append(i.X.A)
train_s_diet.append(i.obs["condition"].tolist())
train_s_groups.append(i.obs["cell_type"].tolist())
train_s_X = np.concatenate(train_s_X)
temp = []
for i in train_s_diet:
temp = temp + i
train_s_diet = temp
temp = []
for i in train_s_groups:
temp = temp + i
train_s_groups = temp
train_t_X = []
train_t_diet = []
train_t_groups = []
for i in train_t:
train_t_X.append(i.X.A)
train_t_diet.append(i.obs["condition"].tolist())
train_t_groups.append(i.obs["cell_type"].tolist())
temp = []
for i in train_t_diet:
temp = temp + i
train_t_diet = temp
temp = []
for i in train_t_groups:
temp = temp + i
train_t_groups = temp
train_t_X = np.concatenate(train_t_X)
train_real = np.concatenate([train_s_X, train_t_X]) # concat all
train_real = anndata.AnnData(train_real)
train_real.obs["condition"] = train_s_diet + train_t_diet
train_real.obs["cell_type"] = train_s_groups + train_t_groups
return train_real
def shuffle_adata(adata):
"""
Shuffles the `adata`.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
adata: `~anndata.AnnData`
Shuffled annotated data matrix.
"""
if sparse.issparse(adata.X):
adata.X = adata.X.A
ind_list = [i for i in range(adata.shape[0])]
shuffle(ind_list)
new_adata = adata[ind_list, :]
return new_adata
def label_encoder(adata):
"""
Encode labels of Annotated `adata` matrix using sklearn.preprocessing.LabelEncoder class.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
    labels: numpy nd-array
        Array of encoded labels.
    le: :class:`~sklearn.preprocessing.LabelEncoder`
        The fitted label encoder.
"""
le = preprocessing.LabelEncoder()
labels = le.fit_transform(adata.obs["condition"].tolist())
return labels.reshape(-1, 1), le
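if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): balance a synthetic
    # AnnData with unequal cell-type populations, then encode its conditions.
    adata = anndata.AnnData(np.random.rand(120, 20).astype(np.float32))
    adata.obs["cell_type"] = ["A"] * 100 + ["B"] * 20
    adata.obs["condition"] = np.random.choice(["control", "stimulated"], size=120)
    balanced = balancer(adata)
    print(balanced.obs["cell_type"].value_counts())
    labels, le = label_encoder(adata)
    print(labels[:5].ravel(), le.classes_)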
|
/scArches-0.5.9.tar.gz/scArches-0.5.9/scarches/trainers/scgen/_utils.py
| 0.721351 | 0.380759 |
_utils.py
|
pypi
|