body_hash (string, len 64) | body (string, len 23-109k) | docstring (string, len 1-57k) | path (string, len 4-198) | name (string, len 1-115) | repository_name (string, len 7-111) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, len 14-108k) | unified (string, len 45-133k)
---|---|---|---|---|---|---|---|---|---|
c40a8e83777a384fa7c09f7925ae43a6d8f0063a7499fe411dda86d6fc31b317
|
@external_stylesheets.setter
def external_stylesheets(self, external_stylesheets: list) -> None:
'Set optional external stylesheets to be used in the Dash\n application. The input variable `external_stylesheets` should be\n a list.'
self._external_stylesheets = external_stylesheets
|
Set optional external stylesheets to be used in the Dash
application. The input variable `external_stylesheets` should be
a list.
|
webviz_config/_theme_class.py
|
external_stylesheets
|
magnesj/webviz-config
| 44 |
python
|
@external_stylesheets.setter
def external_stylesheets(self, external_stylesheets: list) -> None:
'Set optional external stylesheets to be used in the Dash\n application. The input variable `external_stylesheets` should be\n a list.'
self._external_stylesheets = external_stylesheets
|
@external_stylesheets.setter
def external_stylesheets(self, external_stylesheets: list) -> None:
'Set optional external stylesheets to be used in the Dash\n application. The input variable `external_stylesheets` should be\n a list.'
self._external_stylesheets = external_stylesheets<|docstring|>Set optional external stylesheets to be used in the Dash
application. The input variable `external_stylesheets` should be
a list.<|endoftext|>
|
81d403103576e081e793107e976a1efac24f52796b85df3f6d19d269f15e5c6d
|
@assets.setter
def assets(self, assets: list) -> None:
'Set optional theme assets to be copied over to the `./assets` folder\n when the webviz dash application is created. The input variable\n `assets` should be a list of absolute file paths to the different\n assets.\n '
self._assets = assets
|
Set optional theme assets to be copied over to the `./assets` folder
when the webviz dash application is created. The input variable
`assets` should be a list of absolute file paths to the different
assets.
|
webviz_config/_theme_class.py
|
assets
|
magnesj/webviz-config
| 44 |
python
|
@assets.setter
def assets(self, assets: list) -> None:
'Set optional theme assets to be copied over to the `./assets` folder\n when the webviz dash application is created. The input variable\n `assets` should be a list of absolute file paths to the different\n assets.\n '
self._assets = assets
|
@assets.setter
def assets(self, assets: list) -> None:
'Set optional theme assets to be copied over to the `./assets` folder\n when the webviz dash application is created. The input variable\n `assets` should be a list of absolute file paths to the different\n assets.\n '
self._assets = assets<|docstring|>Set optional theme assets to be copied over to the `./assets` folder
when the webviz dash application is created. The input variable
`assets` should be a list of absolute file paths to the different
assets.<|endoftext|>
|
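The two webviz-config rows above are plain property setters on the theme class. A minimal usage sketch follows, assuming the class defined in webviz_config/_theme_class.py is exported as `WebvizConfigTheme` and takes a theme name in its constructor (both details are inferred, not taken from the rows):

```python
# Hedged usage sketch for the two setters shown above.
# Assumes webviz-config is installed and exposes WebvizConfigTheme;
# the constructor argument and file paths are illustrative.
from webviz_config import WebvizConfigTheme

theme = WebvizConfigTheme("example-theme")

# `external_stylesheets` expects a list (e.g. CSS URLs for the Dash app).
theme.external_stylesheets = ["https://example.com/example-stylesheet.css"]

# `assets` expects a list of absolute file paths that will be copied
# into the ./assets folder when the Dash application is created.
theme.assets = ["/absolute/path/to/logo.png"]
```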
bd166e5c32368c8916f4c0d7177fc7cff945f2101f0eebbfacb93779a235ae90
|
def __init__(self, content_caching_parent_capabilities_id=None, imports=None, namespaces=None, personal_content=None, query_parameters=None, shared_content=None, prioritization=None, local_vars_configuration=None):
'ComputerContentCachingParentCapabilities - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content_caching_parent_capabilities_id = None
self._imports = None
self._namespaces = None
self._personal_content = None
self._query_parameters = None
self._shared_content = None
self._prioritization = None
self.discriminator = None
if (content_caching_parent_capabilities_id is not None):
self.content_caching_parent_capabilities_id = content_caching_parent_capabilities_id
if (imports is not None):
self.imports = imports
if (namespaces is not None):
self.namespaces = namespaces
if (personal_content is not None):
self.personal_content = personal_content
if (query_parameters is not None):
self.query_parameters = query_parameters
if (shared_content is not None):
self.shared_content = shared_content
if (prioritization is not None):
self.prioritization = prioritization
|
ComputerContentCachingParentCapabilities - a model defined in OpenAPI
|
jamf/models/computer_content_caching_parent_capabilities.py
|
__init__
|
jensenbox/python-jamf
| 1 |
python
|
def __init__(self, content_caching_parent_capabilities_id=None, imports=None, namespaces=None, personal_content=None, query_parameters=None, shared_content=None, prioritization=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content_caching_parent_capabilities_id = None
self._imports = None
self._namespaces = None
self._personal_content = None
self._query_parameters = None
self._shared_content = None
self._prioritization = None
self.discriminator = None
if (content_caching_parent_capabilities_id is not None):
self.content_caching_parent_capabilities_id = content_caching_parent_capabilities_id
if (imports is not None):
self.imports = imports
if (namespaces is not None):
self.namespaces = namespaces
if (personal_content is not None):
self.personal_content = personal_content
if (query_parameters is not None):
self.query_parameters = query_parameters
if (shared_content is not None):
self.shared_content = shared_content
if (prioritization is not None):
self.prioritization = prioritization
|
def __init__(self, content_caching_parent_capabilities_id=None, imports=None, namespaces=None, personal_content=None, query_parameters=None, shared_content=None, prioritization=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content_caching_parent_capabilities_id = None
self._imports = None
self._namespaces = None
self._personal_content = None
self._query_parameters = None
self._shared_content = None
self._prioritization = None
self.discriminator = None
if (content_caching_parent_capabilities_id is not None):
self.content_caching_parent_capabilities_id = content_caching_parent_capabilities_id
if (imports is not None):
self.imports = imports
if (namespaces is not None):
self.namespaces = namespaces
if (personal_content is not None):
self.personal_content = personal_content
if (query_parameters is not None):
self.query_parameters = query_parameters
if (shared_content is not None):
self.shared_content = shared_content
if (prioritization is not None):
self.prioritization = prioritization<|docstring|>ComputerContentCachingParentCapabilities - a model defined in OpenAPI<|endoftext|>
|
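In the generated Jamf OpenAPI model above, every keyword argument that is not None is routed through the corresponding property setter, and a Configuration() is created when none is supplied. A short usage sketch, assuming the generated `jamf` client package from this repository is installed:

```python
# Hedged usage sketch for the generated constructor shown above.
# Assumes the jamf package generated for jensenbox/python-jamf is importable.
from jamf.models.computer_content_caching_parent_capabilities import (
    ComputerContentCachingParentCapabilities,
)

caps = ComputerContentCachingParentCapabilities(
    content_caching_parent_capabilities_id="1",
    imports=True,
    namespaces=False,
)

print(caps.imports)            # True  (set via the property setter)
print(caps.personal_content)   # None  (argument omitted, so never set)
```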
187776c2c4840567382766ca950f99c3f6f1f8ad71654cd737ad8cb2d7b30097
|
@property
def content_caching_parent_capabilities_id(self):
'Gets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: str\n '
return self._content_caching_parent_capabilities_id
|
Gets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: str
|
jamf/models/computer_content_caching_parent_capabilities.py
|
content_caching_parent_capabilities_id
|
jensenbox/python-jamf
| 1 |
python
|
@property
def content_caching_parent_capabilities_id(self):
'Gets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: str\n '
return self._content_caching_parent_capabilities_id
|
@property
def content_caching_parent_capabilities_id(self):
'Gets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: str\n '
return self._content_caching_parent_capabilities_id<|docstring|>Gets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: str<|endoftext|>
|
caf7db01989b53d8d85f1c5ab25a0d8b7f85a209985eb249b08dfe6972e06ba8
|
@content_caching_parent_capabilities_id.setter
def content_caching_parent_capabilities_id(self, content_caching_parent_capabilities_id):
'Sets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities.\n\n\n :param content_caching_parent_capabilities_id: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type content_caching_parent_capabilities_id: str\n '
self._content_caching_parent_capabilities_id = content_caching_parent_capabilities_id
|
Sets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities.
:param content_caching_parent_capabilities_id: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:type content_caching_parent_capabilities_id: str
|
jamf/models/computer_content_caching_parent_capabilities.py
|
content_caching_parent_capabilities_id
|
jensenbox/python-jamf
| 1 |
python
|
@content_caching_parent_capabilities_id.setter
def content_caching_parent_capabilities_id(self, content_caching_parent_capabilities_id):
'Sets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities.\n\n\n :param content_caching_parent_capabilities_id: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type content_caching_parent_capabilities_id: str\n '
self._content_caching_parent_capabilities_id = content_caching_parent_capabilities_id
|
@content_caching_parent_capabilities_id.setter
def content_caching_parent_capabilities_id(self, content_caching_parent_capabilities_id):
'Sets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities.\n\n\n :param content_caching_parent_capabilities_id: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type content_caching_parent_capabilities_id: str\n '
self._content_caching_parent_capabilities_id = content_caching_parent_capabilities_id<|docstring|>Sets the content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities.
:param content_caching_parent_capabilities_id: The content_caching_parent_capabilities_id of this ComputerContentCachingParentCapabilities. # noqa: E501
:type content_caching_parent_capabilities_id: str<|endoftext|>
|
1941cad6abea63f5282c6c557a154a92b3990f682756dce0c8c9f8e081976982
|
@property
def imports(self):
'Gets the imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._imports
|
Gets the imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
imports
|
jensenbox/python-jamf
| 1 |
python
|
@property
def imports(self):
'Gets the imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._imports
|
@property
def imports(self):
'Gets the imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._imports<|docstring|>Gets the imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
05aeaebef6d616922718b1f136a43885e661d7eab3f6252353b7e7db5f5e3c56
|
@imports.setter
def imports(self, imports):
'Sets the imports of this ComputerContentCachingParentCapabilities.\n\n\n :param imports: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type imports: bool\n '
self._imports = imports
|
Sets the imports of this ComputerContentCachingParentCapabilities.
:param imports: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:type imports: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
imports
|
jensenbox/python-jamf
| 1 |
python
|
@imports.setter
def imports(self, imports):
'Sets the imports of this ComputerContentCachingParentCapabilities.\n\n\n :param imports: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type imports: bool\n '
self._imports = imports
|
@imports.setter
def imports(self, imports):
'Sets the imports of this ComputerContentCachingParentCapabilities.\n\n\n :param imports: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type imports: bool\n '
self._imports = imports<|docstring|>Sets the imports of this ComputerContentCachingParentCapabilities.
:param imports: The imports of this ComputerContentCachingParentCapabilities. # noqa: E501
:type imports: bool<|endoftext|>
|
ed3c7ef8c8d9421691ae6cfea02a249a62aa59351c6a74e7eb7a81c9894b6eb4
|
@property
def namespaces(self):
'Gets the namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._namespaces
|
Gets the namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
namespaces
|
jensenbox/python-jamf
| 1 |
python
|
@property
def namespaces(self):
'Gets the namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._namespaces
|
@property
def namespaces(self):
'Gets the namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._namespaces<|docstring|>Gets the namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
ad212932ef017938c222d50cd5f5c5ddffedefe9aa8f9f8fadba8f40d2c43e48
|
@namespaces.setter
def namespaces(self, namespaces):
'Sets the namespaces of this ComputerContentCachingParentCapabilities.\n\n\n :param namespaces: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type namespaces: bool\n '
self._namespaces = namespaces
|
Sets the namespaces of this ComputerContentCachingParentCapabilities.
:param namespaces: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:type namespaces: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
namespaces
|
jensenbox/python-jamf
| 1 |
python
|
@namespaces.setter
def namespaces(self, namespaces):
'Sets the namespaces of this ComputerContentCachingParentCapabilities.\n\n\n :param namespaces: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type namespaces: bool\n '
self._namespaces = namespaces
|
@namespaces.setter
def namespaces(self, namespaces):
'Sets the namespaces of this ComputerContentCachingParentCapabilities.\n\n\n :param namespaces: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type namespaces: bool\n '
self._namespaces = namespaces<|docstring|>Sets the namespaces of this ComputerContentCachingParentCapabilities.
:param namespaces: The namespaces of this ComputerContentCachingParentCapabilities. # noqa: E501
:type namespaces: bool<|endoftext|>
|
0cebc95241ece33bb9aa8ab0dbcbc651df550ad4892ff026c7bc551b458024b1
|
@property
def personal_content(self):
'Gets the personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._personal_content
|
Gets the personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
personal_content
|
jensenbox/python-jamf
| 1 |
python
|
@property
def personal_content(self):
'Gets the personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._personal_content
|
@property
def personal_content(self):
'Gets the personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._personal_content<|docstring|>Gets the personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
61a991855ede7dafdb5608e90b07f4eaaeca3b0c9315cbe648f82de813cd90a8
|
@personal_content.setter
def personal_content(self, personal_content):
'Sets the personal_content of this ComputerContentCachingParentCapabilities.\n\n\n :param personal_content: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type personal_content: bool\n '
self._personal_content = personal_content
|
Sets the personal_content of this ComputerContentCachingParentCapabilities.
:param personal_content: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:type personal_content: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
personal_content
|
jensenbox/python-jamf
| 1 |
python
|
@personal_content.setter
def personal_content(self, personal_content):
'Sets the personal_content of this ComputerContentCachingParentCapabilities.\n\n\n :param personal_content: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type personal_content: bool\n '
self._personal_content = personal_content
|
@personal_content.setter
def personal_content(self, personal_content):
'Sets the personal_content of this ComputerContentCachingParentCapabilities.\n\n\n :param personal_content: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type personal_content: bool\n '
self._personal_content = personal_content<|docstring|>Sets the personal_content of this ComputerContentCachingParentCapabilities.
:param personal_content: The personal_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:type personal_content: bool<|endoftext|>
|
6b7affc48cf86b179451a96345d8ce94d69381707d56773f19b3b82166a10715
|
@property
def query_parameters(self):
'Gets the query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._query_parameters
|
Gets the query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
query_parameters
|
jensenbox/python-jamf
| 1 |
python
|
@property
def query_parameters(self):
'Gets the query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._query_parameters
|
@property
def query_parameters(self):
'Gets the query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._query_parameters<|docstring|>Gets the query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
96a423acda405ceb3b1aeb4605e80bdb150beaef475bf05fb9156e64ef0ce7f7
|
@query_parameters.setter
def query_parameters(self, query_parameters):
'Sets the query_parameters of this ComputerContentCachingParentCapabilities.\n\n\n :param query_parameters: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type query_parameters: bool\n '
self._query_parameters = query_parameters
|
Sets the query_parameters of this ComputerContentCachingParentCapabilities.
:param query_parameters: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:type query_parameters: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
query_parameters
|
jensenbox/python-jamf
| 1 |
python
|
@query_parameters.setter
def query_parameters(self, query_parameters):
'Sets the query_parameters of this ComputerContentCachingParentCapabilities.\n\n\n :param query_parameters: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type query_parameters: bool\n '
self._query_parameters = query_parameters
|
@query_parameters.setter
def query_parameters(self, query_parameters):
'Sets the query_parameters of this ComputerContentCachingParentCapabilities.\n\n\n :param query_parameters: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type query_parameters: bool\n '
self._query_parameters = query_parameters<|docstring|>Sets the query_parameters of this ComputerContentCachingParentCapabilities.
:param query_parameters: The query_parameters of this ComputerContentCachingParentCapabilities. # noqa: E501
:type query_parameters: bool<|endoftext|>
|
8b6e5dbb69b09dd1b7b1ae7bb6c6244091ff5ba01038ad3b7d2505aaacea3c35
|
@property
def shared_content(self):
'Gets the shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._shared_content
|
Gets the shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
shared_content
|
jensenbox/python-jamf
| 1 |
python
|
@property
def shared_content(self):
'Gets the shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._shared_content
|
@property
def shared_content(self):
'Gets the shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._shared_content<|docstring|>Gets the shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
48469b2b33844797435c4eae8bbec5a153842a24dd08dc509aed630f471136ae
|
@shared_content.setter
def shared_content(self, shared_content):
'Sets the shared_content of this ComputerContentCachingParentCapabilities.\n\n\n :param shared_content: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type shared_content: bool\n '
self._shared_content = shared_content
|
Sets the shared_content of this ComputerContentCachingParentCapabilities.
:param shared_content: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:type shared_content: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
shared_content
|
jensenbox/python-jamf
| 1 |
python
|
@shared_content.setter
def shared_content(self, shared_content):
'Sets the shared_content of this ComputerContentCachingParentCapabilities.\n\n\n :param shared_content: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type shared_content: bool\n '
self._shared_content = shared_content
|
@shared_content.setter
def shared_content(self, shared_content):
'Sets the shared_content of this ComputerContentCachingParentCapabilities.\n\n\n :param shared_content: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type shared_content: bool\n '
self._shared_content = shared_content<|docstring|>Sets the shared_content of this ComputerContentCachingParentCapabilities.
:param shared_content: The shared_content of this ComputerContentCachingParentCapabilities. # noqa: E501
:type shared_content: bool<|endoftext|>
|
a6a6835c5dc884b6661fe3dda608a13f44486e46d72fc28d05d74cbb3191a978
|
@property
def prioritization(self):
'Gets the prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._prioritization
|
Gets the prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
prioritization
|
jensenbox/python-jamf
| 1 |
python
|
@property
def prioritization(self):
'Gets the prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._prioritization
|
@property
def prioritization(self):
'Gets the prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n\n\n :return: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :rtype: bool\n '
return self._prioritization<|docstring|>Gets the prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:return: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:rtype: bool<|endoftext|>
|
a8c3def180e674394d66d4e5a8a7102607133e7c3fa8aa6a928c17fe58ce51cd
|
@prioritization.setter
def prioritization(self, prioritization):
'Sets the prioritization of this ComputerContentCachingParentCapabilities.\n\n\n :param prioritization: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type prioritization: bool\n '
self._prioritization = prioritization
|
Sets the prioritization of this ComputerContentCachingParentCapabilities.
:param prioritization: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:type prioritization: bool
|
jamf/models/computer_content_caching_parent_capabilities.py
|
prioritization
|
jensenbox/python-jamf
| 1 |
python
|
@prioritization.setter
def prioritization(self, prioritization):
'Sets the prioritization of this ComputerContentCachingParentCapabilities.\n\n\n :param prioritization: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type prioritization: bool\n '
self._prioritization = prioritization
|
@prioritization.setter
def prioritization(self, prioritization):
'Sets the prioritization of this ComputerContentCachingParentCapabilities.\n\n\n :param prioritization: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501\n :type prioritization: bool\n '
self._prioritization = prioritization<|docstring|>Sets the prioritization of this ComputerContentCachingParentCapabilities.
:param prioritization: The prioritization of this ComputerContentCachingParentCapabilities. # noqa: E501
:type prioritization: bool<|endoftext|>
|
5a4e41bb6a0def746593298cb605df98f1366e957c4ca89b12010ea7db707963
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
Returns the model properties as a dict
|
jamf/models/computer_content_caching_parent_capabilities.py
|
to_dict
|
jensenbox/python-jamf
| 1 |
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
jamf/models/computer_content_caching_parent_capabilities.py
|
to_str
|
jensenbox/python-jamf
| 1 |
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
jamf/models/computer_content_caching_parent_capabilities.py
|
__repr__
|
jensenbox/python-jamf
| 1 |
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
e1d3947da68403c4865fd043ca290ba466e045bad9d50292d5fb5b736ac2ab89
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return False
return (self.to_dict() == other.to_dict())
|
Returns true if both objects are equal
|
jamf/models/computer_content_caching_parent_capabilities.py
|
__eq__
|
jensenbox/python-jamf
| 1 |
python
|
def __eq__(self, other):
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return False
return (self.to_dict() == other.to_dict())
|
def __eq__(self, other):
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
|
7954d02bc09fedecd5a074434db76e84de652db96110887c518c32f76d37e919
|
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return True
return (self.to_dict() != other.to_dict())
|
Returns true if both objects are not equal
|
jamf/models/computer_content_caching_parent_capabilities.py
|
__ne__
|
jensenbox/python-jamf
| 1 |
python
|
def __ne__(self, other):
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return True
return (self.to_dict() != other.to_dict())
|
def __ne__(self, other):
if (not isinstance(other, ComputerContentCachingParentCapabilities)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
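The to_dict, to_str, __repr__, __eq__ and __ne__ rows above are the standard OpenAPI-generator helpers: to_dict recursively converts nested models, to_str pretty-prints that dict (and backs __repr__), and equality is defined as equality of the to_dict output. A brief sketch under the same assumption that the generated `jamf` package is importable:

```python
# Hedged sketch of the serialization and comparison helpers shown above.
from jamf.models.computer_content_caching_parent_capabilities import (
    ComputerContentCachingParentCapabilities,
)

a = ComputerContentCachingParentCapabilities(imports=True)
b = ComputerContentCachingParentCapabilities(imports=True)

print(a.to_dict())   # plain dict; nested models would be converted via to_dict
print(a.to_str())    # pprint.pformat of that dict, also returned by repr(a)
print(a == b)        # True: __eq__ compares the to_dict() results
print(a != ComputerContentCachingParentCapabilities(imports=False))  # True
```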
c5ffa81203ba9877647ec6c2cf868f6756fc3faec515440123172d0b831dd612
|
def get_legislation_affecting_rcw_cite(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
'See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcwCite'
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcwCite', argdict, keydict)
|
See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcwCite
|
wa_leg_api/rcwciteaffected.py
|
get_legislation_affecting_rcw_cite
|
ryansloan/wa-leg-api
| 3 |
python
|
def get_legislation_affecting_rcw_cite(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcwCite', argdict, keydict)
|
def get_legislation_affecting_rcw_cite(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcwCite', argdict, keydict)<|docstring|>See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcwCite<|endoftext|>
|
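This wrapper (and the nearly identical get_legislation_affecting_rcw in the next row) builds an argument dict plus a key/type-conversion dict and forwards both to the shared waleg.call helper. A hedged usage sketch, assuming the wa-leg-api package is installed and the legislature web service is reachable; the biennium and citation values are illustrative:

```python
# Hedged usage sketch; running it performs a live call to the
# Washington State Legislature web service.
from wa_leg_api.rcwciteaffected import get_legislation_affecting_rcw_cite

# Biennium strings look like "2019-20"; the RCW citation is illustrative.
result = get_legislation_affecting_rcw_cite("2019-20", "1.08.010")
print(result)  # dict whose billnumber/substituteversion/engrossedversion are
               # ints and whose active field is a bool, per the keydict above
```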
137686993c5c9cbef09dec2e26a9c6c25ce5ef7ff1fbd7dc0bc0495107f34bd1
|
def get_legislation_affecting_rcw(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
'See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcw'
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcw', argdict, keydict)
|
See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcw
|
wa_leg_api/rcwciteaffected.py
|
get_legislation_affecting_rcw
|
ryansloan/wa-leg-api
| 3 |
python
|
def get_legislation_affecting_rcw(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcw', argdict, keydict)
|
def get_legislation_affecting_rcw(biennium: str, rcw_cite: str) -> Dict[(str, Any)]:
argdict: Dict[(str, Any)] = dict(biennium=biennium, rcwCite=rcw_cite)
keydict: Dict[(str, Any)] = {'billnumber': int, 'substituteversion': int, 'engrossedversion': int, 'active': (lambda boolstr: (boolstr.lower() == 'true'))}
return waleg.call('RcwCiteAffected', 'GetLegislationAffectingRcw', argdict, keydict)<|docstring|>See: http://wslwebservices.leg.wa.gov/rcwciteaffectedservice.asmx?op=GetLegislationAffectingRcw<|endoftext|>
|
55eb2c1c98644dc267061c06b5ed6b99311b545a8a14f8e238746bfd846d1ff3
|
def _match_query(query, attrs, attrs_checked):
'Match an ldap query to an attribute dictionary.\n\n The characters &, |, and ! are supported in the query. No syntax checking\n is performed, so malformed queries will not work correctly.\n '
inner = query[1:(- 1)]
if inner.startswith(('&', '|')):
if (inner[0] == '&'):
matchfn = all
else:
matchfn = any
groups = _paren_groups(inner[1:])
return matchfn((_match_query(group, attrs, attrs_checked) for group in groups))
if inner.startswith('!'):
return (not _match_query(query[2:(- 1)], attrs, attrs_checked))
(k, _sep, v) = inner.partition('=')
attrs_checked.add(k.lower())
return _match(k, v, attrs)
|
Match an ldap query to an attribute dictionary.
The characters &, |, and ! are supported in the query. No syntax checking
is performed, so malformed queries will not work correctly.
|
keystone/tests/unit/fakeldap.py
|
_match_query
|
knikolla/keystone
| 615 |
python
|
def _match_query(query, attrs, attrs_checked):
'Match an ldap query to an attribute dictionary.\n\n The characters &, |, and ! are supported in the query. No syntax checking\n is performed, so malformed queries will not work correctly.\n '
inner = query[1:(- 1)]
if inner.startswith(('&', '|')):
if (inner[0] == '&'):
matchfn = all
else:
matchfn = any
groups = _paren_groups(inner[1:])
return matchfn((_match_query(group, attrs, attrs_checked) for group in groups))
if inner.startswith('!'):
return (not _match_query(query[2:(- 1)], attrs, attrs_checked))
(k, _sep, v) = inner.partition('=')
attrs_checked.add(k.lower())
return _match(k, v, attrs)
|
def _match_query(query, attrs, attrs_checked):
'Match an ldap query to an attribute dictionary.\n\n The characters &, |, and ! are supported in the query. No syntax checking\n is performed, so malformed queries will not work correctly.\n '
inner = query[1:(- 1)]
if inner.startswith(('&', '|')):
if (inner[0] == '&'):
matchfn = all
else:
matchfn = any
groups = _paren_groups(inner[1:])
return matchfn((_match_query(group, attrs, attrs_checked) for group in groups))
if inner.startswith('!'):
return (not _match_query(query[2:(- 1)], attrs, attrs_checked))
(k, _sep, v) = inner.partition('=')
attrs_checked.add(k.lower())
return _match(k, v, attrs)<|docstring|>Match an ldap query to an attribute dictionary.
The characters &, |, and ! are supported in the query. No syntax checking
is performed, so malformed queries will not work correctly.<|endoftext|>
|
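The rows that follow supply the helpers _match_query relies on (_paren_groups, _match, _subs). As a self-contained illustration of the same recursive &/|/! evaluation, the sketch below swaps the leaf comparison for plain case-insensitive equality, so it deliberately omits the wildcard, objectclass and serviceId handling of the real _match:

```python
# Standalone sketch of the recursive LDAP-filter evaluation shown above.
# Simplified leaf matching: case-insensitive equality only.

def paren_groups(source):
    """Split a string into top-level parenthesized groups."""
    count, start, result = 0, 0, []
    for pos, char in enumerate(source):
        if char == '(':
            if count == 0:
                start = pos
            count += 1
        if char == ')':
            count -= 1
            if count == 0:
                result.append(source[start:pos + 1])
    return result

def match_query(query, attrs):
    """Evaluate an LDAP-style filter against a dict of attribute lists."""
    inner = query[1:-1]                        # strip the outer parentheses
    if inner.startswith(('&', '|')):
        matchfn = all if inner[0] == '&' else any
        return matchfn(match_query(group, attrs)
                       for group in paren_groups(inner[1:]))
    if inner.startswith('!'):
        return not match_query(query[2:-1], attrs)
    key, _sep, value = inner.partition('=')
    return value.lower() in [v.lower() for v in attrs.get(key, [])]

attrs = {'cn': ['demo'], 'enabled': ['TRUE']}
print(match_query('(&(cn=demo)(enabled=true))', attrs))   # True
print(match_query('(|(cn=other)(!(cn=demo)))', attrs))    # False
```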
8b61af187d06195199301614d780516d2250d7e0869ef69ebd93e53f2d85c967
|
def _paren_groups(source):
'Split a string into parenthesized groups.'
count = 0
start = 0
result = []
for pos in range(len(source)):
if (source[pos] == '('):
if (count == 0):
start = pos
count += 1
if (source[pos] == ')'):
count -= 1
if (count == 0):
result.append(source[start:(pos + 1)])
return result
|
Split a string into parenthesized groups.
|
keystone/tests/unit/fakeldap.py
|
_paren_groups
|
knikolla/keystone
| 615 |
python
|
def _paren_groups(source):
count = 0
start = 0
result = []
for pos in range(len(source)):
if (source[pos] == '('):
if (count == 0):
start = pos
count += 1
if (source[pos] == ')'):
count -= 1
if (count == 0):
result.append(source[start:(pos + 1)])
return result
|
def _paren_groups(source):
count = 0
start = 0
result = []
for pos in range(len(source)):
if (source[pos] == '('):
if (count == 0):
start = pos
count += 1
if (source[pos] == ')'):
count -= 1
if (count == 0):
result.append(source[start:(pos + 1)])
return result<|docstring|>Split a string into parenthesized groups.<|endoftext|>
|
d518857f8226c21cc53fe52a024ea9f75d18c2e163dc380593a49db64cf46c0c
|
def _match(key, value, attrs):
'Match a given key and value against an attribute list.'
def match_with_wildcards(norm_val, val_list):
if norm_val.startswith('*'):
if norm_val.endswith('*'):
for x in val_list:
if (norm_val[1:(- 1)] in x):
return True
else:
for x in val_list:
if (norm_val[1:] == x[((len(x) - len(norm_val)) + 1):]):
return True
elif norm_val.endswith('*'):
for x in val_list:
if (norm_val[:(- 1)] == x[:(len(norm_val) - 1)]):
return True
else:
for x in val_list:
if (check_value == x):
return True
return False
if (key not in attrs):
return False
if (value == '*'):
return True
if (key == 'serviceId'):
str_sids = [str(x) for x in attrs[key]]
return (str(value) in str_sids)
if (key != 'objectclass'):
check_value = _internal_attr(key, value)[0].lower()
norm_values = list((_internal_attr(key, x)[0].lower() for x in attrs[key]))
return match_with_wildcards(check_value, norm_values)
values = _subs(value)
for v in values:
if (v in attrs[key]):
return True
return False
|
Match a given key and value against an attribute list.
|
keystone/tests/unit/fakeldap.py
|
_match
|
knikolla/keystone
| 615 |
python
|
def _match(key, value, attrs):
def match_with_wildcards(norm_val, val_list):
if norm_val.startswith('*'):
if norm_val.endswith('*'):
for x in val_list:
if (norm_val[1:(- 1)] in x):
return True
else:
for x in val_list:
if (norm_val[1:] == x[((len(x) - len(norm_val)) + 1):]):
return True
elif norm_val.endswith('*'):
for x in val_list:
if (norm_val[:(- 1)] == x[:(len(norm_val) - 1)]):
return True
else:
for x in val_list:
if (check_value == x):
return True
return False
if (key not in attrs):
return False
if (value == '*'):
return True
if (key == 'serviceId'):
str_sids = [str(x) for x in attrs[key]]
return (str(value) in str_sids)
if (key != 'objectclass'):
check_value = _internal_attr(key, value)[0].lower()
norm_values = list((_internal_attr(key, x)[0].lower() for x in attrs[key]))
return match_with_wildcards(check_value, norm_values)
values = _subs(value)
for v in values:
if (v in attrs[key]):
return True
return False
|
def _match(key, value, attrs):
def match_with_wildcards(norm_val, val_list):
if norm_val.startswith('*'):
if norm_val.endswith('*'):
for x in val_list:
if (norm_val[1:(- 1)] in x):
return True
else:
for x in val_list:
if (norm_val[1:] == x[((len(x) - len(norm_val)) + 1):]):
return True
elif norm_val.endswith('*'):
for x in val_list:
if (norm_val[:(- 1)] == x[:(len(norm_val) - 1)]):
return True
else:
for x in val_list:
if (check_value == x):
return True
return False
if (key not in attrs):
return False
if (value == '*'):
return True
if (key == 'serviceId'):
str_sids = [str(x) for x in attrs[key]]
return (str(value) in str_sids)
if (key != 'objectclass'):
check_value = _internal_attr(key, value)[0].lower()
norm_values = list((_internal_attr(key, x)[0].lower() for x in attrs[key]))
return match_with_wildcards(check_value, norm_values)
values = _subs(value)
for v in values:
if (v in attrs[key]):
return True
return False<|docstring|>Match a given key and value against an attribute list.<|endoftext|>
|
9e2115fc5dedfee6c62a25e1d46a50c38f91351dc9574abf0663ac05f730f3b0
|
def _subs(value):
"Return a list of subclass strings.\n\n The strings represent the ldap objectclass plus any subclasses that\n inherit from it. Fakeldap doesn't know about the ldap object structure,\n so subclasses need to be defined manually in the dictionary below.\n\n "
subs = {'groupOfNames': ['keystoneProject', 'keystoneRole', 'keystoneProjectRole']}
if (value in subs):
return ([value] + subs[value])
return [value]
|
Return a list of subclass strings.
The strings represent the ldap objectclass plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
|
keystone/tests/unit/fakeldap.py
|
_subs
|
knikolla/keystone
| 615 |
python
|
def _subs(value):
"Return a list of subclass strings.\n\n The strings represent the ldap objectclass plus any subclasses that\n inherit from it. Fakeldap doesn't know about the ldap object structure,\n so subclasses need to be defined manually in the dictionary below.\n\n "
subs = {'groupOfNames': ['keystoneProject', 'keystoneRole', 'keystoneProjectRole']}
if (value in subs):
return ([value] + subs[value])
return [value]
|
def _subs(value):
"Return a list of subclass strings.\n\n The strings represent the ldap objectclass plus any subclasses that\n inherit from it. Fakeldap doesn't know about the ldap object structure,\n so subclasses need to be defined manually in the dictionary below.\n\n "
subs = {'groupOfNames': ['keystoneProject', 'keystoneRole', 'keystoneProjectRole']}
if (value in subs):
return ([value] + subs[value])
return [value]<|docstring|>Return a list of subclass strings.
The strings represent the ldap objectclass plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.<|endoftext|>
|
082fac186e27aad6993833298ac5dc5cf983e0f8f9c65735d44b78aee9976bc0
|
def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None):
'Provide for compatibility but this method is ignored.'
if server_fail:
raise ldap.SERVER_DOWN
whos = ['cn=Admin', CONF.ldap.user]
if ((who in whos) and (cred in ['password', CONF.ldap.password])):
return
attrs = self.db.get(self.key(who))
if (not attrs):
LOG.debug('who=%s not found, binding anonymously', who)
db_password = ''
if attrs:
try:
db_password = attrs['userPassword'][0]
except (KeyError, IndexError):
LOG.debug('bind fail: password for who=%s not found', who)
raise ldap.INAPPROPRIATE_AUTH
if (cred != db_password):
LOG.debug('bind fail: password for who=%s does not match', who)
raise ldap.INVALID_CREDENTIALS
|
Provide for compatibility but this method is ignored.
|
keystone/tests/unit/fakeldap.py
|
simple_bind_s
|
knikolla/keystone
| 615 |
python
|
def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
whos = ['cn=Admin', CONF.ldap.user]
if ((who in whos) and (cred in ['password', CONF.ldap.password])):
return
attrs = self.db.get(self.key(who))
if (not attrs):
LOG.debug('who=%s not found, binding anonymously', who)
        db_password = ''
if attrs:
try:
db_password = attrs['userPassword'][0]
except (KeyError, IndexError):
LOG.debug('bind fail: password for who=%s not found', who)
raise ldap.INAPPROPRIATE_AUTH
if (cred != db_password):
LOG.debug('bind fail: password for who=%s does not match', who)
raise ldap.INVALID_CREDENTIALS
|
def simple_bind_s(self, who='', cred='', serverctrls=None, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
whos = ['cn=Admin', CONF.ldap.user]
if ((who in whos) and (cred in ['password', CONF.ldap.password])):
return
attrs = self.db.get(self.key(who))
if (not attrs):
LOG.debug('who=%s not found, binding anonymously', who)
        db_password = ''
if attrs:
try:
db_password = attrs['userPassword'][0]
except (KeyError, IndexError):
LOG.debug('bind fail: password for who=%s not found', who)
raise ldap.INAPPROPRIATE_AUTH
if (cred != db_password):
LOG.debug('bind fail: password for who=%s does not match', who)
raise ldap.INVALID_CREDENTIALS<|docstring|>Provide for compatibility but this method is ignored.<|endoftext|>
|
e914d7e8c4478e113775474eb4d0be176564632471798d675f3331c1eeff7543
|
def unbind_s(self):
'Provide for compatibility but this method is ignored.'
if server_fail:
raise ldap.SERVER_DOWN
|
Provide for compatibility but this method is ignored.
|
keystone/tests/unit/fakeldap.py
|
unbind_s
|
knikolla/keystone
| 615 |
python
|
def unbind_s(self):
if server_fail:
raise ldap.SERVER_DOWN
|
def unbind_s(self):
if server_fail:
raise ldap.SERVER_DOWN<|docstring|>Provide for compatibility but this method is ignored.<|endoftext|>
|
276196af5ecf09ddde0e5b281783eb7683dd7fd18fa8b944965b19155af7e157
|
def add_s(self, dn, modlist):
'Add an object with the specified attributes at dn.'
if server_fail:
raise ldap.SERVER_DOWN
id_attr_in_modlist = False
id_attr = self._dn_to_id_attr(dn)
id_value = self._dn_to_id_value(dn)
for (k, dummy_v) in modlist:
if (k is None):
raise TypeError(('must be string, not None. modlist=%s' % modlist))
if (k == id_attr):
for val in dummy_v:
if (common.utf8_decode(val) == id_value):
id_attr_in_modlist = True
if (not id_attr_in_modlist):
LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s', {'attr': id_attr, 'attrs': modlist})
raise ldap.NAMING_VIOLATION
key = self.key(dn)
LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
if (key in self.db):
LOG.debug('add item failed: dn=%s is already in store.', dn)
raise ldap.ALREADY_EXISTS(dn)
self.db[key] = {k: _internal_attr(k, v) for (k, v) in modlist}
self.db.sync()
|
Add an object with the specified attributes at dn.
|
keystone/tests/unit/fakeldap.py
|
add_s
|
knikolla/keystone
| 615 |
python
|
def add_s(self, dn, modlist):
if server_fail:
raise ldap.SERVER_DOWN
id_attr_in_modlist = False
id_attr = self._dn_to_id_attr(dn)
id_value = self._dn_to_id_value(dn)
for (k, dummy_v) in modlist:
if (k is None):
raise TypeError(('must be string, not None. modlist=%s' % modlist))
if (k == id_attr):
for val in dummy_v:
if (common.utf8_decode(val) == id_value):
id_attr_in_modlist = True
if (not id_attr_in_modlist):
LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s', {'attr': id_attr, 'attrs': modlist})
raise ldap.NAMING_VIOLATION
key = self.key(dn)
LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
if (key in self.db):
LOG.debug('add item failed: dn=%s is already in store.', dn)
raise ldap.ALREADY_EXISTS(dn)
self.db[key] = {k: _internal_attr(k, v) for (k, v) in modlist}
self.db.sync()
|
def add_s(self, dn, modlist):
if server_fail:
raise ldap.SERVER_DOWN
id_attr_in_modlist = False
id_attr = self._dn_to_id_attr(dn)
id_value = self._dn_to_id_value(dn)
for (k, dummy_v) in modlist:
if (k is None):
raise TypeError(('must be string, not None. modlist=%s' % modlist))
if (k == id_attr):
for val in dummy_v:
if (common.utf8_decode(val) == id_value):
id_attr_in_modlist = True
if (not id_attr_in_modlist):
LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s', {'attr': id_attr, 'attrs': modlist})
raise ldap.NAMING_VIOLATION
key = self.key(dn)
LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
if (key in self.db):
LOG.debug('add item failed: dn=%s is already in store.', dn)
raise ldap.ALREADY_EXISTS(dn)
self.db[key] = {k: _internal_attr(k, v) for (k, v) in modlist}
self.db.sync()<|docstring|>Add an object with the specified attributes at dn.<|endoftext|>
|
b5594b951eb09f5fd6e32f4f034f745e115a410406a8687e3adb0905874eca2d
|
def delete_s(self, dn):
'Remove the ldap object at specified dn.'
return self.delete_ext_s(dn, serverctrls=[])
|
Remove the ldap object at specified dn.
|
keystone/tests/unit/fakeldap.py
|
delete_s
|
knikolla/keystone
| 615 |
python
|
def delete_s(self, dn):
return self.delete_ext_s(dn, serverctrls=[])
|
def delete_s(self, dn):
return self.delete_ext_s(dn, serverctrls=[])<|docstring|>Remove the ldap object at specified dn.<|endoftext|>
|
b790a9c567ca29f2aea3ccfa570d7a65a102f6bc7a18de863d89ff19ee041534
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
'Remove the ldap object at specified dn.'
if server_fail:
raise ldap.SERVER_DOWN
try:
key = self.key(dn)
LOG.debug('FakeLdap delete item: dn=%s', dn)
del self.db[key]
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
self.db.sync()
|
Remove the ldap object at specified dn.
|
keystone/tests/unit/fakeldap.py
|
delete_ext_s
|
knikolla/keystone
| 615 |
python
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
try:
key = self.key(dn)
LOG.debug('FakeLdap delete item: dn=%s', dn)
del self.db[key]
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
self.db.sync()
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
try:
key = self.key(dn)
LOG.debug('FakeLdap delete item: dn=%s', dn)
del self.db[key]
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
self.db.sync()<|docstring|>Remove the ldap object at specified dn.<|endoftext|>
|
87d725372210094cae69c4775b9a7a0afb6d80754a9481bc8e20109e29b25eea
|
def modify_s(self, dn, modlist):
'Modify the object at dn using the attribute list.\n\n :param dn: an LDAP DN\n :param modlist: a list of tuples in the following form:\n ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)\n '
if server_fail:
raise ldap.SERVER_DOWN
key = self.key(dn)
LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
try:
entry = self.db[key]
except KeyError:
LOG.debug('modify item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
for (cmd, k, v) in modlist:
values = entry.setdefault(k, [])
if (cmd == ldap.MOD_ADD):
v = _internal_attr(k, v)
for x in v:
if (x in values):
raise ldap.TYPE_OR_VALUE_EXISTS
values += v
elif (cmd == ldap.MOD_REPLACE):
values[:] = _internal_attr(k, v)
elif (cmd == ldap.MOD_DELETE):
if (v is None):
if (not values):
LOG.debug('modify item failed: item has no attribute "%s" to delete', k)
raise ldap.NO_SUCH_ATTRIBUTE
values[:] = []
else:
for val in _internal_attr(k, v):
try:
values.remove(val)
except ValueError:
LOG.debug('modify item failed: item has no attribute "%(k)s" with value "%(v)s" to delete', {'k': k, 'v': val})
raise ldap.NO_SUCH_ATTRIBUTE
else:
LOG.debug('modify item failed: unknown command %s', cmd)
raise NotImplementedError(('modify_s action %s not implemented' % cmd))
self.db[key] = entry
self.db.sync()
|
Modify the object at dn using the attribute list.
:param dn: an LDAP DN
:param modlist: a list of tuples in the following form:
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
|
keystone/tests/unit/fakeldap.py
|
modify_s
|
knikolla/keystone
| 615 |
python
|
def modify_s(self, dn, modlist):
'Modify the object at dn using the attribute list.\n\n :param dn: an LDAP DN\n :param modlist: a list of tuples in the following form:\n ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)\n '
if server_fail:
raise ldap.SERVER_DOWN
key = self.key(dn)
LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
try:
entry = self.db[key]
except KeyError:
LOG.debug('modify item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
for (cmd, k, v) in modlist:
values = entry.setdefault(k, [])
if (cmd == ldap.MOD_ADD):
v = _internal_attr(k, v)
for x in v:
if (x in values):
raise ldap.TYPE_OR_VALUE_EXISTS
values += v
elif (cmd == ldap.MOD_REPLACE):
values[:] = _internal_attr(k, v)
elif (cmd == ldap.MOD_DELETE):
if (v is None):
if (not values):
LOG.debug('modify item failed: item has no attribute "%s" to delete', k)
raise ldap.NO_SUCH_ATTRIBUTE
values[:] = []
else:
for val in _internal_attr(k, v):
try:
values.remove(val)
except ValueError:
LOG.debug('modify item failed: item has no attribute "%(k)s" with value "%(v)s" to delete', {'k': k, 'v': val})
raise ldap.NO_SUCH_ATTRIBUTE
else:
LOG.debug('modify item failed: unknown command %s', cmd)
raise NotImplementedError(('modify_s action %s not implemented' % cmd))
self.db[key] = entry
self.db.sync()
|
def modify_s(self, dn, modlist):
'Modify the object at dn using the attribute list.\n\n :param dn: an LDAP DN\n :param modlist: a list of tuples in the following form:\n ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)\n '
if server_fail:
raise ldap.SERVER_DOWN
key = self.key(dn)
LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', {'dn': dn, 'attrs': modlist})
try:
entry = self.db[key]
except KeyError:
LOG.debug('modify item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
for (cmd, k, v) in modlist:
values = entry.setdefault(k, [])
if (cmd == ldap.MOD_ADD):
v = _internal_attr(k, v)
for x in v:
if (x in values):
raise ldap.TYPE_OR_VALUE_EXISTS
values += v
elif (cmd == ldap.MOD_REPLACE):
values[:] = _internal_attr(k, v)
elif (cmd == ldap.MOD_DELETE):
if (v is None):
if (not values):
LOG.debug('modify item failed: item has no attribute "%s" to delete', k)
raise ldap.NO_SUCH_ATTRIBUTE
values[:] = []
else:
for val in _internal_attr(k, v):
try:
values.remove(val)
except ValueError:
LOG.debug('modify item failed: item has no attribute "%(k)s" with value "%(v)s" to delete', {'k': k, 'v': val})
raise ldap.NO_SUCH_ATTRIBUTE
else:
LOG.debug('modify item failed: unknown command %s', cmd)
raise NotImplementedError(('modify_s action %s not implemented' % cmd))
self.db[key] = entry
self.db.sync()<|docstring|>Modify the object at dn using the attribute list.
:param dn: an LDAP DN
:param modlist: a list of tuples in the following form:
([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)<|endoftext|>
|
e3475d63f984d5dee66874e6bf72b261ac39e86db5482e181b152e00c9ee539c
|
def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
'Search for all matching objects under base using the query.\n\n Args:\n base -- dn to search under\n scope -- search scope (base, subtree, onelevel)\n filterstr -- filter objects by\n attrlist -- attrs to return. Returns all attrs if not specified\n\n '
if server_fail:
raise ldap.SERVER_DOWN
if ((not filterstr) and (scope != ldap.SCOPE_BASE)):
raise AssertionError('Search without filter on onelevel or subtree scope')
if (scope == ldap.SCOPE_BASE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_BASE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
elif (scope == ldap.SCOPE_SUBTREE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_SUBTREE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
extraresults = [(k[len(self.__prefix):], v) for (k, v) in self.db.items() if re.match(('%s.*,%s' % (re.escape(self.__prefix), re.escape(base))), k)]
results.extend(extraresults)
elif (scope == ldap.SCOPE_ONELEVEL):
def get_entries():
base_dn = ldap.dn.str2dn(base)
base_len = len(base_dn)
for (k, v) in self.db.items():
if (not k.startswith(self.__prefix)):
continue
k_dn_str = k[len(self.__prefix):]
k_dn = ldap.dn.str2dn(k_dn_str)
if (len(k_dn) != (base_len + 1)):
continue
if (k_dn[(- base_len):] != base_dn):
continue
(yield (k_dn_str, v))
results = list(get_entries())
else:
raise ldap.PROTOCOL_ERROR
objects = []
for (dn, attrs) in results:
(id_attr, id_val, _) = ldap.dn.str2dn(dn)[0][0]
match_attrs = attrs.copy()
match_attrs[id_attr] = [id_val]
attrs_checked = set()
if ((not filterstr) or _match_query(filterstr, match_attrs, attrs_checked)):
if (filterstr and (scope != ldap.SCOPE_BASE) and ('objectclass' not in attrs_checked)):
raise AssertionError('No objectClass in search filter')
attrs = {k: v for (k, v) in attrs.items() if ((not attrlist) or (k in attrlist))}
objects.append((dn, attrs))
return objects
|
Search for all matching objects under base using the query.
Args:
base -- dn to search under
scope -- search scope (base, subtree, onelevel)
filterstr -- filter objects by
attrlist -- attrs to return. Returns all attrs if not specified
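For orientation, a hedged sketch of querying the fake backend (the base DN, filter, and attribute names are invented; conn stands for a FakeLdap connection as in the earlier sketch). Note that for onelevel and subtree scopes the fake expects the filter to mention objectClass:

import ldap

results = conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        filterstr='(&(objectClass=person)(cn=alice))',
                        attrlist=['cn', 'mail'])
for dn, attrs in results:
    print(dn, attrs)  # each hit is a (dn, {attribute: [values]}) pair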
|
keystone/tests/unit/fakeldap.py
|
search_s
|
knikolla/keystone
| 615 |
python
|
def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
'Search for all matching objects under base using the query.\n\n Args:\n base -- dn to search under\n scope -- search scope (base, subtree, onelevel)\n filterstr -- filter objects by\n attrlist -- attrs to return. Returns all attrs if not specified\n\n '
if server_fail:
raise ldap.SERVER_DOWN
if ((not filterstr) and (scope != ldap.SCOPE_BASE)):
raise AssertionError('Search without filter on onelevel or subtree scope')
if (scope == ldap.SCOPE_BASE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_BASE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
elif (scope == ldap.SCOPE_SUBTREE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_SUBTREE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
extraresults = [(k[len(self.__prefix):], v) for (k, v) in self.db.items() if re.match(('%s.*,%s' % (re.escape(self.__prefix), re.escape(base))), k)]
results.extend(extraresults)
elif (scope == ldap.SCOPE_ONELEVEL):
def get_entries():
base_dn = ldap.dn.str2dn(base)
base_len = len(base_dn)
for (k, v) in self.db.items():
if (not k.startswith(self.__prefix)):
continue
k_dn_str = k[len(self.__prefix):]
k_dn = ldap.dn.str2dn(k_dn_str)
if (len(k_dn) != (base_len + 1)):
continue
if (k_dn[(- base_len):] != base_dn):
continue
(yield (k_dn_str, v))
results = list(get_entries())
else:
raise ldap.PROTOCOL_ERROR
objects = []
for (dn, attrs) in results:
(id_attr, id_val, _) = ldap.dn.str2dn(dn)[0][0]
match_attrs = attrs.copy()
match_attrs[id_attr] = [id_val]
attrs_checked = set()
if ((not filterstr) or _match_query(filterstr, match_attrs, attrs_checked)):
if (filterstr and (scope != ldap.SCOPE_BASE) and ('objectclass' not in attrs_checked)):
raise AssertionError('No objectClass in search filter')
attrs = {k: v for (k, v) in attrs.items() if ((not attrlist) or (k in attrlist))}
objects.append((dn, attrs))
return objects
|
def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
'Search for all matching objects under base using the query.\n\n Args:\n base -- dn to search under\n scope -- search scope (base, subtree, onelevel)\n filterstr -- filter objects by\n attrlist -- attrs to return. Returns all attrs if not specified\n\n '
if server_fail:
raise ldap.SERVER_DOWN
if ((not filterstr) and (scope != ldap.SCOPE_BASE)):
raise AssertionError('Search without filter on onelevel or subtree scope')
if (scope == ldap.SCOPE_BASE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_BASE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
elif (scope == ldap.SCOPE_SUBTREE):
try:
item_dict = self.db[self.key(base)]
except KeyError:
LOG.debug('search fail: dn not found for SCOPE_SUBTREE')
raise ldap.NO_SUCH_OBJECT
results = [(base, item_dict)]
extraresults = [(k[len(self.__prefix):], v) for (k, v) in self.db.items() if re.match(('%s.*,%s' % (re.escape(self.__prefix), re.escape(base))), k)]
results.extend(extraresults)
elif (scope == ldap.SCOPE_ONELEVEL):
def get_entries():
base_dn = ldap.dn.str2dn(base)
base_len = len(base_dn)
for (k, v) in self.db.items():
if (not k.startswith(self.__prefix)):
continue
k_dn_str = k[len(self.__prefix):]
k_dn = ldap.dn.str2dn(k_dn_str)
if (len(k_dn) != (base_len + 1)):
continue
if (k_dn[(- base_len):] != base_dn):
continue
(yield (k_dn_str, v))
results = list(get_entries())
else:
raise ldap.PROTOCOL_ERROR
objects = []
for (dn, attrs) in results:
(id_attr, id_val, _) = ldap.dn.str2dn(dn)[0][0]
match_attrs = attrs.copy()
match_attrs[id_attr] = [id_val]
attrs_checked = set()
if ((not filterstr) or _match_query(filterstr, match_attrs, attrs_checked)):
if (filterstr and (scope != ldap.SCOPE_BASE) and ('objectclass' not in attrs_checked)):
raise AssertionError('No objectClass in search filter')
attrs = {k: v for (k, v) in attrs.items() if ((not attrlist) or (k in attrlist))}
objects.append((dn, attrs))
return objects<|docstring|>Search for all matching objects under base using the query.
Args:
base -- dn to search under
scope -- search scope (base, subtree, onelevel)
filterstr -- filter objects by
attrlist -- attrs to return. Returns all attrs if not specified<|endoftext|>
|
29de18e041d67b59ed05209281d87ca9010fe5b5279770a316e1614b933853c5
|
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None):
'Execute async request.\n\n Only msgid param is supported. Request info is fetched from global\n variable `PendingRequests` by msgid, executed using search_s and\n limited if requested.\n '
if ((all != 1) or (timeout is not None) or (resp_ctrl_classes is not None)):
raise exception.NotImplemented()
params = PendingRequests[msgid]
results = self.search_s(*params[:5])
serverctrls = params[5]
ctrl = serverctrls[0]
if ctrl.size:
rdata = results[:ctrl.size]
else:
rdata = results
rtype = None
rmsgid = None
serverctrls = None
return (rtype, rdata, rmsgid, serverctrls)
|
Execute async request.
Only msgid param is supported. Request info is fetched from global
variable `PendingRequests` by msgid, executed using search_s and
limited if requested.
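A hedged sketch of the paged/async flow this supports; it assumes the fake exposes a search_ext that stores its parameters in PendingRequests and returns a msgid (that method is not shown in this entry):

import ldap
from ldap.controls import SimplePagedResultsControl

page_ctrl = SimplePagedResultsControl(True, size=10, cookie='')
msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(objectClass=person)', serverctrls=[page_ctrl])
rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)
# rdata holds at most 10 (dn, attrs) tuples; the other three return values are None here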
|
keystone/tests/unit/fakeldap.py
|
result3
|
knikolla/keystone
| 615 |
python
|
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None):
'Execute async request.\n\n Only msgid param is supported. Request info is fetched from global\n variable `PendingRequests` by msgid, executed using search_s and\n limited if requested.\n '
if ((all != 1) or (timeout is not None) or (resp_ctrl_classes is not None)):
raise exception.NotImplemented()
params = PendingRequests[msgid]
results = self.search_s(*params[:5])
serverctrls = params[5]
ctrl = serverctrls[0]
if ctrl.size:
rdata = results[:ctrl.size]
else:
rdata = results
rtype = None
rmsgid = None
serverctrls = None
return (rtype, rdata, rmsgid, serverctrls)
|
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None):
'Execute async request.\n\n Only msgid param is supported. Request info is fetched from global\n variable `PendingRequests` by msgid, executed using search_s and\n limited if requested.\n '
if ((all != 1) or (timeout is not None) or (resp_ctrl_classes is not None)):
raise exception.NotImplemented()
params = PendingRequests[msgid]
results = self.search_s(*params[:5])
serverctrls = params[5]
ctrl = serverctrls[0]
if ctrl.size:
rdata = results[:ctrl.size]
else:
rdata = results
rtype = None
rmsgid = None
serverctrls = None
return (rtype, rdata, rmsgid, serverctrls)<|docstring|>Execute async request.
Only msgid param is supported. Request info is fetched from global
variable `PendingRequests` by msgid, executed using search_s and
limited if requested.<|endoftext|>
|
001ee0d8d503b4a07b747e04f3a75cdfe9a59fd6fea5821c72f7ebe99e1d7b0f
|
def unbind_ext_s(self):
'Added to extend FakeLdap as connector class.'
pass
|
Added to extend FakeLdap as connector class.
|
keystone/tests/unit/fakeldap.py
|
unbind_ext_s
|
knikolla/keystone
| 615 |
python
|
def unbind_ext_s(self):
pass
|
def unbind_ext_s(self):
pass<|docstring|>Added to extend FakeLdap as connector class.<|endoftext|>
|
d973f384ff5e96b64056b94af40fdfc78c1d20632f7e5bcc06800445afada701
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
'Remove the ldap object at specified dn.'
if server_fail:
raise ldap.SERVER_DOWN
try:
children = self._getChildren(dn)
if children:
raise ldap.NOT_ALLOWED_ON_NONLEAF
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn, serverctrls, clientctrls)
|
Remove the ldap object at specified dn.
|
keystone/tests/unit/fakeldap.py
|
delete_ext_s
|
knikolla/keystone
| 615 |
python
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
try:
children = self._getChildren(dn)
if children:
raise ldap.NOT_ALLOWED_ON_NONLEAF
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn, serverctrls, clientctrls)
|
def delete_ext_s(self, dn, serverctrls, clientctrls=None):
if server_fail:
raise ldap.SERVER_DOWN
try:
children = self._getChildren(dn)
if children:
raise ldap.NOT_ALLOWED_ON_NONLEAF
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
raise ldap.NO_SUCH_OBJECT
super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn, serverctrls, clientctrls)<|docstring|>Remove the ldap object at specified dn.<|endoftext|>
|
b94c8ab9a7b63f87f37dfe0c85d28acb2c2904eb13a98ad15d02079d96fbe10c
|
@fill_doc
def fetch_atlas_difumo(dimension=64, resolution_mm=2, data_dir=None, resume=True, verbose=1):
"Fetch DiFuMo brain atlas\n\n Dictionaries of Functional Modes, or “DiFuMo”, can serve as atlases to extract\n functional signals with different dimensionalities (64, 128, 256, 512, and 1024).\n These modes are optimized to represent well raw BOLD timeseries,\n over a with range of experimental conditions.\n See :footcite:`DADI2020117126`.\n\n .. versionadded:: 0.7.1\n\n Notes\n -----\n Direct download links from OSF:\n\n - 64: https://osf.io/pqu9r/download\n - 128: https://osf.io/wjvd5/download\n - 256: https://osf.io/3vrct/download\n - 512: https://osf.io/9b76y/download\n - 1024: https://osf.io/34792/download\n\n Parameters\n ----------\n dimension : int, optional\n Number of dimensions in the dictionary. Valid resolutions\n available are {64, 128, 256, 512, 1024}.\n Default=64.\n\n resolution_mm : int, optional\n The resolution in mm of the atlas to fetch. Valid options\n available are {2, 3}. Default=2mm.\n %(data_dir)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, 4D path to nifti file containing regions definition.\n - 'labels': Numpy recarray containing the labels of the regions.\n - 'description': str, general description of the dataset.\n\n References\n ----------\n .. footbibliography::\n\n "
dic = {64: 'pqu9r', 128: 'wjvd5', 256: '3vrct', 512: '9b76y', 1024: '34792'}
valid_dimensions = [64, 128, 256, 512, 1024]
valid_resolution_mm = [2, 3]
if (dimension not in valid_dimensions):
raise ValueError('Requested dimension={} is not available. Valid options: {}'.format(dimension, valid_dimensions))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} is not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
url = 'https://osf.io/{}/download'.format(dic[dimension])
opts = {'uncompress': True}
csv_file = os.path.join('{0}', 'labels_{0}_dictionary.csv')
if (resolution_mm != 3):
nifti_file = os.path.join('{0}', '2mm', 'maps.nii.gz')
else:
nifti_file = os.path.join('{0}', '3mm', 'maps.nii.gz')
files = [(csv_file.format(dimension), url, opts), (nifti_file.format(dimension), url, opts)]
dataset_name = 'difumo_atlases'
data_dir = _get_dataset_dir(dataset_name=dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, verbose=verbose)
labels = np.recfromcsv(files_[0])
readme_files = [('README.md', 'https://osf.io/4k9bf/download', {'move': 'README.md'})]
if (not os.path.exists(os.path.join(data_dir, 'README.md'))):
_fetch_files(data_dir, readme_files, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(description=fdescr, maps=files_[1], labels=labels)
return Bunch(**params)
|
Fetch DiFuMo brain atlas
Dictionaries of Functional Modes, or “DiFuMo”, can serve as atlases to extract
functional signals with different dimensionalities (64, 128, 256, 512, and 1024).
These modes are optimized to represent well raw BOLD timeseries,
over a wide range of experimental conditions.
See :footcite:`DADI2020117126`.
.. versionadded:: 0.7.1
Notes
-----
Direct download links from OSF:
- 64: https://osf.io/pqu9r/download
- 128: https://osf.io/wjvd5/download
- 256: https://osf.io/3vrct/download
- 512: https://osf.io/9b76y/download
- 1024: https://osf.io/34792/download
Parameters
----------
dimension : int, optional
Number of dimensions in the dictionary. Valid resolutions
available are {64, 128, 256, 512, 1024}.
Default=64.
resolution_mm : int, optional
The resolution in mm of the atlas to fetch. Valid options
available are {2, 3}. Default=2mm.
%(data_dir)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, 4D path to nifti file containing regions definition.
- 'labels': Numpy recarray containing the labels of the regions.
- 'description': str, general description of the dataset.
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_atlas_difumo
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_difumo(dimension=64, resolution_mm=2, data_dir=None, resume=True, verbose=1):
"Fetch DiFuMo brain atlas\n\n Dictionaries of Functional Modes, or “DiFuMo”, can serve as atlases to extract\n functional signals with different dimensionalities (64, 128, 256, 512, and 1024).\n These modes are optimized to represent well raw BOLD timeseries,\n over a with range of experimental conditions.\n See :footcite:`DADI2020117126`.\n\n .. versionadded:: 0.7.1\n\n Notes\n -----\n Direct download links from OSF:\n\n - 64: https://osf.io/pqu9r/download\n - 128: https://osf.io/wjvd5/download\n - 256: https://osf.io/3vrct/download\n - 512: https://osf.io/9b76y/download\n - 1024: https://osf.io/34792/download\n\n Parameters\n ----------\n dimension : int, optional\n Number of dimensions in the dictionary. Valid resolutions\n available are {64, 128, 256, 512, 1024}.\n Default=64.\n\n resolution_mm : int, optional\n The resolution in mm of the atlas to fetch. Valid options\n available are {2, 3}. Default=2mm.\n %(data_dir)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, 4D path to nifti file containing regions definition.\n - 'labels': Numpy recarray containing the labels of the regions.\n - 'description': str, general description of the dataset.\n\n References\n ----------\n .. footbibliography::\n\n "
dic = {64: 'pqu9r', 128: 'wjvd5', 256: '3vrct', 512: '9b76y', 1024: '34792'}
valid_dimensions = [64, 128, 256, 512, 1024]
valid_resolution_mm = [2, 3]
if (dimension not in valid_dimensions):
raise ValueError('Requested dimension={} is not available. Valid options: {}'.format(dimension, valid_dimensions))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} is not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
url = 'https://osf.io/{}/download'.format(dic[dimension])
opts = {'uncompress': True}
csv_file = os.path.join('{0}', 'labels_{0}_dictionary.csv')
if (resolution_mm != 3):
nifti_file = os.path.join('{0}', '2mm', 'maps.nii.gz')
else:
nifti_file = os.path.join('{0}', '3mm', 'maps.nii.gz')
files = [(csv_file.format(dimension), url, opts), (nifti_file.format(dimension), url, opts)]
dataset_name = 'difumo_atlases'
data_dir = _get_dataset_dir(dataset_name=dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, verbose=verbose)
labels = np.recfromcsv(files_[0])
readme_files = [('README.md', 'https://osf.io/4k9bf/download', {'move': 'README.md'})]
if (not os.path.exists(os.path.join(data_dir, 'README.md'))):
_fetch_files(data_dir, readme_files, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(description=fdescr, maps=files_[1], labels=labels)
return Bunch(**params)
|
@fill_doc
def fetch_atlas_difumo(dimension=64, resolution_mm=2, data_dir=None, resume=True, verbose=1):
"Fetch DiFuMo brain atlas\n\n Dictionaries of Functional Modes, or “DiFuMo”, can serve as atlases to extract\n functional signals with different dimensionalities (64, 128, 256, 512, and 1024).\n These modes are optimized to represent well raw BOLD timeseries,\n over a with range of experimental conditions.\n See :footcite:`DADI2020117126`.\n\n .. versionadded:: 0.7.1\n\n Notes\n -----\n Direct download links from OSF:\n\n - 64: https://osf.io/pqu9r/download\n - 128: https://osf.io/wjvd5/download\n - 256: https://osf.io/3vrct/download\n - 512: https://osf.io/9b76y/download\n - 1024: https://osf.io/34792/download\n\n Parameters\n ----------\n dimension : int, optional\n Number of dimensions in the dictionary. Valid resolutions\n available are {64, 128, 256, 512, 1024}.\n Default=64.\n\n resolution_mm : int, optional\n The resolution in mm of the atlas to fetch. Valid options\n available are {2, 3}. Default=2mm.\n %(data_dir)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, 4D path to nifti file containing regions definition.\n - 'labels': Numpy recarray containing the labels of the regions.\n - 'description': str, general description of the dataset.\n\n References\n ----------\n .. footbibliography::\n\n "
dic = {64: 'pqu9r', 128: 'wjvd5', 256: '3vrct', 512: '9b76y', 1024: '34792'}
valid_dimensions = [64, 128, 256, 512, 1024]
valid_resolution_mm = [2, 3]
if (dimension not in valid_dimensions):
raise ValueError('Requested dimension={} is not available. Valid options: {}'.format(dimension, valid_dimensions))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} is not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
url = 'https://osf.io/{}/download'.format(dic[dimension])
opts = {'uncompress': True}
csv_file = os.path.join('{0}', 'labels_{0}_dictionary.csv')
if (resolution_mm != 3):
nifti_file = os.path.join('{0}', '2mm', 'maps.nii.gz')
else:
nifti_file = os.path.join('{0}', '3mm', 'maps.nii.gz')
files = [(csv_file.format(dimension), url, opts), (nifti_file.format(dimension), url, opts)]
dataset_name = 'difumo_atlases'
data_dir = _get_dataset_dir(dataset_name=dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, verbose=verbose)
labels = np.recfromcsv(files_[0])
readme_files = [('README.md', 'https://osf.io/4k9bf/download', {'move': 'README.md'})]
if (not os.path.exists(os.path.join(data_dir, 'README.md'))):
_fetch_files(data_dir, readme_files, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(description=fdescr, maps=files_[1], labels=labels)
return Bunch(**params)<|docstring|>Fetch DiFuMo brain atlas
Dictionaries of Functional Modes, or “DiFuMo”, can serve as atlases to extract
functional signals with different dimensionalities (64, 128, 256, 512, and 1024).
These modes are optimized to represent well raw BOLD timeseries,
over a with range of experimental conditions.
See :footcite:`DADI2020117126`.
.. versionadded:: 0.7.1
Notes
-----
Direct download links from OSF:
- 64: https://osf.io/pqu9r/download
- 128: https://osf.io/wjvd5/download
- 256: https://osf.io/3vrct/download
- 512: https://osf.io/9b76y/download
- 1024: https://osf.io/34792/download
Parameters
----------
dimension : int, optional
Number of dimensions in the dictionary. Valid resolutions
available are {64, 128, 256, 512, 1024}.
Default=64.
resolution_mm : int, optional
The resolution in mm of the atlas to fetch. Valid options
available are {2, 3}. Default=2mm.
%(data_dir)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, 4D path to nifti file containing regions definition.
- 'labels': Numpy recarray containing the labels of the regions.
- 'description': str, general description of the dataset.
References
----------
.. footbibliography::<|endoftext|>
|
31a3e66515d0f75c7d01d9359c0a811861f8b348ffe15e3da424d22b1cc8b12c
|
@fill_doc
def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Craddock 2012 parcellation\n\n The provided images are in MNI152 space.\n\n See :footcite:`CreativeCommons` for the licence.\n\n See :footcite:`craddock2012whole` and :footcite:`nitrcClusterROI`\n for more information on this parcellation.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n scorr_mean, tcorr_mean,\n scorr_2level, tcorr_2level,\n random\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'ftp://www.nitrc.org/home/groups/cluster_roi/htdocs/Parcellations/craddock_2011_parcellations.tar.gz'
opts = {'uncompress': True}
dataset_name = 'craddock_2012'
keys = ('scorr_mean', 'tcorr_mean', 'scorr_2level', 'tcorr_2level', 'random')
filenames = [('scorr05_mean_all.nii.gz', url, opts), ('tcorr05_mean_all.nii.gz', url, opts), ('scorr05_2level_all.nii.gz', url, opts), ('tcorr05_2level_all.nii.gz', url, opts), ('random_all.nii.gz', url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)
|
Download and return file names for the Craddock 2012 parcellation
The provided images are in MNI152 space.
See :footcite:`CreativeCommons` for the licence.
See :footcite:`craddock2012whole` and :footcite:`nitrcClusterROI`
for more information on this parcellation.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
scorr_mean, tcorr_mean,
scorr_2level, tcorr_2level,
random
References
----------
.. footbibliography::
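A short usage sketch based on the keys listed above (nothing beyond the documented Bunch attributes is assumed):

from nilearn import datasets

craddock = datasets.fetch_atlas_craddock_2012()
print(craddock.scorr_mean)         # path to the scorr mean parcellation image
print(craddock.description[:200])  # dataset description text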
|
nilearn/datasets/atlas.py
|
fetch_atlas_craddock_2012
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Craddock 2012 parcellation\n\n The provided images are in MNI152 space.\n\n See :footcite:`CreativeCommons` for the licence.\n\n See :footcite:`craddock2012whole` and :footcite:`nitrcClusterROI`\n for more information on this parcellation.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n scorr_mean, tcorr_mean,\n scorr_2level, tcorr_2level,\n random\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'ftp://www.nitrc.org/home/groups/cluster_roi/htdocs/Parcellations/craddock_2011_parcellations.tar.gz'
opts = {'uncompress': True}
dataset_name = 'craddock_2012'
keys = ('scorr_mean', 'tcorr_mean', 'scorr_2level', 'tcorr_2level', 'random')
filenames = [('scorr05_mean_all.nii.gz', url, opts), ('tcorr05_mean_all.nii.gz', url, opts), ('scorr05_2level_all.nii.gz', url, opts), ('tcorr05_2level_all.nii.gz', url, opts), ('random_all.nii.gz', url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)
|
@fill_doc
def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Craddock 2012 parcellation\n\n The provided images are in MNI152 space.\n\n See :footcite:`CreativeCommons` for the licence.\n\n See :footcite:`craddock2012whole` and :footcite:`nitrcClusterROI`\n for more information on this parcellation.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n scorr_mean, tcorr_mean,\n scorr_2level, tcorr_2level,\n random\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'ftp://www.nitrc.org/home/groups/cluster_roi/htdocs/Parcellations/craddock_2011_parcellations.tar.gz'
opts = {'uncompress': True}
dataset_name = 'craddock_2012'
keys = ('scorr_mean', 'tcorr_mean', 'scorr_2level', 'tcorr_2level', 'random')
filenames = [('scorr05_mean_all.nii.gz', url, opts), ('tcorr05_mean_all.nii.gz', url, opts), ('scorr05_2level_all.nii.gz', url, opts), ('tcorr05_2level_all.nii.gz', url, opts), ('random_all.nii.gz', url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)<|docstring|>Download and return file names for the Craddock 2012 parcellation
The provided images are in MNI152 space.
See :footcite:`CreativeCommons` for the licence.
See :footcite:`craddock2012whole` and :footcite:`nitrcClusterROI`
for more information on this parcellation.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
scorr_mean, tcorr_mean,
scorr_2level, tcorr_2level,
random
References
----------
.. footbibliography::<|endoftext|>
|
66ce3b008a8454f87820ab5d66c12c94dd3c8451dfb57fb4bce2f05fa3b70a5c
|
@fill_doc
def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, resume=True, verbose=1):
'Download and load the Destrieux cortical atlas (dated 2009)\n\n see :footcite:`Fischl2004Automatically`,\n and :footcite:`Destrieux2009sulcal`.\n\n Parameters\n ----------\n lateralized : boolean, optional\n If True, returns an atlas with distinct regions for right and left\n hemispheres. Default=True.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - Cortical ROIs, lateralized or not (maps)\n - Labels of the ROIs (labels)\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/11942/'
url += 'destrieux2009.tgz'
opts = {'uncompress': True}
lat = ('_lateralized' if lateralized else '')
files = [((('destrieux2009_rois_labels' + lat) + '.csv'), url, opts), ((('destrieux2009_rois' + lat) + '.nii.gz'), url, opts), ('destrieux2009.rst', url, opts)]
dataset_name = 'destrieux_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
params = dict(maps=files_[1], labels=np.recfromcsv(files_[0]))
with open(files_[2], 'r') as rst_file:
params['description'] = rst_file.read()
return Bunch(**params)
|
Download and load the Destrieux cortical atlas (dated 2009)
see :footcite:`Fischl2004Automatically`,
and :footcite:`Destrieux2009sulcal`.
Parameters
----------
lateralized : boolean, optional
If True, returns an atlas with distinct regions for right and left
hemispheres. Default=True.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- Cortical ROIs, lateralized or not (maps)
- Labels of the ROIs (labels)
References
----------
.. footbibliography::
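A short usage sketch following the documented signature; the lateralized flag is spelled out only for illustration:

from nilearn import datasets

destrieux = datasets.fetch_atlas_destrieux_2009(lateralized=True)
print(destrieux.maps)        # path to the ROI label image
print(destrieux.labels[:5])  # first few label records read from the CSV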
|
nilearn/datasets/atlas.py
|
fetch_atlas_destrieux_2009
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, resume=True, verbose=1):
'Download and load the Destrieux cortical atlas (dated 2009)\n\n see :footcite:`Fischl2004Automatically`,\n and :footcite:`Destrieux2009sulcal`.\n\n Parameters\n ----------\n lateralized : boolean, optional\n If True, returns an atlas with distinct regions for right and left\n hemispheres. Default=True.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - Cortical ROIs, lateralized or not (maps)\n - Labels of the ROIs (labels)\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/11942/'
url += 'destrieux2009.tgz'
opts = {'uncompress': True}
lat = ('_lateralized' if lateralized else '')
files = [((('destrieux2009_rois_labels' + lat) + '.csv'), url, opts), ((('destrieux2009_rois' + lat) + '.nii.gz'), url, opts), ('destrieux2009.rst', url, opts)]
dataset_name = 'destrieux_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
params = dict(maps=files_[1], labels=np.recfromcsv(files_[0]))
with open(files_[2], 'r') as rst_file:
params['description'] = rst_file.read()
return Bunch(**params)
|
@fill_doc
def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, resume=True, verbose=1):
'Download and load the Destrieux cortical atlas (dated 2009)\n\n see :footcite:`Fischl2004Automatically`,\n and :footcite:`Destrieux2009sulcal`.\n\n Parameters\n ----------\n lateralized : boolean, optional\n If True, returns an atlas with distinct regions for right and left\n hemispheres. Default=True.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - Cortical ROIs, lateralized or not (maps)\n - Labels of the ROIs (labels)\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/11942/'
url += 'destrieux2009.tgz'
opts = {'uncompress': True}
lat = ('_lateralized' if lateralized else '')
files = [((('destrieux2009_rois_labels' + lat) + '.csv'), url, opts), ((('destrieux2009_rois' + lat) + '.nii.gz'), url, opts), ('destrieux2009.rst', url, opts)]
dataset_name = 'destrieux_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
params = dict(maps=files_[1], labels=np.recfromcsv(files_[0]))
with open(files_[2], 'r') as rst_file:
params['description'] = rst_file.read()
return Bunch(**params)<|docstring|>Download and load the Destrieux cortical atlas (dated 2009)
see :footcite:`Fischl2004Automatically`,
and :footcite:`Destrieux2009sulcal`.
Parameters
----------
lateralized : boolean, optional
If True, returns an atlas with distinct regions for right and left
hemispheres. Default=True.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- Cortical ROIs, lateralized or not (maps)
- Labels of the ROIs (labels)
References
----------
.. footbibliography::<|endoftext|>
|
2efbfa03423e28826cf3085f8ece7c148a69ddbc17ca26788005848d1bcce979
|
@fill_doc
def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Harvard-Oxford parcellations from FSL.\n\n This function downloads Harvard Oxford atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Harvard Oxford atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,\n cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,\n cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,\n cort-prob-1mm, cort-prob-2mm,\n cortl-maxprob-thr0-1mm, cortl-maxprob-thr0-2mm,\n cortl-maxprob-thr25-1mm, cortl-maxprob-thr25-2mm,\n cortl-maxprob-thr50-1mm, cortl-maxprob-thr50-2mm,\n cortl-prob-1mm, cortl-prob-2mm,\n sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,\n sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,\n sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,\n sub-prob-1mm, sub-prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Harvard Oxford atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n\n .. note::\n Not implemented for full probabilistic atlas (*-prob-* atlases).\n\n Default=False.\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_juelich\n\n '
atlases = ['cort-maxprob-thr0-1mm', 'cort-maxprob-thr0-2mm', 'cort-maxprob-thr25-1mm', 'cort-maxprob-thr25-2mm', 'cort-maxprob-thr50-1mm', 'cort-maxprob-thr50-2mm', 'cort-prob-1mm', 'cort-prob-2mm', 'cortl-maxprob-thr0-1mm', 'cortl-maxprob-thr0-2mm', 'cortl-maxprob-thr25-1mm', 'cortl-maxprob-thr25-2mm', 'cortl-maxprob-thr50-1mm', 'cortl-maxprob-thr50-2mm', 'cortl-prob-1mm', 'cortl-prob-2mm', 'sub-maxprob-thr0-1mm', 'sub-maxprob-thr0-2mm', 'sub-maxprob-thr25-1mm', 'sub-maxprob-thr25-2mm', 'sub-maxprob-thr50-1mm', 'sub-maxprob-thr50-2mm', 'sub-prob-1mm', 'sub-prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = ('-prob-' in atlas_name)
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, is_lateralized) = _get_atlas_data_and_labels('HarvardOxford', atlas_name, symmetric_split=symmetric_split, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
if ((not symmetric_split) or is_lateralized):
return Bunch(filename=atlas_img, maps=atlas_niimg, labels=names)
(new_atlas_data, new_names) = _compute_symmetric_split('HarvardOxford', atlas_niimg, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=new_names)
|
Load Harvard-Oxford parcellations from FSL.
This function downloads Harvard Oxford atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Harvard Oxford atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
Parameters
----------
atlas_name : string
Name of atlas to load. Can be:
cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,
cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,
cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,
cort-prob-1mm, cort-prob-2mm,
cortl-maxprob-thr0-1mm, cortl-maxprob-thr0-2mm,
cortl-maxprob-thr25-1mm, cortl-maxprob-thr25-2mm,
cortl-maxprob-thr50-1mm, cortl-maxprob-thr50-2mm,
cortl-prob-1mm, cortl-prob-2mm,
sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,
sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,
sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,
sub-prob-1mm, sub-prob-2mm
data_dir : string, optional
Path of data directory where data will be stored. Optionally,
it can also be a FSL installation directory (which is dependent
on your installation).
Example, if FSL is installed in /usr/share/fsl/ then
specifying as '/usr/share/' can get you Harvard Oxford atlas
from your installed directory. Since we mimic same root directory
as FSL to load it easily from your installation.
symmetric_split : bool, optional
If True, lateralized atlases of cort or sub with maxprob will be
returned. For subcortical types (sub-maxprob), we split every
symmetric region in left and right parts. Effectively doubles the
number of regions.
.. note::
Not implemented for full probabilistic atlas (*-prob-* atlases).
Default=False.
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
See also
--------
nilearn.datasets.fetch_atlas_juelich
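A short usage sketch; the atlas name is one arbitrary pick from the list above, and symmetric_split is enabled only to illustrate the lateralized output:

from nilearn import datasets

ho = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm',
                                         symmetric_split=True)
print(len(ho.labels))  # region names after the left/right split
print(ho.maps)         # a Nifti1Image when the split is computed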
|
nilearn/datasets/atlas.py
|
fetch_atlas_harvard_oxford
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Harvard-Oxford parcellations from FSL.\n\n This function downloads Harvard Oxford atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Harvard Oxford atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,\n cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,\n cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,\n cort-prob-1mm, cort-prob-2mm,\n cortl-maxprob-thr0-1mm, cortl-maxprob-thr0-2mm,\n cortl-maxprob-thr25-1mm, cortl-maxprob-thr25-2mm,\n cortl-maxprob-thr50-1mm, cortl-maxprob-thr50-2mm,\n cortl-prob-1mm, cortl-prob-2mm,\n sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,\n sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,\n sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,\n sub-prob-1mm, sub-prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Harvard Oxford atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n\n .. note::\n Not implemented for full probabilistic atlas (*-prob-* atlases).\n\n Default=False.\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_juelich\n\n '
atlases = ['cort-maxprob-thr0-1mm', 'cort-maxprob-thr0-2mm', 'cort-maxprob-thr25-1mm', 'cort-maxprob-thr25-2mm', 'cort-maxprob-thr50-1mm', 'cort-maxprob-thr50-2mm', 'cort-prob-1mm', 'cort-prob-2mm', 'cortl-maxprob-thr0-1mm', 'cortl-maxprob-thr0-2mm', 'cortl-maxprob-thr25-1mm', 'cortl-maxprob-thr25-2mm', 'cortl-maxprob-thr50-1mm', 'cortl-maxprob-thr50-2mm', 'cortl-prob-1mm', 'cortl-prob-2mm', 'sub-maxprob-thr0-1mm', 'sub-maxprob-thr0-2mm', 'sub-maxprob-thr25-1mm', 'sub-maxprob-thr25-2mm', 'sub-maxprob-thr50-1mm', 'sub-maxprob-thr50-2mm', 'sub-prob-1mm', 'sub-prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = ('-prob-' in atlas_name)
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, is_lateralized) = _get_atlas_data_and_labels('HarvardOxford', atlas_name, symmetric_split=symmetric_split, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
if ((not symmetric_split) or is_lateralized):
return Bunch(filename=atlas_img, maps=atlas_niimg, labels=names)
(new_atlas_data, new_names) = _compute_symmetric_split('HarvardOxford', atlas_niimg, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=new_names)
|
@fill_doc
def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Harvard-Oxford parcellations from FSL.\n\n This function downloads Harvard Oxford atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Harvard Oxford atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,\n cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,\n cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,\n cort-prob-1mm, cort-prob-2mm,\n cortl-maxprob-thr0-1mm, cortl-maxprob-thr0-2mm,\n cortl-maxprob-thr25-1mm, cortl-maxprob-thr25-2mm,\n cortl-maxprob-thr50-1mm, cortl-maxprob-thr50-2mm,\n cortl-prob-1mm, cortl-prob-2mm,\n sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,\n sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,\n sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,\n sub-prob-1mm, sub-prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Harvard Oxford atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n\n .. note::\n Not implemented for full probabilistic atlas (*-prob-* atlases).\n\n Default=False.\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_juelich\n\n '
atlases = ['cort-maxprob-thr0-1mm', 'cort-maxprob-thr0-2mm', 'cort-maxprob-thr25-1mm', 'cort-maxprob-thr25-2mm', 'cort-maxprob-thr50-1mm', 'cort-maxprob-thr50-2mm', 'cort-prob-1mm', 'cort-prob-2mm', 'cortl-maxprob-thr0-1mm', 'cortl-maxprob-thr0-2mm', 'cortl-maxprob-thr25-1mm', 'cortl-maxprob-thr25-2mm', 'cortl-maxprob-thr50-1mm', 'cortl-maxprob-thr50-2mm', 'cortl-prob-1mm', 'cortl-prob-2mm', 'sub-maxprob-thr0-1mm', 'sub-maxprob-thr0-2mm', 'sub-maxprob-thr25-1mm', 'sub-maxprob-thr25-2mm', 'sub-maxprob-thr50-1mm', 'sub-maxprob-thr50-2mm', 'sub-prob-1mm', 'sub-prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = ('-prob-' in atlas_name)
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, is_lateralized) = _get_atlas_data_and_labels('HarvardOxford', atlas_name, symmetric_split=symmetric_split, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
if ((not symmetric_split) or is_lateralized):
return Bunch(filename=atlas_img, maps=atlas_niimg, labels=names)
(new_atlas_data, new_names) = _compute_symmetric_split('HarvardOxford', atlas_niimg, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=new_names)<|docstring|>Load Harvard-Oxford parcellations from FSL.
This function downloads Harvard Oxford atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Harvard Oxford atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
Parameters
----------
atlas_name : string
Name of atlas to load. Can be:
cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,
cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,
cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,
cort-prob-1mm, cort-prob-2mm,
cortl-maxprob-thr0-1mm, cortl-maxprob-thr0-2mm,
cortl-maxprob-thr25-1mm, cortl-maxprob-thr25-2mm,
cortl-maxprob-thr50-1mm, cortl-maxprob-thr50-2mm,
cortl-prob-1mm, cortl-prob-2mm,
sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,
sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,
sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,
sub-prob-1mm, sub-prob-2mm
data_dir : string, optional
Path of data directory where data will be stored. Optionally,
it can also be a FSL installation directory (which is dependent
on your installation).
Example, if FSL is installed in /usr/share/fsl/ then
specifying as '/usr/share/' can get you Harvard Oxford atlas
from your installed directory. Since we mimic same root directory
as FSL to load it easily from your installation.
symmetric_split : bool, optional
If True, lateralized atlases of cort or sub with maxprob will be
returned. For subcortical types (sub-maxprob), we split every
symmetric region in left and right parts. Effectively doubles the
number of regions.
.. note::
Not implemented for full probabilistic atlas (*-prob-* atlases).
Default=False.
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
See also
--------
nilearn.datasets.fetch_atlas_juelich<|endoftext|>
|
09b824a85018999f27dcdc57acd1ec384579ab40d38b8872a4659569402b080e
|
def fetch_atlas_juelich(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Juelich parcellations from FSL.\n\n This function downloads Juelich atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Juelich atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n maxprob-thr0-1mm, maxprob-thr0-2mm,\n maxprob-thr25-1mm, maxprob-thr25-2mm,\n maxprob-thr50-1mm, maxprob-thr50-2mm,\n prob-1mm, prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Juelich atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).\n Default=False.\n\n resume : bool, optional\n Whether to resumed download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_harvard_oxford\n\n '
atlases = ['maxprob-thr0-1mm', 'maxprob-thr0-2mm', 'maxprob-thr25-1mm', 'maxprob-thr25-2mm', 'maxprob-thr50-1mm', 'maxprob-thr50-2mm', 'prob-1mm', 'prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = atlas_name.startswith('prob-')
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, _) = _get_atlas_data_and_labels('Juelich', atlas_name, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
atlas_data = get_data(atlas_niimg)
if is_probabilistic:
(new_atlas_data, new_names) = _merge_probabilistic_maps_juelich(atlas_data, names)
elif symmetric_split:
(new_atlas_data, new_names) = _compute_symmetric_split('Juelich', atlas_niimg, names)
else:
(new_atlas_data, new_names) = _merge_labels_juelich(atlas_data, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=list(new_names))
|
Load Juelich parcellations from FSL.
This function downloads Juelich atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Juelich atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
.. versionadded:: 0.8.1
Parameters
----------
atlas_name : string
Name of atlas to load. Can be:
maxprob-thr0-1mm, maxprob-thr0-2mm,
maxprob-thr25-1mm, maxprob-thr25-2mm,
maxprob-thr50-1mm, maxprob-thr50-2mm,
prob-1mm, prob-2mm
data_dir : string, optional
Path of data directory where data will be stored. Optionally,
it can also be a FSL installation directory (which is dependent
on your installation).
Example, if FSL is installed in /usr/share/fsl/ then
specifying as '/usr/share/' can get you Juelich atlas
from your installed directory. Since we mimic same root directory
as FSL to load it easily from your installation.
symmetric_split : bool, optional
If True, lateralized atlases of cort or sub with maxprob will be
returned. For subcortical types (sub-maxprob), we split every
symmetric region in left and right parts. Effectively doubles the
number of regions.
NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).
Default=False.
resume : bool, optional
Whether to resume the download of a partly-downloaded file.
Default=True.
verbose : int, optional
Verbosity level (0 means no message). Default=1.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
See also
--------
nilearn.datasets.fetch_atlas_harvard_oxford
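A short usage sketch mirroring the Harvard-Oxford example; the atlas name is an arbitrary valid choice:

from nilearn import datasets

juelich = datasets.fetch_atlas_juelich('maxprob-thr25-2mm')
print(juelich.labels[:5])  # merged region labels
print(juelich.maps.shape)  # 3D label image for the maxprob variants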
|
nilearn/datasets/atlas.py
|
fetch_atlas_juelich
|
lemiceterieux/nilearn
| 827 |
python
|
def fetch_atlas_juelich(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Juelich parcellations from FSL.\n\n This function downloads Juelich atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Juelich atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n maxprob-thr0-1mm, maxprob-thr0-2mm,\n maxprob-thr25-1mm, maxprob-thr25-2mm,\n maxprob-thr50-1mm, maxprob-thr50-2mm,\n prob-1mm, prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Juelich atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).\n Default=False.\n\n resume : bool, optional\n Whether to resumed download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_harvard_oxford\n\n '
atlases = ['maxprob-thr0-1mm', 'maxprob-thr0-2mm', 'maxprob-thr25-1mm', 'maxprob-thr25-2mm', 'maxprob-thr50-1mm', 'maxprob-thr50-2mm', 'prob-1mm', 'prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = atlas_name.startswith('prob-')
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, _) = _get_atlas_data_and_labels('Juelich', atlas_name, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
atlas_data = get_data(atlas_niimg)
if is_probabilistic:
(new_atlas_data, new_names) = _merge_probabilistic_maps_juelich(atlas_data, names)
elif symmetric_split:
(new_atlas_data, new_names) = _compute_symmetric_split('Juelich', atlas_niimg, names)
else:
(new_atlas_data, new_names) = _merge_labels_juelich(atlas_data, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=list(new_names))
|
def fetch_atlas_juelich(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1):
'Load Juelich parcellations from FSL.\n\n This function downloads Juelich atlas packaged from FSL 5.0\n and stores atlases in NILEARN_DATA folder in home directory.\n\n This function can also load Juelich atlas from your local directory\n specified by your FSL installed path given in `data_dir` argument.\n See documentation for details.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n atlas_name : string\n Name of atlas to load. Can be:\n maxprob-thr0-1mm, maxprob-thr0-2mm,\n maxprob-thr25-1mm, maxprob-thr25-2mm,\n maxprob-thr50-1mm, maxprob-thr50-2mm,\n prob-1mm, prob-2mm\n\n data_dir : string, optional\n Path of data directory where data will be stored. Optionally,\n it can also be a FSL installation directory (which is dependent\n on your installation).\n Example, if FSL is installed in /usr/share/fsl/ then\n specifying as \'/usr/share/\' can get you Juelich atlas\n from your installed directory. Since we mimic same root directory\n as FSL to load it easily from your installation.\n\n symmetric_split : bool, optional\n If True, lateralized atlases of cort or sub with maxprob will be\n returned. For subcortical types (sub-maxprob), we split every\n symmetric region in left and right parts. Effectively doubles the\n number of regions.\n NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).\n Default=False.\n\n resume : bool, optional\n Whether to resumed download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is\n requested and 3D labels if a maximum probabilistic atlas was\n requested.\n\n - "labels": string list, labels of the regions in the atlas.\n\n See also\n --------\n nilearn.datasets.fetch_atlas_harvard_oxford\n\n '
atlases = ['maxprob-thr0-1mm', 'maxprob-thr0-2mm', 'maxprob-thr25-1mm', 'maxprob-thr25-2mm', 'maxprob-thr50-1mm', 'maxprob-thr50-2mm', 'prob-1mm', 'prob-2mm']
if (atlas_name not in atlases):
raise ValueError('Invalid atlas name: {0}. Please choose an atlas among:\n{1}'.format(atlas_name, '\n'.join(atlases)))
is_probabilistic = atlas_name.startswith('prob-')
if (is_probabilistic and symmetric_split):
raise ValueError('Region splitting not supported for probabilistic atlases')
(atlas_img, names, _) = _get_atlas_data_and_labels('Juelich', atlas_name, data_dir=data_dir, resume=resume, verbose=verbose)
atlas_niimg = check_niimg(atlas_img)
atlas_data = get_data(atlas_niimg)
if is_probabilistic:
(new_atlas_data, new_names) = _merge_probabilistic_maps_juelich(atlas_data, names)
elif symmetric_split:
(new_atlas_data, new_names) = _compute_symmetric_split('Juelich', atlas_niimg, names)
else:
(new_atlas_data, new_names) = _merge_labels_juelich(atlas_data, names)
new_atlas_niimg = new_img_like(atlas_niimg, new_atlas_data, atlas_niimg.affine)
return Bunch(filename=atlas_img, maps=new_atlas_niimg, labels=list(new_names))<|docstring|>Load Juelich parcellations from FSL.
This function downloads Juelich atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Juelich atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
.. versionadded:: 0.8.1
Parameters
----------
atlas_name : string
Name of atlas to load. Can be:
maxprob-thr0-1mm, maxprob-thr0-2mm,
maxprob-thr25-1mm, maxprob-thr25-2mm,
maxprob-thr50-1mm, maxprob-thr50-2mm,
prob-1mm, prob-2mm
data_dir : string, optional
Path of data directory where data will be stored. Optionally,
it can also be a FSL installation directory (which is dependent
on your installation).
Example, if FSL is installed in /usr/share/fsl/ then
specifying as '/usr/share/' can get you Juelich atlas
from your installed directory. Since we mimic same root directory
as FSL to load it easily from your installation.
symmetric_split : bool, optional
If True, lateralized atlases of cort or sub with maxprob will be
returned. For subcortical types (sub-maxprob), we split every
symmetric region in left and right parts. Effectively doubles the
number of regions.
NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).
Default=False.
resume : bool, optional
Whether to resumed download of a partly-downloaded file.
Default=True.
verbose : int, optional
Verbosity level (0 means no message). Default=1.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
See also
--------
nilearn.datasets.fetch_atlas_harvard_oxford<|endoftext|>
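A minimal usage sketch for the fetcher above (assumes nilearn >= 0.8.1 installed and network access on the first call; the chosen atlas name is just one of the valid options listed in the docstring):
from nilearn import datasets

# Deterministic (maximum-probability) flavour of the Juelich atlas.
juelich = datasets.fetch_atlas_juelich('maxprob-thr25-1mm')
print(juelich.maps)         # in-memory Nifti1Image with one integer label per voxel
print(juelich.labels[:5])   # region names, with 'Background' included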
|
43a82baeb9d1a80cb7a2d1a8720eee0797d87d008b9d3c7e5a109c2f725069b0
|
def _get_atlas_data_and_labels(atlas_source, atlas_name, symmetric_split=False, data_dir=None, resume=True, verbose=1):
'Helper function for both fetch_atlas_juelich and fetch_atlas_harvard_oxford.\n This function downloads the atlas image and labels.\n '
if (atlas_source == 'Juelich'):
url = 'https://www.nitrc.org/frs/download.php/12096/Juelich.tgz'
elif (atlas_source == 'HarvardOxford'):
url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz'
else:
raise ValueError('Atlas source {} is not valid.'.format(atlas_source))
data_dir = _get_dataset_dir('fsl', data_dir=data_dir, verbose=verbose)
opts = {'uncompress': True}
root = os.path.join('data', 'atlases')
if (atlas_source == 'HarvardOxford'):
if symmetric_split:
atlas_name = atlas_name.replace('cort-max', 'cortl-max')
if atlas_name.startswith('sub-'):
label_file = 'HarvardOxford-Subcortical.xml'
is_lateralized = False
elif atlas_name.startswith('cortl'):
label_file = 'HarvardOxford-Cortical-Lateralized.xml'
is_lateralized = True
else:
label_file = 'HarvardOxford-Cortical.xml'
is_lateralized = False
else:
label_file = 'Juelich.xml'
is_lateralized = False
label_file = os.path.join(root, label_file)
atlas_file = os.path.join(root, atlas_source, '{}-{}.nii.gz'.format(atlas_source, atlas_name))
(atlas_img, label_file) = _fetch_files(data_dir, [(atlas_file, url, opts), (label_file, url, opts)], resume=resume, verbose=verbose)
atlas_img = reorder_img(atlas_img)
names = {}
from xml.etree import ElementTree
names[0] = 'Background'
for (n, label) in enumerate(ElementTree.parse(label_file).findall('.//label')):
new_idx = (int(label.get('index')) + 1)
if (new_idx in names):
raise ValueError(f"Duplicate index {new_idx} for labels '{names[new_idx]}', and '{label.text}'")
names[new_idx] = label.text
assert (list(names.keys()) == list(range((n + 2))))
names = [item[1] for item in sorted(names.items())]
return (atlas_img, names, is_lateralized)
|
Helper function for both fetch_atlas_juelich and fetch_atlas_harvard_oxford.
This function downloads the atlas image and labels.
|
nilearn/datasets/atlas.py
|
_get_atlas_data_and_labels
|
lemiceterieux/nilearn
| 827 |
python
|
def _get_atlas_data_and_labels(atlas_source, atlas_name, symmetric_split=False, data_dir=None, resume=True, verbose=1):
'Helper function for both fetch_atlas_juelich and fetch_atlas_harvard_oxford.\n This function downloads the atlas image and labels.\n '
if (atlas_source == 'Juelich'):
url = 'https://www.nitrc.org/frs/download.php/12096/Juelich.tgz'
elif (atlas_source == 'HarvardOxford'):
url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz'
else:
raise ValueError('Atlas source {} is not valid.'.format(atlas_source))
data_dir = _get_dataset_dir('fsl', data_dir=data_dir, verbose=verbose)
opts = {'uncompress': True}
root = os.path.join('data', 'atlases')
if (atlas_source == 'HarvardOxford'):
if symmetric_split:
atlas_name = atlas_name.replace('cort-max', 'cortl-max')
if atlas_name.startswith('sub-'):
label_file = 'HarvardOxford-Subcortical.xml'
is_lateralized = False
elif atlas_name.startswith('cortl'):
label_file = 'HarvardOxford-Cortical-Lateralized.xml'
is_lateralized = True
else:
label_file = 'HarvardOxford-Cortical.xml'
is_lateralized = False
else:
label_file = 'Juelich.xml'
is_lateralized = False
label_file = os.path.join(root, label_file)
atlas_file = os.path.join(root, atlas_source, '{}-{}.nii.gz'.format(atlas_source, atlas_name))
(atlas_img, label_file) = _fetch_files(data_dir, [(atlas_file, url, opts), (label_file, url, opts)], resume=resume, verbose=verbose)
atlas_img = reorder_img(atlas_img)
names = {}
from xml.etree import ElementTree
names[0] = 'Background'
for (n, label) in enumerate(ElementTree.parse(label_file).findall('.//label')):
new_idx = (int(label.get('index')) + 1)
if (new_idx in names):
raise ValueError(f"Duplicate index {new_idx} for labels '{names[new_idx]}', and '{label.text}'")
names[new_idx] = label.text
assert (list(names.keys()) == list(range((n + 2))))
names = [item[1] for item in sorted(names.items())]
return (atlas_img, names, is_lateralized)
|
def _get_atlas_data_and_labels(atlas_source, atlas_name, symmetric_split=False, data_dir=None, resume=True, verbose=1):
'Helper function for both fetch_atlas_juelich and fetch_atlas_harvard_oxford.\n This function downloads the atlas image and labels.\n '
if (atlas_source == 'Juelich'):
url = 'https://www.nitrc.org/frs/download.php/12096/Juelich.tgz'
elif (atlas_source == 'HarvardOxford'):
url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz'
else:
raise ValueError('Atlas source {} is not valid.'.format(atlas_source))
data_dir = _get_dataset_dir('fsl', data_dir=data_dir, verbose=verbose)
opts = {'uncompress': True}
root = os.path.join('data', 'atlases')
if (atlas_source == 'HarvardOxford'):
if symmetric_split:
atlas_name = atlas_name.replace('cort-max', 'cortl-max')
if atlas_name.startswith('sub-'):
label_file = 'HarvardOxford-Subcortical.xml'
is_lateralized = False
elif atlas_name.startswith('cortl'):
label_file = 'HarvardOxford-Cortical-Lateralized.xml'
is_lateralized = True
else:
label_file = 'HarvardOxford-Cortical.xml'
is_lateralized = False
else:
label_file = 'Juelich.xml'
is_lateralized = False
label_file = os.path.join(root, label_file)
atlas_file = os.path.join(root, atlas_source, '{}-{}.nii.gz'.format(atlas_source, atlas_name))
(atlas_img, label_file) = _fetch_files(data_dir, [(atlas_file, url, opts), (label_file, url, opts)], resume=resume, verbose=verbose)
atlas_img = reorder_img(atlas_img)
names = {}
from xml.etree import ElementTree
names[0] = 'Background'
for (n, label) in enumerate(ElementTree.parse(label_file).findall('.//label')):
new_idx = (int(label.get('index')) + 1)
if (new_idx in names):
raise ValueError(f"Duplicate index {new_idx} for labels '{names[new_idx]}', and '{label.text}'")
names[new_idx] = label.text
assert (list(names.keys()) == list(range((n + 2))))
names = [item[1] for item in sorted(names.items())]
return (atlas_img, names, is_lateralized)<|docstring|>Helper function for both fetch_atlas_juelich and fetch_atlas_harvard_oxford.
This function downloads the atlas image and labels.<|endoftext|>
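The label parsing above follows the FSL convention that XML label indices are 0-based while image values start at 1, with 0 reserved for background. A small sketch of that convention, using a hypothetical miniature label file (not the real Juelich.xml):
from xml.etree import ElementTree

# Illustrative (hypothetical) miniature of an FSL-style label file.
xml_text = """<atlas>
  <data>
    <label index="0">GM Amygdala L</label>
    <label index="1">GM Amygdala R</label>
  </data>
</atlas>"""

names = {0: 'Background'}                                  # value 0 is reserved for background
for label in ElementTree.fromstring(xml_text).findall('.//label'):
    names[int(label.get('index')) + 1] = label.text        # FSL indices are shifted by +1
print(names)   # {0: 'Background', 1: 'GM Amygdala L', 2: 'GM Amygdala R'}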
|
08aadcea0496acfa92dc6fe56bbc7501a689378da5fa7d259bdfd07331f3600b
|
def _merge_probabilistic_maps_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles probabilistic juelich atlases\n when symmetric_split=False. In this situation, we need\n to merge labels and maps corresponding to left and right\n regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_name_to_idx = {k: (v - 1) for (v, k) in enumerate(new_names)}
new_atlas_data = np.zeros((*atlas_data.shape[:3], (len(new_names) - 1)))
for (i, name) in enumerate(names):
if (name != 'Background'):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(..., new_name_to_idx[new_name])] += atlas_data[(..., (i - 1))]
return (new_atlas_data, new_names)
|
Helper function for fetch_atlas_juelich.
This function handles probabilistic juelich atlases
when symmetric_split=False. In this situation, we need
to merge labels and maps corresponding to left and right
regions.
|
nilearn/datasets/atlas.py
|
_merge_probabilistic_maps_juelich
|
lemiceterieux/nilearn
| 827 |
python
|
def _merge_probabilistic_maps_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles probabilistic juelich atlases\n when symmetric_split=False. In this situation, we need\n to merge labels and maps corresponding to left and right\n regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_name_to_idx = {k: (v - 1) for (v, k) in enumerate(new_names)}
new_atlas_data = np.zeros((*atlas_data.shape[:3], (len(new_names) - 1)))
for (i, name) in enumerate(names):
if (name != 'Background'):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(..., new_name_to_idx[new_name])] += atlas_data[(..., (i - 1))]
return (new_atlas_data, new_names)
|
def _merge_probabilistic_maps_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles probabilistic juelich atlases\n when symmetric_split=False. In this situation, we need\n to merge labels and maps corresponding to left and right\n regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_name_to_idx = {k: (v - 1) for (v, k) in enumerate(new_names)}
new_atlas_data = np.zeros((*atlas_data.shape[:3], (len(new_names) - 1)))
for (i, name) in enumerate(names):
if (name != 'Background'):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(..., new_name_to_idx[new_name])] += atlas_data[(..., (i - 1))]
return (new_atlas_data, new_names)<|docstring|>Helper function for fetch_atlas_juelich.
This function handles probabilistic juelich atlases
when symmetric_split=False. In this situation, we need
to merge labels and maps corresponding to left and right
regions.<|endoftext|>
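A toy illustration of the left/right merge performed above (the label names and probability values are made up; it assumes the private helper is importable from nilearn.datasets.atlas as in this revision):
import numpy as np
from nilearn.datasets.atlas import _merge_probabilistic_maps_juelich

names = ['Background', 'GM Amygdala L', 'GM Amygdala R']
atlas_data = np.zeros((2, 2, 2, 2))      # one 3D probability map per non-background label
atlas_data[..., 0] = 0.3                 # left map
atlas_data[..., 1] = 0.4                 # right map

merged, merged_names = _merge_probabilistic_maps_juelich(atlas_data, names)
print(merged.shape)         # (2, 2, 2, 1): left and right maps summed into one region
print(list(merged_names))   # 'Background' plus the merged 'GM Amygdala' label
print(merged[0, 0, 0, 0])   # 0.7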
|
b8bb027eec29da56d64f270a34cab6f3242690a5db4c02cad7be0c8b61e4aa36
|
def _merge_labels_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles 3D atlases when symmetric_split=False.\n In this case, we need to merge the labels corresponding to\n left and right regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_names_dict = {k: v for (v, k) in enumerate(new_names)}
new_atlas_data = atlas_data.copy()
for (label, name) in enumerate(names):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(atlas_data == label)] = new_names_dict[new_name]
return (new_atlas_data, new_names)
|
Helper function for fetch_atlas_juelich.
This function handles 3D atlases when symmetric_split=False.
In this case, we need to merge the labels corresponding to
left and right regions.
|
nilearn/datasets/atlas.py
|
_merge_labels_juelich
|
lemiceterieux/nilearn
| 827 |
python
|
def _merge_labels_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles 3D atlases when symmetric_split=False.\n In this case, we need to merge the labels corresponding to\n left and right regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_names_dict = {k: v for (v, k) in enumerate(new_names)}
new_atlas_data = atlas_data.copy()
for (label, name) in enumerate(names):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(atlas_data == label)] = new_names_dict[new_name]
return (new_atlas_data, new_names)
|
def _merge_labels_juelich(atlas_data, names):
'Helper function for fetch_atlas_juelich.\n This function handles 3D atlases when symmetric_split=False.\n In this case, we need to merge the labels corresponding to\n left and right regions.\n '
new_names = np.unique([re.sub(' (L|R)$', '', name) for name in names])
new_names_dict = {k: v for (v, k) in enumerate(new_names)}
new_atlas_data = atlas_data.copy()
for (label, name) in enumerate(names):
new_name = re.sub(' (L|R)$', '', name)
new_atlas_data[(atlas_data == label)] = new_names_dict[new_name]
return (new_atlas_data, new_names)<|docstring|>Helper function for fetch_atlas_juelich.
This function handles 3D atlases when symmetric_split=False.
In this case, we need to merge the labels corresponding to
left and right regions.<|endoftext|>
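A toy illustration of the label merge above on a 3D deterministic volume (names and voxel values are made up; it assumes the private helper is importable from nilearn.datasets.atlas as in this revision):
import numpy as np
from nilearn.datasets.atlas import _merge_labels_juelich

names = ['Background', 'GM Amygdala L', 'GM Amygdala R']
atlas_data = np.array([[[0, 1, 2]]])     # 3D volume whose values index into `names`

merged, merged_names = _merge_labels_juelich(atlas_data, names)
print(merged)               # [[[0 1 1]]] -- left and right voxels now share one label
print(list(merged_names))   # 'Background' plus the merged 'GM Amygdala' label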
|
168ee3e9c7e9aa73b5d6269da8ffb6e824f24f7364da21b7d15c6d56de49e27b
|
def _compute_symmetric_split(source, atlas_niimg, names):
'Helper function for both fetch_atlas_juelich and\n fetch_atlas_harvard_oxford.\n This function handles 3D atlases when symmetric_split=True.\n '
assert (atlas_niimg.affine[(0, 0)] > 0)
atlas_data = get_data(atlas_niimg)
labels = np.unique(atlas_data)
middle_ind = (atlas_data.shape[0] // 2)
left_atlas = atlas_data.copy()
left_atlas[middle_ind:] = 0
right_atlas = atlas_data.copy()
right_atlas[:middle_ind] = 0
if (source == 'Juelich'):
for (idx, name) in enumerate(names):
if name.endswith('L'):
names[idx] = re.sub(' L$', '', name)
names[idx] = ('Left ' + name)
if name.endswith('R'):
names[idx] = re.sub(' R$', '', name)
names[idx] = ('Right ' + name)
new_label = 0
new_atlas = atlas_data.copy()
new_names = [names[0]]
for (label, name) in zip(labels[1:], names[1:]):
new_label += 1
left_elements = (left_atlas == label).sum()
right_elements = (right_atlas == label).sum()
n_elements = float((left_elements + right_elements))
if (((left_elements / n_elements) < 0.05) or ((right_elements / n_elements) < 0.05)):
new_atlas[(atlas_data == label)] = new_label
new_names.append(name)
continue
new_atlas[(left_atlas == label)] = new_label
new_names.append(('Left ' + name))
new_label += 1
new_atlas[(right_atlas == label)] = new_label
new_names.append(('Right ' + name))
return (new_atlas, new_names)
|
Helper function for both fetch_atlas_juelich and
fetch_atlas_harvard_oxford.
This function handles 3D atlases when symmetric_split=True.
|
nilearn/datasets/atlas.py
|
_compute_symmetric_split
|
lemiceterieux/nilearn
| 827 |
python
|
def _compute_symmetric_split(source, atlas_niimg, names):
'Helper function for both fetch_atlas_juelich and\n fetch_atlas_harvard_oxford.\n This function handles 3D atlases when symmetric_split=True.\n '
assert (atlas_niimg.affine[(0, 0)] > 0)
atlas_data = get_data(atlas_niimg)
labels = np.unique(atlas_data)
middle_ind = (atlas_data.shape[0] // 2)
left_atlas = atlas_data.copy()
left_atlas[middle_ind:] = 0
right_atlas = atlas_data.copy()
right_atlas[:middle_ind] = 0
if (source == 'Juelich'):
for (idx, name) in enumerate(names):
if name.endswith('L'):
names[idx] = re.sub(' L$', '', name)
names[idx] = ('Left ' + name)
if name.endswith('R'):
names[idx] = re.sub(' R$', '', name)
names[idx] = ('Right ' + name)
new_label = 0
new_atlas = atlas_data.copy()
new_names = [names[0]]
for (label, name) in zip(labels[1:], names[1:]):
new_label += 1
left_elements = (left_atlas == label).sum()
right_elements = (right_atlas == label).sum()
n_elements = float((left_elements + right_elements))
if (((left_elements / n_elements) < 0.05) or ((right_elements / n_elements) < 0.05)):
new_atlas[(atlas_data == label)] = new_label
new_names.append(name)
continue
new_atlas[(left_atlas == label)] = new_label
new_names.append(('Left ' + name))
new_label += 1
new_atlas[(right_atlas == label)] = new_label
new_names.append(('Right ' + name))
return (new_atlas, new_names)
|
def _compute_symmetric_split(source, atlas_niimg, names):
'Helper function for both fetch_atlas_juelich and\n fetch_atlas_harvard_oxford.\n This function handles 3D atlases when symmetric_split=True.\n '
assert (atlas_niimg.affine[(0, 0)] > 0)
atlas_data = get_data(atlas_niimg)
labels = np.unique(atlas_data)
middle_ind = (atlas_data.shape[0] // 2)
left_atlas = atlas_data.copy()
left_atlas[middle_ind:] = 0
right_atlas = atlas_data.copy()
right_atlas[:middle_ind] = 0
if (source == 'Juelich'):
for (idx, name) in enumerate(names):
if name.endswith('L'):
names[idx] = re.sub(' L$', '', name)
names[idx] = ('Left ' + name)
if name.endswith('R'):
names[idx] = re.sub(' R$', '', name)
names[idx] = ('Right ' + name)
new_label = 0
new_atlas = atlas_data.copy()
new_names = [names[0]]
for (label, name) in zip(labels[1:], names[1:]):
new_label += 1
left_elements = (left_atlas == label).sum()
right_elements = (right_atlas == label).sum()
n_elements = float((left_elements + right_elements))
if (((left_elements / n_elements) < 0.05) or ((right_elements / n_elements) < 0.05)):
new_atlas[(atlas_data == label)] = new_label
new_names.append(name)
continue
new_atlas[(left_atlas == label)] = new_label
new_names.append(('Left ' + name))
new_label += 1
new_atlas[(right_atlas == label)] = new_label
new_names.append(('Right ' + name))
return (new_atlas, new_names)<|docstring|>Helper function for both fetch_atlas_juelich and
fetch_atlas_harvard_oxford.
This function handles 3D atlases when symmetric_split=True.<|endoftext|>
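A conceptual sketch of the hemisphere split used above, on a made-up RAS-oriented toy volume (x grows to the right, as enforced by the affine assertion in the function); it only reproduces the masking step, not the relabelling:
import numpy as np

atlas_data = np.zeros((4, 1, 1), dtype=int)
atlas_data[:, 0, 0] = 1                       # one region spanning both hemispheres

middle_ind = atlas_data.shape[0] // 2
left_atlas = atlas_data.copy()
left_atlas[middle_ind:] = 0                   # keep only the low-x (left) half
right_atlas = atlas_data.copy()
right_atlas[:middle_ind] = 0                  # keep only the high-x (right) half

# Each side holds 50% of the voxels (>= 5%), so the region would be split into
# two new labels of the form 'Left <name>' and 'Right <name>'.
print(left_atlas.ravel(), right_atlas.ravel())   # [1 1 0 0] [0 0 1 1]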
|
7a7f41886ebc886e50f0a442fb7e00d2c6360ae2dc29874f2cb4b6e8428c1507
|
@fill_doc
def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1):
"Download and load the MSDL brain atlas.\n\n It can be downloaded at :footcite:`atlas_msdl`, and cited\n using :footcite:`Varoquaux2011multisubject`.\n See also :footcite:`VAROQUAUX2013405` for more information.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, path to nifti file containing regions definition.\n - 'labels': string list containing the labels of the regions.\n - 'region_coords': tuple list (x, y, z) containing coordinates\n of each region in MNI space.\n - 'networks': string list containing names of the networks.\n - 'description': description about the atlas.\n\n References\n ----------\n .. footbibliography::\n\n\n "
url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
opts = {'uncompress': True}
dataset_name = 'msdl_atlas'
files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts), (os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
csv_data = np.recfromcsv(files[0])
labels = [name.strip() for name in csv_data['name'].tolist()]
labels = [label.decode('utf-8') for label in labels]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', module='numpy', category=FutureWarning)
region_coords = csv_data[['x', 'y', 'z']].tolist()
net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=files[1], labels=labels, region_coords=region_coords, networks=net_names, description=fdescr)
|
Download and load the MSDL brain atlas.
It can be downloaded at :footcite:`atlas_msdl`, and cited
using :footcite:`Varoquaux2011multisubject`.
See also :footcite:`VAROQUAUX2013405` for more information.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, path to nifti file containing regions definition.
- 'labels': string list containing the labels of the regions.
- 'region_coords': tuple list (x, y, z) containing coordinates
of each region in MNI space.
- 'networks': string list containing names of the networks.
- 'description': description about the atlas.
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_atlas_msdl
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1):
"Download and load the MSDL brain atlas.\n\n It can be downloaded at :footcite:`atlas_msdl`, and cited\n using :footcite:`Varoquaux2011multisubject`.\n See also :footcite:`VAROQUAUX2013405` for more information.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, path to nifti file containing regions definition.\n - 'labels': string list containing the labels of the regions.\n - 'region_coords': tuple list (x, y, z) containing coordinates\n of each region in MNI space.\n - 'networks': string list containing names of the networks.\n - 'description': description about the atlas.\n\n References\n ----------\n .. footbibliography::\n\n\n "
url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
opts = {'uncompress': True}
dataset_name = 'msdl_atlas'
files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts), (os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
csv_data = np.recfromcsv(files[0])
labels = [name.strip() for name in csv_data['name'].tolist()]
labels = [label.decode('utf-8') for label in labels]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', module='numpy', category=FutureWarning)
region_coords = csv_data[['x', 'y', 'z']].tolist()
net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=files[1], labels=labels, region_coords=region_coords, networks=net_names, description=fdescr)
|
@fill_doc
def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1):
"Download and load the MSDL brain atlas.\n\n It can be downloaded at :footcite:`atlas_msdl`, and cited\n using :footcite:`Varoquaux2011multisubject`.\n See also :footcite:`VAROQUAUX2013405` for more information.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'maps': str, path to nifti file containing regions definition.\n - 'labels': string list containing the labels of the regions.\n - 'region_coords': tuple list (x, y, z) containing coordinates\n of each region in MNI space.\n - 'networks': string list containing names of the networks.\n - 'description': description about the atlas.\n\n References\n ----------\n .. footbibliography::\n\n\n "
url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
opts = {'uncompress': True}
dataset_name = 'msdl_atlas'
files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts), (os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
csv_data = np.recfromcsv(files[0])
labels = [name.strip() for name in csv_data['name'].tolist()]
labels = [label.decode('utf-8') for label in labels]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', module='numpy', category=FutureWarning)
region_coords = csv_data[['x', 'y', 'z']].tolist()
net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=files[1], labels=labels, region_coords=region_coords, networks=net_names, description=fdescr)<|docstring|>Download and load the MSDL brain atlas.
It can be downloaded at :footcite:`atlas_msdl`, and cited
using :footcite:`Varoquaux2011multisubject`.
See also :footcite:`VAROQUAUX2013405` for more information.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, path to nifti file containing regions definition.
- 'labels': string list containing the labels of the regions.
- 'region_coords': tuple list (x, y, z) containing coordinates
of each region in MNI space.
- 'networks': string list containing names of the networks.
- 'description': description about the atlas.
References
----------
.. footbibliography::<|endoftext|>
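A minimal usage sketch for the MSDL fetcher (downloads the atlas on first call); the attribute names come directly from the Bunch returned above:
from nilearn import datasets

msdl = datasets.fetch_atlas_msdl()
print(msdl.maps)                                   # path to the 4D probabilistic maps
print(len(msdl.labels), len(msdl.region_coords))   # one label and (x, y, z) triple per region
print(msdl.networks[:3])                           # network name attached to each region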
|
355453728e152ac614a3ebe124708e405d3dbb6abd74e758abf067fc9db02ad2
|
def fetch_coords_power_2011():
'Download and load the Power et al. brain atlas composed of 264 ROIs\n\n See :footcite:`Power2011Functional`.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n - "rois": coordinates of 264 ROIs in MNI space\n\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'power_2011'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'power_2011.csv')
params = dict(rois=np.recfromcsv(csv), description=fdescr)
return Bunch(**params)
|
Download and load the Power et al. brain atlas composed of 264 ROIs
See :footcite:`Power2011Functional`.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": coordinates of 264 ROIs in MNI space
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_coords_power_2011
|
lemiceterieux/nilearn
| 827 |
python
|
def fetch_coords_power_2011():
'Download and load the Power et al. brain atlas composed of 264 ROIs\n\n See :footcite:`Power2011Functional`.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n - "rois": coordinates of 264 ROIs in MNI space\n\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'power_2011'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'power_2011.csv')
params = dict(rois=np.recfromcsv(csv), description=fdescr)
return Bunch(**params)
|
def fetch_coords_power_2011():
'Download and load the Power et al. brain atlas composed of 264 ROIs\n\n See :footcite:`Power2011Functional`.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n - "rois": coordinates of 264 ROIs in MNI space\n\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'power_2011'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'power_2011.csv')
params = dict(rois=np.recfromcsv(csv), description=fdescr)
return Bunch(**params)<|docstring|>Download and load the Power et al. brain atlas composed of 264 ROIs
See :footcite:`Power2011Functional`.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": coordinates of 264 ROIs in MNI space
References
----------
.. footbibliography::<|endoftext|>
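A minimal usage sketch: the returned `rois` record array can be stacked into an (n_rois, 3) coordinate matrix for seed-based analyses:
import numpy as np
from nilearn import datasets

power = datasets.fetch_coords_power_2011()
coords = np.vstack((power.rois['x'], power.rois['y'], power.rois['z'])).T
print(coords.shape)   # (264, 3) MNI coordinates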
|
7508b4b4a22cfd3d72b8a9a2209621e092e9206df4fd1ee4e049f1e94e1e10f5
|
@fill_doc
def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None, resume=True, verbose=1):
'Download and load the Smith ICA and BrainMap atlas (dated 2009).\n\n See :footcite:`Smith200913040` and :footcite:`Laird2011behavioral`.\n\n Parameters\n ----------\n %(data_dir)s\n mirror : string, optional\n By default, the dataset is downloaded from the original website of the\n atlas. Specifying "nitrc" will force download from a mirror, with\n potentially higher bandwidth. Default=\'origin\'.\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - 20-dimensional ICA, Resting-FMRI components:\n\n - all 20 components (rsn20)\n - 10 well-matched maps from these, as shown in PNAS paper (rsn10)\n\n - 20-dimensional ICA, BrainMap components:\n\n - all 20 components (bm20)\n - 10 well-matched maps from these, as shown in PNAS paper (bm10)\n\n - 70-dimensional ICA, Resting-FMRI components (rsn70)\n\n - 70-dimensional ICA, BrainMap components (bm70)\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information about this dataset\'s structure:\n http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/\n\n '
if (url is None):
if (mirror == 'origin'):
url = 'http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/'
elif (mirror == 'nitrc'):
url = ['https://www.nitrc.org/frs/download.php/7730/', 'https://www.nitrc.org/frs/download.php/7729/', 'https://www.nitrc.org/frs/download.php/7731/', 'https://www.nitrc.org/frs/download.php/7726/', 'https://www.nitrc.org/frs/download.php/7728/', 'https://www.nitrc.org/frs/download.php/7727/']
else:
raise ValueError(('Unknown mirror "%s". Mirror must be "origin" or "nitrc"' % str(mirror)))
files = ['rsn20.nii.gz', 'PNAS_Smith09_rsn10.nii.gz', 'rsn70.nii.gz', 'bm20.nii.gz', 'PNAS_Smith09_bm10.nii.gz', 'bm70.nii.gz']
if isinstance(url, str):
url = ([url] * len(files))
files = [(f, (u + f), {}) for (f, u) in zip(files, url)]
dataset_name = 'smith_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
keys = ['rsn20', 'rsn10', 'rsn70', 'bm20', 'bm10', 'bm70']
params = dict(zip(keys, files_))
params['description'] = fdescr
return Bunch(**params)
|
Download and load the Smith ICA and BrainMap atlas (dated 2009).
See :footcite:`Smith200913040` and :footcite:`Laird2011behavioral`.
Parameters
----------
%(data_dir)s
mirror : string, optional
By default, the dataset is downloaded from the original website of the
atlas. Specifying "nitrc" will force download from a mirror, with
potentially higher bandwidth. Default='origin'.
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- 20-dimensional ICA, Resting-FMRI components:
- all 20 components (rsn20)
- 10 well-matched maps from these, as shown in PNAS paper (rsn10)
- 20-dimensional ICA, BrainMap components:
- all 20 components (bm20)
- 10 well-matched maps from these, as shown in PNAS paper (bm10)
- 70-dimensional ICA, Resting-FMRI components (rsn70)
- 70-dimensional ICA, BrainMap components (bm70)
References
----------
.. footbibliography::
Notes
-----
For more information about this dataset's structure:
http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/
|
nilearn/datasets/atlas.py
|
fetch_atlas_smith_2009
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None, resume=True, verbose=1):
'Download and load the Smith ICA and BrainMap atlas (dated 2009).\n\n See :footcite:`Smith200913040` and :footcite:`Laird2011behavioral`.\n\n Parameters\n ----------\n %(data_dir)s\n mirror : string, optional\n By default, the dataset is downloaded from the original website of the\n atlas. Specifying "nitrc" will force download from a mirror, with\n potentially higher bandwidth. Default=\'origin\'.\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - 20-dimensional ICA, Resting-FMRI components:\n\n - all 20 components (rsn20)\n - 10 well-matched maps from these, as shown in PNAS paper (rsn10)\n\n - 20-dimensional ICA, BrainMap components:\n\n - all 20 components (bm20)\n - 10 well-matched maps from these, as shown in PNAS paper (bm10)\n\n - 70-dimensional ICA, Resting-FMRI components (rsn70)\n\n - 70-dimensional ICA, BrainMap components (bm70)\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information about this dataset\'s structure:\n http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/\n\n '
if (url is None):
if (mirror == 'origin'):
url = 'http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/'
elif (mirror == 'nitrc'):
url = ['https://www.nitrc.org/frs/download.php/7730/', 'https://www.nitrc.org/frs/download.php/7729/', 'https://www.nitrc.org/frs/download.php/7731/', 'https://www.nitrc.org/frs/download.php/7726/', 'https://www.nitrc.org/frs/download.php/7728/', 'https://www.nitrc.org/frs/download.php/7727/']
else:
raise ValueError(('Unknown mirror "%s". Mirror must be "origin" or "nitrc"' % str(mirror)))
files = ['rsn20.nii.gz', 'PNAS_Smith09_rsn10.nii.gz', 'rsn70.nii.gz', 'bm20.nii.gz', 'PNAS_Smith09_bm10.nii.gz', 'bm70.nii.gz']
if isinstance(url, str):
url = ([url] * len(files))
files = [(f, (u + f), {}) for (f, u) in zip(files, url)]
dataset_name = 'smith_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
keys = ['rsn20', 'rsn10', 'rsn70', 'bm20', 'bm10', 'bm70']
params = dict(zip(keys, files_))
params['description'] = fdescr
return Bunch(**params)
|
@fill_doc
def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None, resume=True, verbose=1):
'Download and load the Smith ICA and BrainMap atlas (dated 2009).\n\n See :footcite:`Smith200913040` and :footcite:`Laird2011behavioral`.\n\n Parameters\n ----------\n %(data_dir)s\n mirror : string, optional\n By default, the dataset is downloaded from the original website of the\n atlas. Specifying "nitrc" will force download from a mirror, with\n potentially higher bandwidth. Default=\'origin\'.\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - 20-dimensional ICA, Resting-FMRI components:\n\n - all 20 components (rsn20)\n - 10 well-matched maps from these, as shown in PNAS paper (rsn10)\n\n - 20-dimensional ICA, BrainMap components:\n\n - all 20 components (bm20)\n - 10 well-matched maps from these, as shown in PNAS paper (bm10)\n\n - 70-dimensional ICA, Resting-FMRI components (rsn70)\n\n - 70-dimensional ICA, BrainMap components (bm70)\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information about this dataset\'s structure:\n http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/\n\n '
if (url is None):
if (mirror == 'origin'):
url = 'http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/'
elif (mirror == 'nitrc'):
url = ['https://www.nitrc.org/frs/download.php/7730/', 'https://www.nitrc.org/frs/download.php/7729/', 'https://www.nitrc.org/frs/download.php/7731/', 'https://www.nitrc.org/frs/download.php/7726/', 'https://www.nitrc.org/frs/download.php/7728/', 'https://www.nitrc.org/frs/download.php/7727/']
else:
raise ValueError(('Unknown mirror "%s". Mirror must be "origin" or "nitrc"' % str(mirror)))
files = ['rsn20.nii.gz', 'PNAS_Smith09_rsn10.nii.gz', 'rsn70.nii.gz', 'bm20.nii.gz', 'PNAS_Smith09_bm10.nii.gz', 'bm70.nii.gz']
if isinstance(url, str):
url = ([url] * len(files))
files = [(f, (u + f), {}) for (f, u) in zip(files, url)]
dataset_name = 'smith_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
keys = ['rsn20', 'rsn10', 'rsn70', 'bm20', 'bm10', 'bm70']
params = dict(zip(keys, files_))
params['description'] = fdescr
return Bunch(**params)<|docstring|>Download and load the Smith ICA and BrainMap atlas (dated 2009).
See :footcite:`Smith200913040` and :footcite:`Laird2011behavioral`.
Parameters
----------
%(data_dir)s
mirror : string, optional
By default, the dataset is downloaded from the original website of the
atlas. Specifying "nitrc" will force download from a mirror, with
potentially higher bandwidth. Default='origin'.
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- 20-dimensional ICA, Resting-FMRI components:
- all 20 components (rsn20)
- 10 well-matched maps from these, as shown in PNAS paper (rsn10)
- 20-dimensional ICA, BrainMap components:
- all 20 components (bm20)
- 10 well-matched maps from these, as shown in PNAS paper (bm10)
- 70-dimensional ICA, Resting-FMRI components (rsn70)
- 70-dimensional ICA, BrainMap components (bm70)
References
----------
.. footbibliography::
Notes
-----
For more information about this dataset's structure:
http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/<|endoftext|>
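A minimal usage sketch; each key in the returned Bunch holds the path to one set of ICA maps, matching the keys assigned in the code above:
from nilearn import datasets

smith = datasets.fetch_atlas_smith_2009()
print(smith.rsn10)   # 10 well-matched resting-state components
print(smith.bm70)    # 70 BrainMap-derived components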
|
99b1697915a249f7f1d52cfe9b992bd3f71a56a24579d4fa3a98cf7cc261a58e
|
@fill_doc
def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Yeo 2011 parcellation.\n\n The provided images are in MNI152 space.\n\n For more information on this dataset\'s structure,\n see :footcite:`CorticalParcellation_Yeo2011`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "thin_7", "thick_7": 7-region parcellations,\n fitted to resp. thin and thick template cortex segmentations.\n\n - "thin_17", "thick_17": 17-region parcellations.\n\n - "colors_7", "colors_17": colormaps (text files) for 7- and 17-region\n parcellation respectively.\n\n - "anat": anatomy image.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
if (url is None):
url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/Yeo_JNeurophysiol11_MNI152.zip'
opts = {'uncompress': True}
dataset_name = 'yeo_2011'
keys = ('thin_7', 'thick_7', 'thin_17', 'thick_17', 'colors_7', 'colors_17', 'anat')
basenames = ('Yeo2011_7Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_7Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_7Networks_ColorLUT.txt', 'Yeo2011_17Networks_ColorLUT.txt', 'FSL_MNI152_FreeSurferConformed_1mm.nii.gz')
filenames = [(os.path.join('Yeo_JNeurophysiol11_MNI152', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)
|
Download and return file names for the Yeo 2011 parcellation.
The provided images are in MNI152 space.
For more information on this dataset's structure,
see :footcite:`CorticalParcellation_Yeo2011`,
and :footcite:`Yeo2011organization`.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "thin_7", "thick_7": 7-region parcellations,
fitted to resp. thin and thick template cortex segmentations.
- "thin_17", "thick_17": 17-region parcellations.
- "colors_7", "colors_17": colormaps (text files) for 7- and 17-region
parcellation respectively.
- "anat": anatomy image.
References
----------
.. footbibliography::
Notes
-----
Licence: unknown.
|
nilearn/datasets/atlas.py
|
fetch_atlas_yeo_2011
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Yeo 2011 parcellation.\n\n The provided images are in MNI152 space.\n\n For more information on this dataset\'s structure,\n see :footcite:`CorticalParcellation_Yeo2011`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "thin_7", "thick_7": 7-region parcellations,\n fitted to resp. thin and thick template cortex segmentations.\n\n - "thin_17", "thick_17": 17-region parcellations.\n\n - "colors_7", "colors_17": colormaps (text files) for 7- and 17-region\n parcellation respectively.\n\n - "anat": anatomy image.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
if (url is None):
url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/Yeo_JNeurophysiol11_MNI152.zip'
opts = {'uncompress': True}
dataset_name = 'yeo_2011'
keys = ('thin_7', 'thick_7', 'thin_17', 'thick_17', 'colors_7', 'colors_17', 'anat')
basenames = ('Yeo2011_7Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_7Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_7Networks_ColorLUT.txt', 'Yeo2011_17Networks_ColorLUT.txt', 'FSL_MNI152_FreeSurferConformed_1mm.nii.gz')
filenames = [(os.path.join('Yeo_JNeurophysiol11_MNI152', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)
|
@fill_doc
def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Yeo 2011 parcellation.\n\n The provided images are in MNI152 space.\n\n For more information on this dataset\'s structure,\n see :footcite:`CorticalParcellation_Yeo2011`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "thin_7", "thick_7": 7-region parcellations,\n fitted to resp. thin and thick template cortex segmentations.\n\n - "thin_17", "thick_17": 17-region parcellations.\n\n - "colors_7", "colors_17": colormaps (text files) for 7- and 17-region\n parcellation respectively.\n\n - "anat": anatomy image.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
if (url is None):
url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/Yeo_JNeurophysiol11_MNI152.zip'
opts = {'uncompress': True}
dataset_name = 'yeo_2011'
keys = ('thin_7', 'thick_7', 'thin_17', 'thick_17', 'colors_7', 'colors_17', 'anat')
basenames = ('Yeo2011_7Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_7Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm.nii.gz', 'Yeo2011_17Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz', 'Yeo2011_7Networks_ColorLUT.txt', 'Yeo2011_17Networks_ColorLUT.txt', 'FSL_MNI152_FreeSurferConformed_1mm.nii.gz')
filenames = [(os.path.join('Yeo_JNeurophysiol11_MNI152', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict(([('description', fdescr)] + list(zip(keys, sub_files))))
return Bunch(**params)<|docstring|>Download and return file names for the Yeo 2011 parcellation.
The provided images are in MNI152 space.
For more information on this dataset's structure,
see :footcite:`CorticalParcellation_Yeo2011`,
and :footcite:`Yeo2011organization`.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "thin_7", "thick_7": 7-region parcellations,
fitted to resp. thin and thick template cortex segmentations.
- "thin_17", "thick_17": 17-region parcellations.
- "colors_7", "colors_17": colormaps (text files) for 7- and 17-region
parcellation respectively.
- "anat": anatomy image.
References
----------
.. footbibliography::
Notes
-----
Licence: unknown.<|endoftext|>
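A minimal usage sketch for the Yeo 2011 fetcher; the values are file paths in MNI152 space, named after the keys defined in the code above:
from nilearn import datasets

yeo = datasets.fetch_atlas_yeo_2011()
print(yeo.thick_17)   # 17-network parcellation on the thick cortex template
print(yeo.colors_17)  # matching colour lookup table (text file)
print(yeo.anat)       # anatomical image in the same space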
|
f59e50cff7d43ef1b9d26d9c905c9a0de2881747ab9a46a5b06c106f3be3b905
|
@fill_doc
def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and returns the AAL template for SPM 12.\n\n This atlas is the result of an automated anatomical parcellation of the\n spatially normalized single-subject high-resolution T1 volume provided by\n the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,\n Trans. Med. Imag. 17, 463-468, PubMed).\n\n For more information on this dataset\'s structure,\n see :footcite:`AAL_atlas`,\n and :footcite:`TZOURIOMAZOYER2002273`.\n\n Parameters\n ----------\n version : string {\'SPM12\', \'SPM5\', \'SPM8\'}, optional\n The version of the AAL atlas. Must be SPM5, SPM8 or SPM12.\n Default=\'SPM12\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": str. path to nifti file containing regions.\n\n - "labels": list of the names of the regions\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
versions = ['SPM5', 'SPM8', 'SPM12']
if (version not in versions):
raise ValueError(('The version of AAL requested "%s" does not exist.Please choose one among %s.' % (version, str(versions))))
if (url is None):
baseurl = 'http://www.gin.cnrs.fr/AAL_files/aal_for_%s.tar.gz'
url = (baseurl % version)
opts = {'uncompress': True}
dataset_name = ('aal_' + version)
basenames = ('AAL.nii', 'AAL.xml')
filenames = [(os.path.join('aal', 'atlas', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(atlas_img, labels_file) = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
xml_tree = xml.etree.ElementTree.parse(labels_file)
root = xml_tree.getroot()
labels = []
indices = []
for label in root.iter('label'):
indices.append(label.find('index').text)
labels.append(label.find('name').text)
params = {'description': fdescr, 'maps': atlas_img, 'labels': labels, 'indices': indices}
return Bunch(**params)
|
Downloads and returns the AAL template for SPM 12.
This atlas is the result of an automated anatomical parcellation of the
spatially normalized single-subject high-resolution T1 volume provided by
the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,
Trans. Med. Imag. 17, 463-468, PubMed).
For more information on this dataset's structure,
see :footcite:`AAL_atlas`,
and :footcite:`TZOURIOMAZOYER2002273`.
Parameters
----------
version : string {'SPM12', 'SPM5', 'SPM8'}, optional
The version of the AAL atlas. Must be SPM5, SPM8 or SPM12.
Default='SPM12'.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": str. path to nifti file containing regions.
- "labels": list of the names of the regions
References
----------
.. footbibliography::
Notes
-----
Licence: unknown.
|
nilearn/datasets/atlas.py
|
fetch_atlas_aal
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and returns the AAL template for SPM 12.\n\n This atlas is the result of an automated anatomical parcellation of the\n spatially normalized single-subject high-resolution T1 volume provided by\n the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,\n Trans. Med. Imag. 17, 463-468, PubMed).\n\n For more information on this dataset\'s structure,\n see :footcite:`AAL_atlas`,\n and :footcite:`TZOURIOMAZOYER2002273`.\n\n Parameters\n ----------\n version : string {\'SPM12\', \'SPM5\', \'SPM8\'}, optional\n The version of the AAL atlas. Must be SPM5, SPM8 or SPM12.\n Default=\'SPM12\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": str. path to nifti file containing regions.\n\n - "labels": list of the names of the regions\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
versions = ['SPM5', 'SPM8', 'SPM12']
if (version not in versions):
raise ValueError(('The version of AAL requested "%s" does not exist.Please choose one among %s.' % (version, str(versions))))
if (url is None):
baseurl = 'http://www.gin.cnrs.fr/AAL_files/aal_for_%s.tar.gz'
url = (baseurl % version)
opts = {'uncompress': True}
dataset_name = ('aal_' + version)
basenames = ('AAL.nii', 'AAL.xml')
filenames = [(os.path.join('aal', 'atlas', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(atlas_img, labels_file) = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
xml_tree = xml.etree.ElementTree.parse(labels_file)
root = xml_tree.getroot()
labels = []
indices = []
for label in root.iter('label'):
indices.append(label.find('index').text)
labels.append(label.find('name').text)
params = {'description': fdescr, 'maps': atlas_img, 'labels': labels, 'indices': indices}
return Bunch(**params)
|
@fill_doc
def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and returns the AAL template for SPM 12.\n\n This atlas is the result of an automated anatomical parcellation of the\n spatially normalized single-subject high-resolution T1 volume provided by\n the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,\n Trans. Med. Imag. 17, 463-468, PubMed).\n\n For more information on this dataset\'s structure,\n see :footcite:`AAL_atlas`,\n and :footcite:`TZOURIOMAZOYER2002273`.\n\n Parameters\n ----------\n version : string {\'SPM12\', \'SPM5\', \'SPM8\'}, optional\n The version of the AAL atlas. Must be SPM5, SPM8 or SPM12.\n Default=\'SPM12\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": str. path to nifti file containing regions.\n\n - "labels": list of the names of the regions\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown.\n\n '
versions = ['SPM5', 'SPM8', 'SPM12']
if (version not in versions):
raise ValueError(('The version of AAL requested "%s" does not exist.Please choose one among %s.' % (version, str(versions))))
if (url is None):
baseurl = 'http://www.gin.cnrs.fr/AAL_files/aal_for_%s.tar.gz'
url = (baseurl % version)
opts = {'uncompress': True}
dataset_name = ('aal_' + version)
basenames = ('AAL.nii', 'AAL.xml')
filenames = [(os.path.join('aal', 'atlas', f), url, opts) for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(atlas_img, labels_file) = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
xml_tree = xml.etree.ElementTree.parse(labels_file)
root = xml_tree.getroot()
labels = []
indices = []
for label in root.iter('label'):
indices.append(label.find('index').text)
labels.append(label.find('name').text)
params = {'description': fdescr, 'maps': atlas_img, 'labels': labels, 'indices': indices}
return Bunch(**params)<|docstring|>Downloads and returns the AAL template for SPM 12.
This atlas is the result of an automated anatomical parcellation of the
spatially normalized single-subject high-resolution T1 volume provided by
the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,
Trans. Med. Imag. 17, 463-468, PubMed).
For more information on this dataset's structure,
see :footcite:`AAL_atlas`,
and :footcite:`TZOURIOMAZOYER2002273`.
Parameters
----------
version : string {'SPM12', 'SPM5', 'SPM8'}, optional
The version of the AAL atlas. Must be SPM5, SPM8 or SPM12.
Default='SPM12'.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": str. path to nifti file containing regions.
- "labels": list of the names of the regions
References
----------
.. footbibliography::
Notes
-----
Licence: unknown.<|endoftext|>
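A minimal usage sketch for the AAL fetcher; note that `indices` holds the voxel values (as strings, parsed from the XML above) aligned entry-by-entry with `labels`:
from nilearn import datasets

aal = datasets.fetch_atlas_aal(version='SPM12')
print(aal.maps)   # path to AAL.nii
for value, name in list(zip(aal.indices, aal.labels))[:3]:
    print(value, name)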
|
5de00f68135cafd60541d76a30308c0c29b11ee1cf85e479afbde181fcfd6741
|
@fill_doc
def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and loads multiscale functional brain parcellations\n\n This atlas includes group brain parcellations generated from\n resting-state functional magnetic resonance images from about\n 200 young healthy subjects.\n\n Multiple scales (number of networks) are available, among\n 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations\n have been generated using a method called bootstrap analysis of\n stable clusters called as BASC :footcite:`BELLEC20101126`,\n and the scales have been selected using a data-driven method\n called MSTEPS :footcite:`Bellec2013Mining`.\n\n Note that two versions of the template are available, \'sym\' or \'asym\'.\n The \'asym\' type contains brain images that have been registered in the\n asymmetric version of the MNI brain template (reflecting that the brain\n is asymmetric), while the \'sym\' type contains images registered in the\n symmetric version of the MNI template. The symmetric template has been\n forced to be symmetric anatomically, and is therefore ideally suited to\n study homotopic functional connections in fMRI: finding homotopic regions\n simply consists of flipping the x-axis of the template.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n version : str {\'sym\', \'asym\'}, optional\n Available versions are \'sym\' or \'asym\'. By default all scales of\n brain parcellations of version \'sym\' will be returned.\n Default=\'sym\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, Keys are:\n\n - "scale007", "scale012", "scale020", "scale036", "scale064",\n "scale122", "scale197", "scale325", "scale444": str, path\n to Nifti file of various scales of brain parcellations.\n\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information on this dataset\'s structure, see\n https://figshare.com/articles/basc/1285615\n\n '
versions = ['sym', 'asym']
if (version not in versions):
raise ValueError(('The version of Brain parcellations requested "%s" does not exist. Please choose one among them %s.' % (version, str(versions))))
keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444']
if (version == 'sym'):
url = 'https://ndownloader.figshare.com/files/1861819'
elif (version == 'asym'):
url = 'https://ndownloader.figshare.com/files/1861820'
opts = {'uncompress': True}
dataset_name = 'basc_multiscale_2015'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
folder_name = ('template_cambridge_basc_multiscale_nii_' + version)
basenames = [(((('template_cambridge_basc_multiscale_' + version) + '_') + key) + '.nii.gz') for key in keys]
filenames = [(os.path.join(folder_name, basename), url, opts) for basename in basenames]
data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
descr = _get_dataset_descr(dataset_name)
params = dict(zip(keys, data))
params['description'] = descr
return Bunch(**params)
|
Downloads and loads multiscale functional brain parcellations
This atlas includes group brain parcellations generated from
resting-state functional magnetic resonance images from about
200 young healthy subjects.
Multiple scales (number of networks) are available, among
7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations
have been generated using a method called bootstrap analysis of
stable clusters known as BASC :footcite:`BELLEC20101126`,
and the scales have been selected using a data-driven method
called MSTEPS :footcite:`Bellec2013Mining`.
Note that two versions of the template are available, 'sym' or 'asym'.
The 'asym' type contains brain images that have been registered in the
asymmetric version of the MNI brain template (reflecting that the brain
is asymmetric), while the 'sym' type contains images registered in the
symmetric version of the MNI template. The symmetric template has been
forced to be symmetric anatomically, and is therefore ideally suited to
study homotopic functional connections in fMRI: finding homotopic regions
simply consists of flipping the x-axis of the template.
.. versionadded:: 0.2.3
Parameters
----------
version : str {'sym', 'asym'}, optional
Available versions are 'sym' or 'asym'. By default all scales of
brain parcellations of version 'sym' will be returned.
Default='sym'.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, Keys are:
- "scale007", "scale012", "scale020", "scale036", "scale064",
"scale122", "scale197", "scale325", "scale444": str, path
to Nifti file of various scales of brain parcellations.
- "description": details about the data release.
References
----------
.. footbibliography::
Notes
-----
For more information on this dataset's structure, see
https://figshare.com/articles/basc/1285615
|
nilearn/datasets/atlas.py
|
fetch_atlas_basc_multiscale_2015
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and loads multiscale functional brain parcellations\n\n This atlas includes group brain parcellations generated from\n resting-state functional magnetic resonance images from about\n 200 young healthy subjects.\n\n Multiple scales (number of networks) are available, among\n 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations\n have been generated using a method called bootstrap analysis of\n stable clusters called as BASC :footcite:`BELLEC20101126`,\n and the scales have been selected using a data-driven method\n called MSTEPS :footcite:`Bellec2013Mining`.\n\n Note that two versions of the template are available, \'sym\' or \'asym\'.\n The \'asym\' type contains brain images that have been registered in the\n asymmetric version of the MNI brain template (reflecting that the brain\n is asymmetric), while the \'sym\' type contains images registered in the\n symmetric version of the MNI template. The symmetric template has been\n forced to be symmetric anatomically, and is therefore ideally suited to\n study homotopic functional connections in fMRI: finding homotopic regions\n simply consists of flipping the x-axis of the template.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n version : str {\'sym\', \'asym\'}, optional\n Available versions are \'sym\' or \'asym\'. By default all scales of\n brain parcellations of version \'sym\' will be returned.\n Default=\'sym\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, Keys are:\n\n - "scale007", "scale012", "scale020", "scale036", "scale064",\n "scale122", "scale197", "scale325", "scale444": str, path\n to Nifti file of various scales of brain parcellations.\n\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information on this dataset\'s structure, see\n https://figshare.com/articles/basc/1285615\n\n '
versions = ['sym', 'asym']
if (version not in versions):
raise ValueError(('The version of Brain parcellations requested "%s" does not exist. Please choose one among them %s.' % (version, str(versions))))
keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444']
if (version == 'sym'):
url = 'https://ndownloader.figshare.com/files/1861819'
elif (version == 'asym'):
url = 'https://ndownloader.figshare.com/files/1861820'
opts = {'uncompress': True}
dataset_name = 'basc_multiscale_2015'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
folder_name = ('template_cambridge_basc_multiscale_nii_' + version)
basenames = [(((('template_cambridge_basc_multiscale_' + version) + '_') + key) + '.nii.gz') for key in keys]
filenames = [(os.path.join(folder_name, basename), url, opts) for basename in basenames]
data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
descr = _get_dataset_descr(dataset_name)
params = dict(zip(keys, data))
params['description'] = descr
return Bunch(**params)
|
@fill_doc
def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, url=None, resume=True, verbose=1):
'Downloads and loads multiscale functional brain parcellations\n\n This atlas includes group brain parcellations generated from\n resting-state functional magnetic resonance images from about\n 200 young healthy subjects.\n\n Multiple scales (number of networks) are available, among\n 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations\n have been generated using a method called bootstrap analysis of\n stable clusters called as BASC :footcite:`BELLEC20101126`,\n and the scales have been selected using a data-driven method\n called MSTEPS :footcite:`Bellec2013Mining`.\n\n Note that two versions of the template are available, \'sym\' or \'asym\'.\n The \'asym\' type contains brain images that have been registered in the\n asymmetric version of the MNI brain template (reflecting that the brain\n is asymmetric), while the \'sym\' type contains images registered in the\n symmetric version of the MNI template. The symmetric template has been\n forced to be symmetric anatomically, and is therefore ideally suited to\n study homotopic functional connections in fMRI: finding homotopic regions\n simply consists of flipping the x-axis of the template.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n version : str {\'sym\', \'asym\'}, optional\n Available versions are \'sym\' or \'asym\'. By default all scales of\n brain parcellations of version \'sym\' will be returned.\n Default=\'sym\'.\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, Keys are:\n\n - "scale007", "scale012", "scale020", "scale036", "scale064",\n "scale122", "scale197", "scale325", "scale444": str, path\n to Nifti file of various scales of brain parcellations.\n\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n For more information on this dataset\'s structure, see\n https://figshare.com/articles/basc/1285615\n\n '
versions = ['sym', 'asym']
if (version not in versions):
raise ValueError(('The version of Brain parcellations requested "%s" does not exist. Please choose one among them %s.' % (version, str(versions))))
keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444']
if (version == 'sym'):
url = 'https://ndownloader.figshare.com/files/1861819'
elif (version == 'asym'):
url = 'https://ndownloader.figshare.com/files/1861820'
opts = {'uncompress': True}
dataset_name = 'basc_multiscale_2015'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
folder_name = ('template_cambridge_basc_multiscale_nii_' + version)
basenames = [(((('template_cambridge_basc_multiscale_' + version) + '_') + key) + '.nii.gz') for key in keys]
filenames = [(os.path.join(folder_name, basename), url, opts) for basename in basenames]
data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
descr = _get_dataset_descr(dataset_name)
params = dict(zip(keys, data))
params['description'] = descr
return Bunch(**params)<|docstring|>Downloads and loads multiscale functional brain parcellations
This atlas includes group brain parcellations generated from
resting-state functional magnetic resonance images from about
200 young healthy subjects.
Multiple scales (number of networks) are available, among
7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations
have been generated using a method called bootstrap analysis of
stable clusters known as BASC :footcite:`BELLEC20101126`,
and the scales have been selected using a data-driven method
called MSTEPS :footcite:`Bellec2013Mining`.
Note that two versions of the template are available, 'sym' or 'asym'.
The 'asym' type contains brain images that have been registered in the
asymmetric version of the MNI brain template (reflecting that the brain
is asymmetric), while the 'sym' type contains images registered in the
symmetric version of the MNI template. The symmetric template has been
forced to be symmetric anatomically, and is therefore ideally suited to
study homotopic functional connections in fMRI: finding homotopic regions
simply consists of flipping the x-axis of the template.
.. versionadded:: 0.2.3
Parameters
----------
version : str {'sym', 'asym'}, optional
Available versions are 'sym' or 'asym'. By default all scales of
brain parcellations of version 'sym' will be returned.
Default='sym'.
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, Keys are:
- "scale007", "scale012", "scale020", "scale036", "scale064",
"scale122", "scale197", "scale325", "scale444": str, path
to Nifti file of various scales of brain parcellations.
- "description": details about the data release.
References
----------
.. footbibliography::
Notes
-----
For more information on this dataset's structure, see
https://figshare.com/articles/basc/1285615<|endoftext|>
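A short usage sketch for the BASC multiscale fetcher defined above, assuming it is re-exported as nilearn.datasets.fetch_atlas_basc_multiscale_2015 and that the files can be downloaded; each scale key documented above is a path to a Nifti parcellation.

from nilearn import datasets

basc = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
parcellation_64 = basc['scale064']   # path to the 64-network parcellation
print(parcellation_64)
print(basc.description[:200])        # beginning of the bundled description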
|
0f2ec23f8b881fd2246c4bda7b4fce133938d15606d5e4db56123d7b1069cda3
|
def fetch_coords_dosenbach_2010(ordered_regions=True):
'Load the Dosenbach et al. 160 ROIs. These ROIs cover\n much of the cerebral cortex and cerebellum and are assigned to 6\n networks.\n\n See :footcite:`Dosenbach20101358`.\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their names and their locations (anterior to posterior).\n Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": coordinates of 160 ROIs in MNI space\n - "labels": ROIs labels\n - "networks": networks names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'dosenbach_2010'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'dosenbach_2010.csv')
out_csv = np.recfromcsv(csv)
if ordered_regions:
out_csv = np.sort(out_csv, order=['network', 'name', 'y'])
names = out_csv['name']
numbers = out_csv['number']
labels = np.array(['{0} {1}'.format(name, number) for (name, number) in zip(names, numbers)])
params = dict(rois=out_csv[['x', 'y', 'z']], labels=labels, networks=out_csv['network'], description=fdescr)
return Bunch(**params)
|
Load the Dosenbach et al. 160 ROIs. These ROIs cover
much of the cerebral cortex and cerebellum and are assigned to 6
networks.
See :footcite:`Dosenbach20101358`.
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their names and their locations (anterior to posterior).
Default=True.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": coordinates of 160 ROIs in MNI space
- "labels": ROIs labels
- "networks": networks names
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_coords_dosenbach_2010
|
lemiceterieux/nilearn
| 827 |
python
|
def fetch_coords_dosenbach_2010(ordered_regions=True):
'Load the Dosenbach et al. 160 ROIs. These ROIs cover\n much of the cerebral cortex and cerebellum and are assigned to 6\n networks.\n\n See :footcite:`Dosenbach20101358`.\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their names and their locations (anterior to posterior).\n Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": coordinates of 160 ROIs in MNI space\n - "labels": ROIs labels\n - "networks": networks names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'dosenbach_2010'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'dosenbach_2010.csv')
out_csv = np.recfromcsv(csv)
if ordered_regions:
out_csv = np.sort(out_csv, order=['network', 'name', 'y'])
names = out_csv['name']
numbers = out_csv['number']
labels = np.array(['{0} {1}'.format(name, number) for (name, number) in zip(names, numbers)])
params = dict(rois=out_csv[['x', 'y', 'z']], labels=labels, networks=out_csv['network'], description=fdescr)
return Bunch(**params)
|
def fetch_coords_dosenbach_2010(ordered_regions=True):
'Load the Dosenbach et al. 160 ROIs. These ROIs cover\n much of the cerebral cortex and cerebellum and are assigned to 6\n networks.\n\n See :footcite:`Dosenbach20101358`.\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their names and their locations (anterior to posterior).\n Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": coordinates of 160 ROIs in MNI space\n - "labels": ROIs labels\n - "networks": networks names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'dosenbach_2010'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, 'data', 'dosenbach_2010.csv')
out_csv = np.recfromcsv(csv)
if ordered_regions:
out_csv = np.sort(out_csv, order=['network', 'name', 'y'])
names = out_csv['name']
numbers = out_csv['number']
labels = np.array(['{0} {1}'.format(name, number) for (name, number) in zip(names, numbers)])
params = dict(rois=out_csv[['x', 'y', 'z']], labels=labels, networks=out_csv['network'], description=fdescr)
return Bunch(**params)<|docstring|>Load the Dosenbach et al. 160 ROIs. These ROIs cover
much of the cerebral cortex and cerebellum and are assigned to 6
networks.
See :footcite:`Dosenbach20101358`.
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their names and their locations (anterior to posterior).
Default=True.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": coordinates of 160 ROIs in MNI space
- "labels": ROIs labels
- "networks": networks names
References
----------
.. footbibliography::<|endoftext|>
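A usage sketch for the Dosenbach coordinate loader above; it only assumes numpy and that the function is re-exported from nilearn.datasets.

import numpy as np
from nilearn import datasets

dosenbach = datasets.fetch_coords_dosenbach_2010(ordered_regions=True)
# 'rois' is a record array with x, y, z fields in MNI space.
coords = np.vstack((dosenbach.rois['x'],
                    dosenbach.rois['y'],
                    dosenbach.rois['z'])).T
print(coords.shape)            # expected (160, 3)
print(dosenbach.networks[:5])  # network assignment of the first ROIs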
|
f0473783fe21f4c1ad8005129acdf7df688051df1ea43ecee1cf1b21f9124baf
|
def fetch_coords_seitzman_2018(ordered_regions=True):
'Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,\n subcortical and cerebellar regions and are assigned to one of 13\n networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,\n FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,\n SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and\n have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,\n basalGanglia, amygdala, cortexMid).\n\n See :footcite:`SEITZMAN2020116290`.\n\n .. versionadded:: 0.5.1\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their locations (anterior to posterior). Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": Coordinates of 300 ROIs in MNI space\n - "radius": Radius of each ROI in mm\n - "networks": Network names\n - "regions": Region names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'seitzman_2018'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
roi_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_300inVol_MNI_allInfo.txt')
anatomical_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_anatomicalLabels.txt')
rois = np.recfromcsv(roi_file, delimiter=' ')
rois = recfunctions.rename_fields(rois, {'netname': 'network', 'radiusmm': 'radius'})
rois.network = rois.network.astype(str)
with open(anatomical_file, 'r') as fi:
header = fi.readline()
region_mapping = {}
for r in header.strip().split(','):
(i, region) = r.split('=')
region_mapping[int(i)] = region
anatomical = np.genfromtxt(anatomical_file, skip_header=1)
anatomical_names = np.array([region_mapping[a] for a in anatomical])
rois = recfunctions.merge_arrays((rois, anatomical_names), asrecarray=True, flatten=True)
rois.dtype.names = (rois.dtype.names[:(- 1)] + ('region',))
if ordered_regions:
rois = np.sort(rois, order=['network', 'y'])
params = dict(rois=rois[['x', 'y', 'z']], radius=rois['radius'], networks=rois['network'].astype(str), regions=rois['region'], description=fdescr)
return Bunch(**params)
|
Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,
subcortical and cerebellar regions and are assigned to one of 13
networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,
FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,
SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and
have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,
basalGanglia, amygdala, cortexMid).
See :footcite:`SEITZMAN2020116290`.
.. versionadded:: 0.5.1
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their locations (anterior to posterior). Default=True.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": Coordinates of 300 ROIs in MNI space
- "radius": Radius of each ROI in mm
- "networks": Network names
- "regions": Region names
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_coords_seitzman_2018
|
lemiceterieux/nilearn
| 827 |
python
|
def fetch_coords_seitzman_2018(ordered_regions=True):
'Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,\n subcortical and cerebellar regions and are assigned to one of 13\n networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,\n FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,\n SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and\n have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,\n basalGanglia, amygdala, cortexMid).\n\n See :footcite:`SEITZMAN2020116290`.\n\n .. versionadded:: 0.5.1\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their locations (anterior to posterior). Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": Coordinates of 300 ROIs in MNI space\n - "radius": Radius of each ROI in mm\n - "networks": Network names\n - "regions": Region names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'seitzman_2018'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
roi_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_300inVol_MNI_allInfo.txt')
anatomical_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_anatomicalLabels.txt')
rois = np.recfromcsv(roi_file, delimiter=' ')
rois = recfunctions.rename_fields(rois, {'netname': 'network', 'radiusmm': 'radius'})
rois.network = rois.network.astype(str)
with open(anatomical_file, 'r') as fi:
header = fi.readline()
region_mapping = {}
for r in header.strip().split(','):
(i, region) = r.split('=')
region_mapping[int(i)] = region
anatomical = np.genfromtxt(anatomical_file, skip_header=1)
anatomical_names = np.array([region_mapping[a] for a in anatomical])
rois = recfunctions.merge_arrays((rois, anatomical_names), asrecarray=True, flatten=True)
rois.dtype.names = (rois.dtype.names[:(- 1)] + ('region',))
if ordered_regions:
rois = np.sort(rois, order=['network', 'y'])
params = dict(rois=rois[['x', 'y', 'z']], radius=rois['radius'], networks=rois['network'].astype(str), regions=rois['region'], description=fdescr)
return Bunch(**params)
|
def fetch_coords_seitzman_2018(ordered_regions=True):
'Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,\n subcortical and cerebellar regions and are assigned to one of 13\n networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,\n FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,\n SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and\n have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,\n basalGanglia, amygdala, cortexMid).\n\n See :footcite:`SEITZMAN2020116290`.\n\n .. versionadded:: 0.5.1\n\n Parameters\n ----------\n ordered_regions : bool, optional\n ROIs from same networks are grouped together and ordered with respect\n to their locations (anterior to posterior). Default=True.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "rois": Coordinates of 300 ROIs in MNI space\n - "radius": Radius of each ROI in mm\n - "networks": Network names\n - "regions": Region names\n\n References\n ----------\n .. footbibliography::\n\n '
dataset_name = 'seitzman_2018'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
roi_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_300inVol_MNI_allInfo.txt')
anatomical_file = os.path.join(package_directory, 'data', 'seitzman_2018_ROIs_anatomicalLabels.txt')
rois = np.recfromcsv(roi_file, delimiter=' ')
rois = recfunctions.rename_fields(rois, {'netname': 'network', 'radiusmm': 'radius'})
rois.network = rois.network.astype(str)
with open(anatomical_file, 'r') as fi:
header = fi.readline()
region_mapping = {}
for r in header.strip().split(','):
(i, region) = r.split('=')
region_mapping[int(i)] = region
anatomical = np.genfromtxt(anatomical_file, skip_header=1)
anatomical_names = np.array([region_mapping[a] for a in anatomical])
rois = recfunctions.merge_arrays((rois, anatomical_names), asrecarray=True, flatten=True)
rois.dtype.names = (rois.dtype.names[:(- 1)] + ('region',))
if ordered_regions:
rois = np.sort(rois, order=['network', 'y'])
params = dict(rois=rois[['x', 'y', 'z']], radius=rois['radius'], networks=rois['network'].astype(str), regions=rois['region'], description=fdescr)
return Bunch(**params)<|docstring|>Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,
subcortical and cerebellar regions and are assigned to one of 13
networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,
FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,
SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and
have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,
basalGanglia, amygdala, cortexMid).
See :footcite:`SEITZMAN2020116290`.
.. versionadded:: 0.5.1
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their locations (anterior to posterior). Default=True.
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "rois": Coordinates of 300 ROIs in MNI space
- "radius": Radius of each ROI in mm
- "networks": Network names
- "regions": Region names
References
----------
.. footbibliography::<|endoftext|>
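The Seitzman loader above can be exercised the same way; this sketch assumes the nilearn.datasets re-export and simply inspects the documented keys.

import numpy as np
from nilearn import datasets

seitzman = datasets.fetch_coords_seitzman_2018(ordered_regions=True)
coords = np.vstack((seitzman.rois['x'],
                    seitzman.rois['y'],
                    seitzman.rois['z'])).T
print(coords.shape)                    # expected (300, 3)
print(sorted(set(seitzman.networks)))  # the 13 network names listed above
print(seitzman.radius[:5])             # per-ROI radius in mm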
|
f2186b3112fc21fac613bbcb01d4a3025800a649cdbbb656638f36bf3ef8bb29
|
@fill_doc
def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Allen and MIALAB ICA atlas\n (dated 2011).\n\n See :footcite:`Allen2011baseline`.\n\n The provided images are in MNI152 space.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": T-maps of all 75 unthresholded components.\n - "rsn28": T-maps of 28 RSNs included in E. Allen et al.\n - "networks": string list containing the names for the 28 RSNs.\n - "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"\n file of the 28 RSNs.\n - "comps": The aggregate ICA Components.\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown\n\n See http://mialab.mrn.org/data/index.html for more information\n on this dataset.\n\n '
if (url is None):
url = 'https://osf.io/hrcku/download'
dataset_name = 'allen_rsn_2011'
keys = ('maps', 'rsn28', 'comps')
opts = {'uncompress': True}
files = ['ALL_HC_unthresholded_tmaps.nii.gz', 'RSN_HC_unthresholded_tmaps.nii.gz', 'rest_hcp_agg__component_ica_.nii.gz']
labels = [('Basal Ganglia', [21]), ('Auditory', [17]), ('Sensorimotor', [7, 23, 24, 38, 56, 29]), ('Visual', [46, 64, 67, 48, 39, 59]), ('Default-Mode', [50, 53, 25, 68]), ('Attentional', [34, 60, 52, 72, 71, 55]), ('Frontal', [42, 20, 47, 49])]
networks = [([name] * len(idxs)) for (name, idxs) in labels]
filenames = [(os.path.join('allen_rsn_2011', f), url, opts) for f in files]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = [('description', fdescr), ('rsn_indices', labels), ('networks', networks)]
params.extend(list(zip(keys, sub_files)))
return Bunch(**dict(params))
|
Download and return file names for the Allen and MIALAB ICA atlas
(dated 2011).
See :footcite:`Allen2011baseline`.
The provided images are in MNI152 space.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": T-maps of all 75 unthresholded components.
- "rsn28": T-maps of 28 RSNs included in E. Allen et al.
- "networks": string list containing the names for the 28 RSNs.
- "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"
file of the 28 RSNs.
- "comps": The aggregate ICA Components.
- "description": details about the data release.
References
----------
.. footbibliography::
Notes
-----
Licence: unknown
See http://mialab.mrn.org/data/index.html for more information
on this dataset.
|
nilearn/datasets/atlas.py
|
fetch_atlas_allen_2011
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Allen and MIALAB ICA atlas\n (dated 2011).\n\n See :footcite:`Allen2011baseline`.\n\n The provided images are in MNI152 space.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": T-maps of all 75 unthresholded components.\n - "rsn28": T-maps of 28 RSNs included in E. Allen et al.\n - "networks": string list containing the names for the 28 RSNs.\n - "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"\n file of the 28 RSNs.\n - "comps": The aggregate ICA Components.\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown\n\n See http://mialab.mrn.org/data/index.html for more information\n on this dataset.\n\n '
if (url is None):
url = 'https://osf.io/hrcku/download'
dataset_name = 'allen_rsn_2011'
keys = ('maps', 'rsn28', 'comps')
opts = {'uncompress': True}
files = ['ALL_HC_unthresholded_tmaps.nii.gz', 'RSN_HC_unthresholded_tmaps.nii.gz', 'rest_hcp_agg__component_ica_.nii.gz']
labels = [('Basal Ganglia', [21]), ('Auditory', [17]), ('Sensorimotor', [7, 23, 24, 38, 56, 29]), ('Visual', [46, 64, 67, 48, 39, 59]), ('Default-Mode', [50, 53, 25, 68]), ('Attentional', [34, 60, 52, 72, 71, 55]), ('Frontal', [42, 20, 47, 49])]
networks = [([name] * len(idxs)) for (name, idxs) in labels]
filenames = [(os.path.join('allen_rsn_2011', f), url, opts) for f in files]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = [('description', fdescr), ('rsn_indices', labels), ('networks', networks)]
params.extend(list(zip(keys, sub_files)))
return Bunch(**dict(params))
|
@fill_doc
def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1):
'Download and return file names for the Allen and MIALAB ICA atlas\n (dated 2011).\n\n See :footcite:`Allen2011baseline`.\n\n The provided images are in MNI152 space.\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, keys are:\n\n - "maps": T-maps of all 75 unthresholded components.\n - "rsn28": T-maps of 28 RSNs included in E. Allen et al.\n - "networks": string list containing the names for the 28 RSNs.\n - "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"\n file of the 28 RSNs.\n - "comps": The aggregate ICA Components.\n - "description": details about the data release.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n Licence: unknown\n\n See http://mialab.mrn.org/data/index.html for more information\n on this dataset.\n\n '
if (url is None):
url = 'https://osf.io/hrcku/download'
dataset_name = 'allen_rsn_2011'
keys = ('maps', 'rsn28', 'comps')
opts = {'uncompress': True}
files = ['ALL_HC_unthresholded_tmaps.nii.gz', 'RSN_HC_unthresholded_tmaps.nii.gz', 'rest_hcp_agg__component_ica_.nii.gz']
labels = [('Basal Ganglia', [21]), ('Auditory', [17]), ('Sensorimotor', [7, 23, 24, 38, 56, 29]), ('Visual', [46, 64, 67, 48, 39, 59]), ('Default-Mode', [50, 53, 25, 68]), ('Attentional', [34, 60, 52, 72, 71, 55]), ('Frontal', [42, 20, 47, 49])]
networks = [([name] * len(idxs)) for (name, idxs) in labels]
filenames = [(os.path.join('allen_rsn_2011', f), url, opts) for f in files]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = [('description', fdescr), ('rsn_indices', labels), ('networks', networks)]
params.extend(list(zip(keys, sub_files)))
return Bunch(**dict(params))<|docstring|>Download and return file names for the Allen and MIALAB ICA atlas
(dated 2011).
See :footcite:`Allen2011baseline`.
The provided images are in MNI152 space.
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, keys are:
- "maps": T-maps of all 75 unthresholded components.
- "rsn28": T-maps of 28 RSNs included in E. Allen et al.
- "networks": string list containing the names for the 28 RSNs.
- "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"
file of the 28 RSNs.
- "comps": The aggregate ICA Components.
- "description": details about the data release.
References
----------
.. footbibliography::
Notes
-----
Licence: unknown
See http://mialab.mrn.org/data/index.html for more information
on this dataset.<|endoftext|>
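A hedged usage sketch for the Allen 2011 fetcher above; it assumes the nilearn.datasets re-export and a working download, and only touches keys documented in the record.

from nilearn import datasets

allen = datasets.fetch_atlas_allen_2011()
print(allen.maps)    # path to the 75 unthresholded component T-maps
print(allen.rsn28)   # path to the 28 resting-state network T-maps
# 'rsn_indices' pairs each network name with component indices in 'maps'.
for name, indices in allen.rsn_indices:
    print(name, indices)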
|
a136a23bc4919b98d7fa94ca5df01a3df1761d87c766867b8c728542f5ea9983
|
@fill_doc
def fetch_atlas_surf_destrieux(data_dir=None, url=None, resume=True, verbose=1):
'Download and load Destrieux et al, 2010 cortical atlas\n\n See :footcite:`DESTRIEUX20101`.\n\n This atlas returns 76 labels per hemisphere based on sulco-gryal patterns\n as distributed with Freesurfer in fsaverage5 surface space.\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "labels": list\n Contains region labels\n\n - "map_left": numpy.ndarray\n Index into \'labels\' for each vertex on the\n left hemisphere of the fsaverage5 surface\n\n - "map_right": numpy.ndarray\n Index into \'labels\' for each vertex on the\n right hemisphere of the fsaverage5 surface\n\n - "description": str\n Details about the dataset\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/'
dataset_name = 'destrieux_surface'
fdescr = _get_dataset_descr(dataset_name)
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
annot_file = '%s.aparc.a2009s.annot'
annot_url = (url + '%i/%s.aparc.a2009s.annot')
annot_nids = {'lh annot': 9343, 'rh annot': 9342}
annots = []
for hemi in [('lh', 'left'), ('rh', 'right')]:
annot = _fetch_files(data_dir, [((annot_file % hemi[1]), (annot_url % (annot_nids[('%s annot' % hemi[0])], hemi[0])), {'move': (annot_file % hemi[1])})], resume=resume, verbose=verbose)[0]
annots.append(annot)
annot_left = nb.freesurfer.read_annot(annots[0])
annot_right = nb.freesurfer.read_annot(annots[1])
return Bunch(labels=annot_left[2], map_left=annot_left[0], map_right=annot_right[0], description=fdescr)
|
Download and load Destrieux et al, 2010 cortical atlas
See :footcite:`DESTRIEUX20101`.
This atlas returns 76 labels per hemisphere based on sulco-gyral patterns
as distributed with Freesurfer in fsaverage5 surface space.
.. versionadded:: 0.3
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "labels": list
Contains region labels
- "map_left": numpy.ndarray
Index into 'labels' for each vertex on the
left hemisphere of the fsaverage5 surface
- "map_right": numpy.ndarray
Index into 'labels' for each vertex on the
right hemisphere of the fsaverage5 surface
- "description": str
Details about the dataset
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_atlas_surf_destrieux
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_surf_destrieux(data_dir=None, url=None, resume=True, verbose=1):
'Download and load Destrieux et al, 2010 cortical atlas\n\n See :footcite:`DESTRIEUX20101`.\n\n This atlas returns 76 labels per hemisphere based on sulco-gryal patterns\n as distributed with Freesurfer in fsaverage5 surface space.\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "labels": list\n Contains region labels\n\n - "map_left": numpy.ndarray\n Index into \'labels\' for each vertex on the\n left hemisphere of the fsaverage5 surface\n\n - "map_right": numpy.ndarray\n Index into \'labels\' for each vertex on the\n right hemisphere of the fsaverage5 surface\n\n - "description": str\n Details about the dataset\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/'
dataset_name = 'destrieux_surface'
fdescr = _get_dataset_descr(dataset_name)
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
annot_file = '%s.aparc.a2009s.annot'
annot_url = (url + '%i/%s.aparc.a2009s.annot')
annot_nids = {'lh annot': 9343, 'rh annot': 9342}
annots = []
for hemi in [('lh', 'left'), ('rh', 'right')]:
annot = _fetch_files(data_dir, [((annot_file % hemi[1]), (annot_url % (annot_nids[('%s annot' % hemi[0])], hemi[0])), {'move': (annot_file % hemi[1])})], resume=resume, verbose=verbose)[0]
annots.append(annot)
annot_left = nb.freesurfer.read_annot(annots[0])
annot_right = nb.freesurfer.read_annot(annots[1])
return Bunch(labels=annot_left[2], map_left=annot_left[0], map_right=annot_right[0], description=fdescr)
|
@fill_doc
def fetch_atlas_surf_destrieux(data_dir=None, url=None, resume=True, verbose=1):
'Download and load Destrieux et al, 2010 cortical atlas\n\n See :footcite:`DESTRIEUX20101`.\n\n This atlas returns 76 labels per hemisphere based on sulco-gryal patterns\n as distributed with Freesurfer in fsaverage5 surface space.\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n %(data_dir)s\n %(url)s\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - "labels": list\n Contains region labels\n\n - "map_left": numpy.ndarray\n Index into \'labels\' for each vertex on the\n left hemisphere of the fsaverage5 surface\n\n - "map_right": numpy.ndarray\n Index into \'labels\' for each vertex on the\n right hemisphere of the fsaverage5 surface\n\n - "description": str\n Details about the dataset\n\n References\n ----------\n .. footbibliography::\n\n '
if (url is None):
url = 'https://www.nitrc.org/frs/download.php/'
dataset_name = 'destrieux_surface'
fdescr = _get_dataset_descr(dataset_name)
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
annot_file = '%s.aparc.a2009s.annot'
annot_url = (url + '%i/%s.aparc.a2009s.annot')
annot_nids = {'lh annot': 9343, 'rh annot': 9342}
annots = []
for hemi in [('lh', 'left'), ('rh', 'right')]:
annot = _fetch_files(data_dir, [((annot_file % hemi[1]), (annot_url % (annot_nids[('%s annot' % hemi[0])], hemi[0])), {'move': (annot_file % hemi[1])})], resume=resume, verbose=verbose)[0]
annots.append(annot)
annot_left = nb.freesurfer.read_annot(annots[0])
annot_right = nb.freesurfer.read_annot(annots[1])
return Bunch(labels=annot_left[2], map_left=annot_left[0], map_right=annot_right[0], description=fdescr)<|docstring|>Download and load Destrieux et al, 2010 cortical atlas
See :footcite:`DESTRIEUX20101`.
This atlas returns 76 labels per hemisphere based on sulco-gryal patterns
as distributed with Freesurfer in fsaverage5 surface space.
.. versionadded:: 0.3
Parameters
----------
%(data_dir)s
%(url)s
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- "labels": list
Contains region labels
- "map_left": numpy.ndarray
Index into 'labels' for each vertex on the
left hemisphere of the fsaverage5 surface
- "map_right": numpy.ndarray
Index into 'labels' for each vertex on the
right hemisphere of the fsaverage5 surface
- "description": str
Details about the dataset
References
----------
.. footbibliography::<|endoftext|>
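A usage sketch for the Destrieux surface fetcher above, assuming the nilearn.datasets re-export; the labels come straight from FreeSurfer's read_annot, so they are byte strings.

import numpy as np
from nilearn import datasets

destrieux = datasets.fetch_atlas_surf_destrieux()
print(len(destrieux.labels))      # region labels per hemisphere
print(destrieux.map_left.shape)   # one label index per fsaverage5 vertex
print(np.unique(destrieux.map_right)[:10])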
|
ca0385ec311ee7449242ea85f26d181812baa4592b9b34fe28261c52dee88d3e
|
def _separate_talairach_levels(atlas_img, labels, verbose=1):
"Separate the multiple annotation levels in talairach raw atlas.\n\n The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,\n tissue, brodmann area. They are mixed up in the original atlas: each label\n in the atlas corresponds to a 5-tuple containing, for each of these levels,\n a value or the string '*' (meaning undefined, background).\n\n This function disentangles the levels, and stores each on an octet in an\n int64 image (the level with most labels, ba, has 72 labels).\n This way, any subset of these levels can be accessed by applying a bitwise\n mask.\n\n In the created image, the least significant octet contains the hemisphere,\n the next one the lobe, then gyrus, tissue, and ba. Background is 0.\n The labels contain\n [('level name', ['labels', 'for', 'this', 'level' ...]), ...],\n where the levels are in the order mentioned above.\n\n The label '*' is replaced by 'Background' for clarity.\n\n "
labels = np.asarray(labels)
if verbose:
print('Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS))
levels = []
new_img = np.zeros(atlas_img.shape, dtype=np.int64)
for (pos, level) in enumerate(_TALAIRACH_LEVELS):
if verbose:
print(level)
level_img = np.zeros(atlas_img.shape, dtype=np.int64)
level_labels = {'*': 0}
for (region_nb, region) in enumerate(labels[:, pos]):
level_labels.setdefault(region, len(level_labels))
level_img[(get_data(atlas_img) == region_nb)] = level_labels[region]
level_img <<= (8 * pos)
new_img |= level_img
level_labels = list(list(zip(*sorted(level_labels.items(), key=(lambda t: t[1]))))[0])
level_labels[0] = 'Background'
levels.append((level, level_labels))
new_img = new_img_like(atlas_img, data=new_img)
return (new_img, levels)
|
Separate the multiple annotation levels in talairach raw atlas.
The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,
tissue, brodmann area. They are mixed up in the original atlas: each label
in the atlas corresponds to a 5-tuple containing, for each of these levels,
a value or the string '*' (meaning undefined, background).
This function disentangles the levels, and stores each on an octet in an
int64 image (the level with most labels, ba, has 72 labels).
This way, any subset of these levels can be accessed by applying a bitwise
mask.
In the created image, the least significant octet contains the hemisphere,
the next one the lobe, then gyrus, tissue, and ba. Background is 0.
The labels contain
[('level name', ['labels', 'for', 'this', 'level' ...]), ...],
where the levels are in the order mentioned above.
The label '*' is replaced by 'Background' for clarity.
|
nilearn/datasets/atlas.py
|
_separate_talairach_levels
|
lemiceterieux/nilearn
| 827 |
python
|
def _separate_talairach_levels(atlas_img, labels, verbose=1):
"Separate the multiple annotation levels in talairach raw atlas.\n\n The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,\n tissue, brodmann area. They are mixed up in the original atlas: each label\n in the atlas corresponds to a 5-tuple containing, for each of these levels,\n a value or the string '*' (meaning undefined, background).\n\n This function disentangles the levels, and stores each on an octet in an\n int64 image (the level with most labels, ba, has 72 labels).\n This way, any subset of these levels can be accessed by applying a bitwise\n mask.\n\n In the created image, the least significant octet contains the hemisphere,\n the next one the lobe, then gyrus, tissue, and ba. Background is 0.\n The labels contain\n [('level name', ['labels', 'for', 'this', 'level' ...]), ...],\n where the levels are in the order mentioned above.\n\n The label '*' is replaced by 'Background' for clarity.\n\n "
labels = np.asarray(labels)
if verbose:
print('Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS))
levels = []
new_img = np.zeros(atlas_img.shape, dtype=np.int64)
for (pos, level) in enumerate(_TALAIRACH_LEVELS):
if verbose:
print(level)
level_img = np.zeros(atlas_img.shape, dtype=np.int64)
level_labels = {'*': 0}
for (region_nb, region) in enumerate(labels[:, pos]):
level_labels.setdefault(region, len(level_labels))
level_img[(get_data(atlas_img) == region_nb)] = level_labels[region]
level_img <<= (8 * pos)
new_img |= level_img
level_labels = list(list(zip(*sorted(level_labels.items(), key=(lambda t: t[1]))))[0])
level_labels[0] = 'Background'
levels.append((level, level_labels))
new_img = new_img_like(atlas_img, data=new_img)
return (new_img, levels)
|
def _separate_talairach_levels(atlas_img, labels, verbose=1):
"Separate the multiple annotation levels in talairach raw atlas.\n\n The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,\n tissue, brodmann area. They are mixed up in the original atlas: each label\n in the atlas corresponds to a 5-tuple containing, for each of these levels,\n a value or the string '*' (meaning undefined, background).\n\n This function disentangles the levels, and stores each on an octet in an\n int64 image (the level with most labels, ba, has 72 labels).\n This way, any subset of these levels can be accessed by applying a bitwise\n mask.\n\n In the created image, the least significant octet contains the hemisphere,\n the next one the lobe, then gyrus, tissue, and ba. Background is 0.\n The labels contain\n [('level name', ['labels', 'for', 'this', 'level' ...]), ...],\n where the levels are in the order mentioned above.\n\n The label '*' is replaced by 'Background' for clarity.\n\n "
labels = np.asarray(labels)
if verbose:
print('Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS))
levels = []
new_img = np.zeros(atlas_img.shape, dtype=np.int64)
for (pos, level) in enumerate(_TALAIRACH_LEVELS):
if verbose:
print(level)
level_img = np.zeros(atlas_img.shape, dtype=np.int64)
level_labels = {'*': 0}
for (region_nb, region) in enumerate(labels[:, pos]):
level_labels.setdefault(region, len(level_labels))
level_img[(get_data(atlas_img) == region_nb)] = level_labels[region]
level_img <<= (8 * pos)
new_img |= level_img
level_labels = list(list(zip(*sorted(level_labels.items(), key=(lambda t: t[1]))))[0])
level_labels[0] = 'Background'
levels.append((level, level_labels))
new_img = new_img_like(atlas_img, data=new_img)
return (new_img, levels)<|docstring|>Separate the multiple annotation levels in talairach raw atlas.
The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,
tissue, brodmann area. They are mixed up in the original atlas: each label
in the atlas corresponds to a 5-tuple containing, for each of these levels,
a value or the string '*' (meaning undefined, background).
This function disentangles the levels, and stores each on an octet in an
int64 image (the level with most labels, ba, has 72 labels).
This way, any subset of these levels can be accessed by applying a bitwise
mask.
In the created image, the least significant octet contains the hemisphere,
the next one the lobe, then gyrus, tissue, and ba. Background is 0.
The labels contain
[('level name', ['labels', 'for', 'this', 'level' ...]), ...],
where the levels are in the order mentioned above.
The label '*' is replaced by 'Background' for clarity.<|endoftext|>
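The octet packing described in the docstring above can be illustrated with plain integer arithmetic; the label values below are made up for the example and the snippet is independent of the fetcher itself.

# Suppose a voxel has (hypothetical) label indices per level, in the order
# hemisphere, lobe, gyrus, tissue, ba described above.
levels = [2, 5, 13, 1, 40]
packed = 0
for pos, value in enumerate(levels):
    packed |= value << (8 * pos)   # each level occupies its own octet

# A single level is recovered with a shift and an 8-bit mask, which is
# exactly what the downstream atlas reader does with (data >> (8*pos)) & 255.
recovered = [(packed >> (8 * pos)) & 255 for pos in range(len(levels))]
assert recovered == levels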
|
801ad79ec8bfc7e3e95817c12d4c34d2828917502a062ff0b7aefe7f5abf79ab
|
def _get_talairach_all_levels(data_dir=None, verbose=1):
"Get the path to Talairach atlas and labels\n\n The atlas is downloaded and the files are created if necessary.\n\n The image contains all five levels of the atlas, each encoded on 8 bits\n (least significant octet contains the hemisphere, the next one the lobe,\n then gyrus, tissue, and ba).\n\n The labels json file contains\n [['level name', ['labels', 'for', 'this', 'level' ...]], ...],\n where the levels are in the order mentioned above.\n\n "
data_dir = _get_dataset_dir('talairach_atlas', data_dir=data_dir, verbose=verbose)
img_file = os.path.join(data_dir, 'talairach.nii')
labels_file = os.path.join(data_dir, 'talairach_labels.json')
if (os.path.isfile(img_file) and os.path.isfile(labels_file)):
return (img_file, labels_file)
atlas_url = 'http://www.talairach.org/talairach.nii'
temp_dir = mkdtemp()
try:
temp_file = _fetch_files(temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0]
atlas_img = nb.load(temp_file, mmap=False)
atlas_img = check_niimg(atlas_img)
finally:
shutil.rmtree(temp_dir)
labels = atlas_img.header.extensions[0].get_content()
labels = labels.strip().decode('utf-8').split('\n')
labels = [l.split('.') for l in labels]
(new_img, level_labels) = _separate_talairach_levels(atlas_img, labels, verbose=verbose)
new_img.to_filename(img_file)
with open(labels_file, 'w') as fp:
json.dump(level_labels, fp)
return (img_file, labels_file)
|
Get the path to Talairach atlas and labels
The atlas is downloaded and the files are created if necessary.
The image contains all five levels of the atlas, each encoded on 8 bits
(least significant octet contains the hemisphere, the next one the lobe,
then gyrus, tissue, and ba).
The labels json file contains
[['level name', ['labels', 'for', 'this', 'level' ...]], ...],
where the levels are in the order mentioned above.
|
nilearn/datasets/atlas.py
|
_get_talairach_all_levels
|
lemiceterieux/nilearn
| 827 |
python
|
def _get_talairach_all_levels(data_dir=None, verbose=1):
"Get the path to Talairach atlas and labels\n\n The atlas is downloaded and the files are created if necessary.\n\n The image contains all five levels of the atlas, each encoded on 8 bits\n (least significant octet contains the hemisphere, the next one the lobe,\n then gyrus, tissue, and ba).\n\n The labels json file contains\n [['level name', ['labels', 'for', 'this', 'level' ...]], ...],\n where the levels are in the order mentioned above.\n\n "
data_dir = _get_dataset_dir('talairach_atlas', data_dir=data_dir, verbose=verbose)
img_file = os.path.join(data_dir, 'talairach.nii')
labels_file = os.path.join(data_dir, 'talairach_labels.json')
if (os.path.isfile(img_file) and os.path.isfile(labels_file)):
return (img_file, labels_file)
atlas_url = 'http://www.talairach.org/talairach.nii'
temp_dir = mkdtemp()
try:
temp_file = _fetch_files(temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0]
atlas_img = nb.load(temp_file, mmap=False)
atlas_img = check_niimg(atlas_img)
finally:
shutil.rmtree(temp_dir)
labels = atlas_img.header.extensions[0].get_content()
labels = labels.strip().decode('utf-8').split('\n')
labels = [l.split('.') for l in labels]
(new_img, level_labels) = _separate_talairach_levels(atlas_img, labels, verbose=verbose)
new_img.to_filename(img_file)
with open(labels_file, 'w') as fp:
json.dump(level_labels, fp)
return (img_file, labels_file)
|
def _get_talairach_all_levels(data_dir=None, verbose=1):
"Get the path to Talairach atlas and labels\n\n The atlas is downloaded and the files are created if necessary.\n\n The image contains all five levels of the atlas, each encoded on 8 bits\n (least significant octet contains the hemisphere, the next one the lobe,\n then gyrus, tissue, and ba).\n\n The labels json file contains\n [['level name', ['labels', 'for', 'this', 'level' ...]], ...],\n where the levels are in the order mentioned above.\n\n "
data_dir = _get_dataset_dir('talairach_atlas', data_dir=data_dir, verbose=verbose)
img_file = os.path.join(data_dir, 'talairach.nii')
labels_file = os.path.join(data_dir, 'talairach_labels.json')
if (os.path.isfile(img_file) and os.path.isfile(labels_file)):
return (img_file, labels_file)
atlas_url = 'http://www.talairach.org/talairach.nii'
temp_dir = mkdtemp()
try:
temp_file = _fetch_files(temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0]
atlas_img = nb.load(temp_file, mmap=False)
atlas_img = check_niimg(atlas_img)
finally:
shutil.rmtree(temp_dir)
labels = atlas_img.header.extensions[0].get_content()
labels = labels.strip().decode('utf-8').split('\n')
labels = [l.split('.') for l in labels]
(new_img, level_labels) = _separate_talairach_levels(atlas_img, labels, verbose=verbose)
new_img.to_filename(img_file)
with open(labels_file, 'w') as fp:
json.dump(level_labels, fp)
return (img_file, labels_file)<|docstring|>Get the path to Talairach atlas and labels
The atlas is downloaded and the files are created if necessary.
The image contains all five levels of the atlas, each encoded on 8 bits
(least significant octet contains the hemisphere, the next one the lobe,
then gyrus, tissue, and ba).
The labels json file contains
[['level name', ['labels', 'for', 'this', 'level' ...]], ...],
where the levels are in the order mentioned above.<|endoftext|>
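A sketch of how the files produced by this private helper can be read back; importing a private name from nilearn.datasets.atlas is shown purely for illustration.

import json
from nilearn.datasets.atlas import _get_talairach_all_levels

img_file, labels_file = _get_talairach_all_levels()  # downloads if needed
with open(labels_file) as fp:
    level_labels = json.load(fp)
# One [level name, label list] pair per annotation level, in the order
# hemisphere, lobe, gyrus, tissue, ba.
for level_name, names in level_labels:
    print(level_name, len(names))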
|
4745d62f85f876f53706e3dc9720d7054dc39c7b71edf2c53b818df8c4909d2e
|
@fill_doc
def fetch_atlas_talairach(level_name, data_dir=None, verbose=1):
"Download the Talairach atlas.\n\n For more information, see :footcite:`talairach_atlas`,\n :footcite:`Lancaster2000Talairach`,\n and :footcite:`Lancaster1997labeling`.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n level_name : string {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}\n Which level of the atlas to use: the hemisphere, the lobe, the gyrus,\n the tissue type or the Brodmann area.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (level_name not in _TALAIRACH_LEVELS):
raise ValueError('"level_name" should be one of {}'.format(_TALAIRACH_LEVELS))
position = _TALAIRACH_LEVELS.index(level_name)
(atlas_file, labels_file) = _get_talairach_all_levels(data_dir, verbose)
atlas_img = check_niimg(atlas_file)
with open(labels_file) as fp:
labels = json.load(fp)[position][1]
level_data = ((get_data(atlas_img) >> (8 * position)) & 255)
atlas_img = new_img_like(atlas_img, data=level_data)
description = _get_dataset_descr('talairach_atlas').decode('utf-8').format(level_name)
return Bunch(maps=atlas_img, labels=labels, description=description)
|
Download the Talairach atlas.
For more information, see :footcite:`talairach_atlas`,
:footcite:`Lancaster2000Talairach`,
and :footcite:`Lancaster1997labeling`.
.. versionadded:: 0.4.0
Parameters
----------
level_name : string {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}
Which level of the atlas to use: the hemisphere, the lobe, the gyrus,
the tissue type or the Brodmann area.
%(data_dir)s
%(verbose)s
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_atlas_talairach
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_talairach(level_name, data_dir=None, verbose=1):
"Download the Talairach atlas.\n\n For more information, see :footcite:`talairach_atlas`,\n :footcite:`Lancaster2000Talairach`,\n and :footcite:`Lancaster1997labeling`.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n level_name : string {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}\n Which level of the atlas to use: the hemisphere, the lobe, the gyrus,\n the tissue type or the Brodmann area.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (level_name not in _TALAIRACH_LEVELS):
raise ValueError('"level_name" should be one of {}'.format(_TALAIRACH_LEVELS))
position = _TALAIRACH_LEVELS.index(level_name)
(atlas_file, labels_file) = _get_talairach_all_levels(data_dir, verbose)
atlas_img = check_niimg(atlas_file)
with open(labels_file) as fp:
labels = json.load(fp)[position][1]
level_data = ((get_data(atlas_img) >> (8 * position)) & 255)
atlas_img = new_img_like(atlas_img, data=level_data)
description = _get_dataset_descr('talairach_atlas').decode('utf-8').format(level_name)
return Bunch(maps=atlas_img, labels=labels, description=description)
|
@fill_doc
def fetch_atlas_talairach(level_name, data_dir=None, verbose=1):
"Download the Talairach atlas.\n\n For more information, see :footcite:`talairach_atlas`,\n :footcite:`Lancaster2000Talairach`,\n and :footcite:`Lancaster1997labeling`.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n level_name : string {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}\n Which level of the atlas to use: the hemisphere, the lobe, the gyrus,\n the tissue type or the Brodmann area.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (level_name not in _TALAIRACH_LEVELS):
raise ValueError('"level_name" should be one of {}'.format(_TALAIRACH_LEVELS))
position = _TALAIRACH_LEVELS.index(level_name)
(atlas_file, labels_file) = _get_talairach_all_levels(data_dir, verbose)
atlas_img = check_niimg(atlas_file)
with open(labels_file) as fp:
labels = json.load(fp)[position][1]
level_data = ((get_data(atlas_img) >> (8 * position)) & 255)
atlas_img = new_img_like(atlas_img, data=level_data)
description = _get_dataset_descr('talairach_atlas').decode('utf-8').format(level_name)
return Bunch(maps=atlas_img, labels=labels, description=description)<|docstring|>Download the Talairach atlas.
For more information, see :footcite:`talairach_atlas`,
:footcite:`Lancaster2000Talairach`,
and :footcite:`Lancaster1997labeling`.
.. versionadded:: 0.4.0
Parameters
----------
level_name : string {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}
Which level of the atlas to use: the hemisphere, the lobe, the gyrus,
the tissue type or the Brodmann area.
%(data_dir)s
%(verbose)s
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
.. footbibliography::<|endoftext|>
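A hedged usage sketch for the fetcher in this record, assuming nilearn is installed and the Talairach files can be downloaded; 'ba' is one of the level names listed in the docstring, and each level is unpacked from one byte of the combined atlas image by the bit shift in the body.

from nilearn.datasets import fetch_atlas_talairach

# Fetch the Brodmann-area level; 'hemisphere', 'lobe', 'gyrus' and 'tissue' also work.
atlas = fetch_atlas_talairach(level_name='ba')
print(atlas.labels[:5])   # label list starts with 'Background'
print(atlas.maps.shape)   # 3D image whose voxel values index into atlas.labels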
|
f0284b36c89f2a78c9902b51cad627f9646377ab95f73cee129335bc9566739b
|
@fill_doc
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
"Download the Pauli et al. (2017) atlas with in total\n 12 subcortical nodes\n\n See :footcite:`pauli_atlas` and :footcite:`Pauli2018probabilistic`.\n\n Parameters\n ----------\n version : str {'prob', 'det'}, optional\n Which version of the atlas should be download. This can be\n 'prob' for the probabilistic atlas or 'det' for the\n deterministic atlas. Default='prob'.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (version == 'prob'):
url_maps = 'https://osf.io/w8zq2/download'
filename = 'pauli_2017_prob.nii.gz'
elif (version == 'det'):
url_maps = 'https://osf.io/5mqfx/download'
filename = 'pauli_2017_det.nii.gz'
else:
raise NotImplementedError(('{} is no valid version for '.format(version) + 'the Pauli atlas'))
url_labels = 'https://osf.io/6qrcb/download'
dataset_name = 'pauli_2017'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = [(filename, url_maps, {'move': filename}), ('labels.txt', url_labels, {'move': 'labels.txt'})]
(atlas_file, labels) = _fetch_files(data_dir, files)
labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)
|
Download the Pauli et al. (2017) atlas with a total of
12 subcortical nodes
See :footcite:`pauli_atlas` and :footcite:`Pauli2018probabilistic`.
Parameters
----------
version : str {'prob', 'det'}, optional
Which version of the atlas should be downloaded. This can be
'prob' for the probabilistic atlas or 'det' for the
deterministic atlas. Default='prob'.
%(data_dir)s
%(verbose)s
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
.. footbibliography::
|
nilearn/datasets/atlas.py
|
fetch_atlas_pauli_2017
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
"Download the Pauli et al. (2017) atlas with in total\n 12 subcortical nodes\n\n See :footcite:`pauli_atlas` and :footcite:`Pauli2018probabilistic`.\n\n Parameters\n ----------\n version : str {'prob', 'det'}, optional\n Which version of the atlas should be download. This can be\n 'prob' for the probabilistic atlas or 'det' for the\n deterministic atlas. Default='prob'.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (version == 'prob'):
url_maps = 'https://osf.io/w8zq2/download'
filename = 'pauli_2017_prob.nii.gz'
elif (version == 'det'):
url_maps = 'https://osf.io/5mqfx/download'
filename = 'pauli_2017_det.nii.gz'
else:
raise NotImplementedError(('{} is no valid version for '.format(version) + 'the Pauli atlas'))
url_labels = 'https://osf.io/6qrcb/download'
dataset_name = 'pauli_2017'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = [(filename, url_maps, {'move': filename}), ('labels.txt', url_labels, {'move': 'labels.txt'})]
(atlas_file, labels) = _fetch_files(data_dir, files)
labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)
|
@fill_doc
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
"Download the Pauli et al. (2017) atlas with in total\n 12 subcortical nodes\n\n See :footcite:`pauli_atlas` and :footcite:`Pauli2018probabilistic`.\n\n Parameters\n ----------\n version : str {'prob', 'det'}, optional\n Which version of the atlas should be download. This can be\n 'prob' for the probabilistic atlas or 'det' for the\n deterministic atlas. Default='prob'.\n %(data_dir)s\n %(verbose)s\n\n Returns\n -------\n sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: list of strings. Starts with 'Background'.\n - description: a short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n "
if (version == 'prob'):
url_maps = 'https://osf.io/w8zq2/download'
filename = 'pauli_2017_prob.nii.gz'
elif (version == 'det'):
url_maps = 'https://osf.io/5mqfx/download'
filename = 'pauli_2017_det.nii.gz'
else:
raise NotImplementedError(('{} is no valid version for '.format(version) + 'the Pauli atlas'))
url_labels = 'https://osf.io/6qrcb/download'
dataset_name = 'pauli_2017'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
files = [(filename, url_maps, {'move': filename}), ('labels.txt', url_labels, {'move': 'labels.txt'})]
(atlas_file, labels) = _fetch_files(data_dir, files)
labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)<|docstring|>Download the Pauli et al. (2017) atlas with a total of
12 subcortical nodes
See :footcite:`pauli_atlas` and :footcite:`Pauli2018probabilistic`.
Parameters
----------
version : str {'prob', 'det'}, optional
Which version of the atlas should be downloaded. This can be
'prob' for the probabilistic atlas or 'det' for the
deterministic atlas. Default='prob'.
%(data_dir)s
%(verbose)s
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
.. footbibliography::<|endoftext|>
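A hedged usage sketch for this fetcher, assuming nilearn is installed and the OSF downloads succeed; note that `maps` is returned as a file path here, per the Bunch in the body.

from nilearn.datasets import fetch_atlas_pauli_2017

atlas = fetch_atlas_pauli_2017(version='det')  # or the default 'prob'
print(atlas.maps)      # path to pauli_2017_det.nii.gz on disk
print(atlas.labels)    # region names parsed from labels.txt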
|
2be651359f1f223cc8d2c916dd7bcac860d4be796dab772fa19aa07daf2d933a
|
@fill_doc
def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, data_dir=None, base_url=None, resume=True, verbose=1):
'Download and return file names for the Schaefer 2018 parcellation\n\n .. versionadded:: 0.5.1\n\n The provided images are in MNI152 space.\n\n For more information on this dataset, see :footcite:`schaefer_atlas`,\n :footcite:`Schaefer2017parcellation`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n n_rois : int, optional\n Number of regions of interest {100, 200, 300, 400, 500, 600,\n 700, 800, 900, 1000}.\n Default=400.\n\n yeo_networks : int, optional\n ROI annotation according to yeo networks {7, 17}.\n Default=7.\n\n resolution_mm : int, optional\n Spatial resolution of atlas image in mm {1, 2}.\n Default=1mm.\n %(data_dir)s\n base_url : string, optional\n base_url of files to download (None results in default base_url).\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: ROI labels including Yeo-network annotation,list of strings.\n - description: A short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n\n Notes\n -----\n Release v0.14.3 of the Schaefer 2018 parcellation is used by\n default. Versions prior to v0.14.3 are known to contain erroneous region\n label names. For more details, see\n https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md\n\n Licence: MIT.\n\n '
valid_n_rois = list(range(100, 1100, 100))
valid_yeo_networks = [7, 17]
valid_resolution_mm = [1, 2]
if (n_rois not in valid_n_rois):
raise ValueError('Requested n_rois={} not available. Valid options: {}'.format(n_rois, valid_n_rois))
if (yeo_networks not in valid_yeo_networks):
raise ValueError('Requested yeo_networks={} not available. Valid options: {}'.format(yeo_networks, valid_yeo_networks))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
if (base_url is None):
base_url = 'https://raw.githubusercontent.com/ThomasYeoLab/CBIG/v0.14.3-Update_Yeo2011_Schaefer2018_labelname/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/MNI/'
files = []
labels_file_template = 'Schaefer2018_{}Parcels_{}Networks_order.txt'
img_file_template = 'Schaefer2018_{}Parcels_{}Networks_order_FSLMNI152_{}mm.nii.gz'
for f in [labels_file_template.format(n_rois, yeo_networks), img_file_template.format(n_rois, yeo_networks, resolution_mm)]:
files.append((f, (base_url + f), {}))
dataset_name = 'schaefer_2018'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(labels_file, atlas_file) = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
labels = np.genfromtxt(labels_file, usecols=1, dtype='S', delimiter='\t')
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)
|
Download and return file names for the Schaefer 2018 parcellation
.. versionadded:: 0.5.1
The provided images are in MNI152 space.
For more information on this dataset, see :footcite:`schaefer_atlas`,
:footcite:`Schaefer2017parcellation`,
and :footcite:`Yeo2011organization`.
Parameters
----------
n_rois : int, optional
Number of regions of interest {100, 200, 300, 400, 500, 600,
700, 800, 900, 1000}.
Default=400.
yeo_networks : int, optional
ROI annotation according to yeo networks {7, 17}.
Default=7.
resolution_mm : int, optional
Spatial resolution of atlas image in mm {1, 2}.
Default=1mm.
%(data_dir)s
base_url : string, optional
base_url of files to download (None results in default base_url).
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: ROI labels including Yeo-network annotation, list of strings.
- description: A short description of the atlas and some references.
References
----------
.. footbibliography::
Notes
-----
Release v0.14.3 of the Schaefer 2018 parcellation is used by
default. Versions prior to v0.14.3 are known to contain erroneous region
label names. For more details, see
https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md
Licence: MIT.
|
nilearn/datasets/atlas.py
|
fetch_atlas_schaefer_2018
|
lemiceterieux/nilearn
| 827 |
python
|
@fill_doc
def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, data_dir=None, base_url=None, resume=True, verbose=1):
'Download and return file names for the Schaefer 2018 parcellation\n\n .. versionadded:: 0.5.1\n\n The provided images are in MNI152 space.\n\n For more information on this dataset, see :footcite:`schaefer_atlas`,\n :footcite:`Schaefer2017parcellation`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n n_rois : int, optional\n Number of regions of interest {100, 200, 300, 400, 500, 600,\n 700, 800, 900, 1000}.\n Default=400.\n\n yeo_networks : int, optional\n ROI annotation according to yeo networks {7, 17}.\n Default=7.\n\n resolution_mm : int, optional\n Spatial resolution of atlas image in mm {1, 2}.\n Default=1mm.\n %(data_dir)s\n base_url : string, optional\n base_url of files to download (None results in default base_url).\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: ROI labels including Yeo-network annotation,list of strings.\n - description: A short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n\n Notes\n -----\n Release v0.14.3 of the Schaefer 2018 parcellation is used by\n default. Versions prior to v0.14.3 are known to contain erroneous region\n label names. For more details, see\n https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md\n\n Licence: MIT.\n\n '
valid_n_rois = list(range(100, 1100, 100))
valid_yeo_networks = [7, 17]
valid_resolution_mm = [1, 2]
if (n_rois not in valid_n_rois):
raise ValueError('Requested n_rois={} not available. Valid options: {}'.format(n_rois, valid_n_rois))
if (yeo_networks not in valid_yeo_networks):
raise ValueError('Requested yeo_networks={} not available. Valid options: {}'.format(yeo_networks, valid_yeo_networks))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
if (base_url is None):
base_url = 'https://raw.githubusercontent.com/ThomasYeoLab/CBIG/v0.14.3-Update_Yeo2011_Schaefer2018_labelname/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/MNI/'
files = []
labels_file_template = 'Schaefer2018_{}Parcels_{}Networks_order.txt'
img_file_template = 'Schaefer2018_{}Parcels_{}Networks_order_FSLMNI152_{}mm.nii.gz'
for f in [labels_file_template.format(n_rois, yeo_networks), img_file_template.format(n_rois, yeo_networks, resolution_mm)]:
files.append((f, (base_url + f), {}))
dataset_name = 'schaefer_2018'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(labels_file, atlas_file) = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
labels = np.genfromtxt(labels_file, usecols=1, dtype='S', delimiter='\t')
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)
|
@fill_doc
def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, data_dir=None, base_url=None, resume=True, verbose=1):
'Download and return file names for the Schaefer 2018 parcellation\n\n .. versionadded:: 0.5.1\n\n The provided images are in MNI152 space.\n\n For more information on this dataset, see :footcite:`schaefer_atlas`,\n :footcite:`Schaefer2017parcellation`,\n and :footcite:`Yeo2011organization`.\n\n Parameters\n ----------\n n_rois : int, optional\n Number of regions of interest {100, 200, 300, 400, 500, 600,\n 700, 800, 900, 1000}.\n Default=400.\n\n yeo_networks : int, optional\n ROI annotation according to yeo networks {7, 17}.\n Default=7.\n\n resolution_mm : int, optional\n Spatial resolution of atlas image in mm {1, 2}.\n Default=1mm.\n %(data_dir)s\n base_url : string, optional\n base_url of files to download (None results in default base_url).\n %(resume)s\n %(verbose)s\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, contains:\n\n - maps: 3D Nifti image, values are indices in the list of labels.\n - labels: ROI labels including Yeo-network annotation,list of strings.\n - description: A short description of the atlas and some references.\n\n References\n ----------\n .. footbibliography::\n\n\n Notes\n -----\n Release v0.14.3 of the Schaefer 2018 parcellation is used by\n default. Versions prior to v0.14.3 are known to contain erroneous region\n label names. For more details, see\n https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md\n\n Licence: MIT.\n\n '
valid_n_rois = list(range(100, 1100, 100))
valid_yeo_networks = [7, 17]
valid_resolution_mm = [1, 2]
if (n_rois not in valid_n_rois):
raise ValueError('Requested n_rois={} not available. Valid options: {}'.format(n_rois, valid_n_rois))
if (yeo_networks not in valid_yeo_networks):
raise ValueError('Requested yeo_networks={} not available. Valid options: {}'.format(yeo_networks, valid_yeo_networks))
if (resolution_mm not in valid_resolution_mm):
raise ValueError('Requested resolution_mm={} not available. Valid options: {}'.format(resolution_mm, valid_resolution_mm))
if (base_url is None):
base_url = 'https://raw.githubusercontent.com/ThomasYeoLab/CBIG/v0.14.3-Update_Yeo2011_Schaefer2018_labelname/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/MNI/'
files = []
labels_file_template = 'Schaefer2018_{}Parcels_{}Networks_order.txt'
img_file_template = 'Schaefer2018_{}Parcels_{}Networks_order_FSLMNI152_{}mm.nii.gz'
for f in [labels_file_template.format(n_rois, yeo_networks), img_file_template.format(n_rois, yeo_networks, resolution_mm)]:
files.append((f, (base_url + f), {}))
dataset_name = 'schaefer_2018'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
(labels_file, atlas_file) = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
labels = np.genfromtxt(labels_file, usecols=1, dtype='S', delimiter='\t')
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file, labels=labels, description=fdescr)<|docstring|>Download and return file names for the Schaefer 2018 parcellation
.. versionadded:: 0.5.1
The provided images are in MNI152 space.
For more information on this dataset, see :footcite:`schaefer_atlas`,
:footcite:`Schaefer2017parcellation`,
and :footcite:`Yeo2011organization`.
Parameters
----------
n_rois : int, optional
Number of regions of interest {100, 200, 300, 400, 500, 600,
700, 800, 900, 1000}.
Default=400.
yeo_networks : int, optional
ROI annotation according to yeo networks {7, 17}.
Default=7.
resolution_mm : int, optional
Spatial resolution of atlas image in mm {1, 2}.
Default=1mm.
%(data_dir)s
base_url : string, optional
base_url of files to download (None results in default base_url).
%(resume)s
%(verbose)s
Returns
-------
data : sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: ROI labels including Yeo-network annotation,list of strings.
- description: A short description of the atlas and some references.
References
----------
.. footbibliography::
Notes
-----
Release v0.14.3 of the Schaefer 2018 parcellation is used by
default. Versions prior to v0.14.3 are known to contain erroneous region
label names. For more details, see
https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md
Licence: MIT.<|endoftext|>
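A hedged usage sketch, assuming nilearn is installed; the argument values below are drawn from the valid options checked at the top of the body.

from nilearn.datasets import fetch_atlas_schaefer_2018

atlas = fetch_atlas_schaefer_2018(n_rois=200, yeo_networks=17, resolution_mm=2)
print(atlas.maps)          # path to the downloaded FSLMNI152 parcellation image
print(atlas.labels[:3])    # byte strings, since labels are read with dtype='S'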
|
c1ed429e46a53b0ee0c4e53b1cbfbb8da3596518bc0f664c8a25f81269c428cc
|
def constructMaximumBinaryTree(self, nums):
'\n :type nums: List[int]\n :rtype: TreeNode\n '
self.SparseTable = [[(v, i) for (i, v) in enumerate(nums)]]
l = len(nums)
t = 1
while ((t * 2) < l):
prevTable = self.SparseTable[(- 1)]
self.SparseTable.append([max(prevTable[i], prevTable[(i + t)]) for i in xrange(((l - (t * 2)) + 1))])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
:type nums: List[int]
:rtype: TreeNode
|
py/maximum-binary-tree.py
|
constructMaximumBinaryTree
|
ckclark/leetcode
| 0 |
python
|
def constructMaximumBinaryTree(self, nums):
'\n :type nums: List[int]\n :rtype: TreeNode\n '
self.SparseTable = [[(v, i) for (i, v) in enumerate(nums)]]
l = len(nums)
t = 1
while ((t * 2) < l):
prevTable = self.SparseTable[(- 1)]
self.SparseTable.append([max(prevTable[i], prevTable[(i + t)]) for i in xrange(((l - (t * 2)) + 1))])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
def constructMaximumBinaryTree(self, nums):
'\n :type nums: List[int]\n :rtype: TreeNode\n '
self.SparseTable = [[(v, i) for (i, v) in enumerate(nums)]]
l = len(nums)
t = 1
while ((t * 2) < l):
prevTable = self.SparseTable[(- 1)]
self.SparseTable.append([max(prevTable[i], prevTable[(i + t)]) for i in xrange(((l - (t * 2)) + 1))])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)<|docstring|>:type nums: List[int]
:rtype: TreeNode<|endoftext|>
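The record builds a sparse table of (value, index) pairs but its recursive helper do_constructMaximumBinaryTree is not included. The self-contained sketch below shows the same technique with my own names (TreeNode, build_sparse_table, range_max, construct_maximum_binary_tree), not the original author's code: O(1) range-max queries over the table, then recursive construction of the maximum binary tree.

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def build_sparse_table(nums):
    # table[k][i] == (max value, its index) over nums[i : i + 2**k]
    table = [[(v, i) for i, v in enumerate(nums)]]
    k = 1
    while (1 << k) <= len(nums):
        prev, half = table[-1], 1 << (k - 1)
        table.append([max(prev[i], prev[i + half])
                      for i in range(len(nums) - (1 << k) + 1)])
        k += 1
    return table

def range_max(table, lo, hi):
    # Maximum over nums[lo:hi] from two overlapping power-of-two windows.
    k = (hi - lo).bit_length() - 1
    return max(table[k][lo], table[k][hi - (1 << k)])

def construct_maximum_binary_tree(nums):
    table = build_sparse_table(nums)
    def build(lo, hi):
        if lo >= hi:
            return None
        val, idx = range_max(table, lo, hi)
        node = TreeNode(val)
        node.left = build(lo, idx)
        node.right = build(idx + 1, hi)
        return node
    return build(0, len(nums))

root = construct_maximum_binary_tree([3, 2, 1, 6, 0, 5])
print(root.val, root.left.val, root.right.val)   # 6 3 5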
|
987f589f3ffca4b45b556dcd51e590b4e267ec6cc23eba75ccb391c3a7ce373a
|
def repr(self, **fields) -> str:
'\n Helper for __repr__\n '
field_strings = []
at_least_one_attached_attribute = False
for (key, field) in fields.items():
field_strings.append(f'{key}={field!r}')
at_least_one_attached_attribute = True
if at_least_one_attached_attribute:
return f"<{self.__class__.__name__}({','.join(field_strings)})>"
return f'<{self.__class__.__name__} {id(self)}>'
|
Helper for __repr__
|
investing_algorithm_framework/core/context/algorithm_context_configuration.py
|
repr
|
investing-algorithms/investing-algorithm-framework
| 1 |
python
|
def repr(self, **fields) -> str:
'\n \n '
field_strings = []
at_least_one_attached_attribute = False
for (key, field) in fields.items():
field_strings.append(f'{key}={field!r}')
at_least_one_attached_attribute = True
if at_least_one_attached_attribute:
return f"<{self.__class__.__name__}({','.join(field_strings)})>"
return f'<{self.__class__.__name__} {id(self)}>'
|
def repr(self, **fields) -> str:
'\n \n '
field_strings = []
at_least_one_attached_attribute = False
for (key, field) in fields.items():
field_strings.append(f'{key}={field!r}')
at_least_one_attached_attribute = True
if at_least_one_attached_attribute:
return f"<{self.__class__.__name__}({','.join(field_strings)})>"
return f'<{self.__class__.__name__} {id(self)}>'<|docstring|>Helper for __repr__<|endoftext|>
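To make the helper's output concrete, here is a condensed re-statement wired into __repr__; the Config class and its fields are invented for illustration and are not part of the framework.

class Config:
    def __init__(self, name, cycle):
        self.name = name
        self.cycle = cycle

    def repr(self, **fields) -> str:
        field_strings = [f'{key}={field!r}' for key, field in fields.items()]
        if field_strings:
            return f"<{self.__class__.__name__}({','.join(field_strings)})>"
        return f'<{self.__class__.__name__} {id(self)}>'

    def __repr__(self):
        return self.repr(name=self.name, cycle=self.cycle)

print(Config('btc_eur', 'hourly'))   # <Config(name='btc_eur',cycle='hourly')>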
|
a2fa5f16927ea85d45c1b5326556580ce4b589e3aa75dd6e2fb322a1872c9ce5
|
def simple_test(self, imgs, img_metas, imgs_2, img_metas_2, proposals=None, rescale=False, test_cfg2=None):
'Test without augmentation.'
assert self.with_bbox, 'Bbox head must be implemented.'
if (test_cfg2 is not None):
test_cfg = test_cfg2
else:
test_cfg = self.test_cfg
img_metas = img_metas[0]
img_metas_2 = img_metas_2[0]
x = self.extract_feat(imgs)
x_2 = self.extract_feat(imgs_2)
proposal_list = (self.simple_test_rpn(x, img_metas, test_cfg.rpn) if (proposals is None) else proposals)
proposal_list_2 = (self.simple_test_rpn_2(x_2, img_metas_2, test_cfg.rpn) if (proposals is None) else proposals)
(bboxes, scores) = self.simple_test_bboxes(x, img_metas, proposal_list, None, rescale=rescale)
(bboxes_2, scores_2) = self.simple_test_bboxes(x_2, img_metas_2, proposal_list_2, None, rescale=rescale)
if self.refinement_head:
bboxes_2_refinement = bboxes_2[:, 6:]
bboxes_2_refinement = [torch.cat((bboxes_2_refinement, scores_2[:, 1, None]), dim=1)]
bboxes_2_refinement = self.simple_test_bbox_refinement(x, img_metas, bboxes_2_refinement, None, rescale=rescale)
bboxes_combined = torch.cat((bboxes, bboxes_2_refinement), 0)
scores_combined = torch.cat((scores, scores_2), 0)
else:
bboxes_combined = torch.cat((bboxes, bboxes_2), 0)
scores_combined = torch.cat((scores, scores_2), 0)
(det_bboxes, det_labels) = multiclass_nms_3d(bboxes_combined, scores_combined, test_cfg.rcnn.score_thr, test_cfg.rcnn.nms, test_cfg.rcnn.max_per_img)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
if test_cfg.return_bbox_only:
return bbox_results
elif self.refinement_mask_head:
det_bboxes_np = det_bboxes.cpu().numpy()
det_labels_np = det_labels.cpu().numpy()
bboxes_np = bboxes_combined.cpu().numpy()
cutoff_between_res1_res2 = len(bboxes)
nonscaled_bboxes = []
nonscaled_labels = []
upscaled_bboxes = []
upscaled_labels = []
for (det_bbox, det_label) in zip(det_bboxes_np, det_labels_np):
for (index, bbox) in enumerate(bboxes_np):
if np.all((det_bbox[:6] == bbox[6:])):
if (index >= cutoff_between_res1_res2):
upscaled_bboxes.append(det_bbox)
upscaled_labels.append(det_label)
else:
nonscaled_bboxes.append(det_bbox)
nonscaled_labels.append(det_label)
nonscaled_bboxes_gpu = torch.from_numpy(np.array(nonscaled_bboxes)).cuda()
nonscaled_labels_gpu = torch.from_numpy(np.array(nonscaled_labels)).cuda()
upscaled_bboxes_gpu = torch.from_numpy(np.array(upscaled_bboxes)).cuda()
upscaled_labels_gpu = torch.from_numpy(np.array(upscaled_labels)).cuda()
segm_results_nonscaled = self.simple_test_mask(x, img_metas, nonscaled_bboxes_gpu, nonscaled_labels_gpu, rescale=rescale)
segm_results_refinement = self.simple_test_mask_refinement_v2(x, img_metas, upscaled_bboxes_gpu, upscaled_labels_gpu, rescale=rescale)
if (len(nonscaled_bboxes_gpu) == 0):
det_bboxes = upscaled_bboxes_gpu
det_labels = upscaled_labels_gpu
elif (len(upscaled_bboxes_gpu) == 0):
det_bboxes = nonscaled_bboxes_gpu
det_labels = nonscaled_labels_gpu
else:
det_bboxes = torch.cat((nonscaled_bboxes_gpu, upscaled_bboxes_gpu), 0)
det_labels = torch.cat((nonscaled_labels_gpu, upscaled_labels_gpu), 0)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
for segm_results in segm_results_refinement[0]:
segm_results_nonscaled[0].append(segm_results)
return (bbox_results, segm_results_nonscaled)
else:
segm_results = self.simple_test_mask(x, img_metas, det_bboxes, det_labels, rescale=rescale)
return (bbox_results, segm_results)
|
Test without augmentation.
|
mmdet/models/detectors/two_stage_3d_2scales.py
|
simple_test
|
arthur801031/3d-multi-resolution-rcnn
| 16 |
python
|
def simple_test(self, imgs, img_metas, imgs_2, img_metas_2, proposals=None, rescale=False, test_cfg2=None):
assert self.with_bbox, 'Bbox head must be implemented.'
if (test_cfg2 is not None):
test_cfg = test_cfg2
else:
test_cfg = self.test_cfg
img_metas = img_metas[0]
img_metas_2 = img_metas_2[0]
x = self.extract_feat(imgs)
x_2 = self.extract_feat(imgs_2)
proposal_list = (self.simple_test_rpn(x, img_metas, test_cfg.rpn) if (proposals is None) else proposals)
proposal_list_2 = (self.simple_test_rpn_2(x_2, img_metas_2, test_cfg.rpn) if (proposals is None) else proposals)
(bboxes, scores) = self.simple_test_bboxes(x, img_metas, proposal_list, None, rescale=rescale)
(bboxes_2, scores_2) = self.simple_test_bboxes(x_2, img_metas_2, proposal_list_2, None, rescale=rescale)
if self.refinement_head:
bboxes_2_refinement = bboxes_2[:, 6:]
bboxes_2_refinement = [torch.cat((bboxes_2_refinement, scores_2[:, 1, None]), dim=1)]
bboxes_2_refinement = self.simple_test_bbox_refinement(x, img_metas, bboxes_2_refinement, None, rescale=rescale)
bboxes_combined = torch.cat((bboxes, bboxes_2_refinement), 0)
scores_combined = torch.cat((scores, scores_2), 0)
else:
bboxes_combined = torch.cat((bboxes, bboxes_2), 0)
scores_combined = torch.cat((scores, scores_2), 0)
(det_bboxes, det_labels) = multiclass_nms_3d(bboxes_combined, scores_combined, test_cfg.rcnn.score_thr, test_cfg.rcnn.nms, test_cfg.rcnn.max_per_img)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
if test_cfg.return_bbox_only:
return bbox_results
elif self.refinement_mask_head:
det_bboxes_np = det_bboxes.cpu().numpy()
det_labels_np = det_labels.cpu().numpy()
bboxes_np = bboxes_combined.cpu().numpy()
cutoff_between_res1_res2 = len(bboxes)
nonscaled_bboxes = []
nonscaled_labels = []
upscaled_bboxes = []
upscaled_labels = []
for (det_bbox, det_label) in zip(det_bboxes_np, det_labels_np):
for (index, bbox) in enumerate(bboxes_np):
if np.all((det_bbox[:6] == bbox[6:])):
if (index >= cutoff_between_res1_res2):
upscaled_bboxes.append(det_bbox)
upscaled_labels.append(det_label)
else:
nonscaled_bboxes.append(det_bbox)
nonscaled_labels.append(det_label)
nonscaled_bboxes_gpu = torch.from_numpy(np.array(nonscaled_bboxes)).cuda()
nonscaled_labels_gpu = torch.from_numpy(np.array(nonscaled_labels)).cuda()
upscaled_bboxes_gpu = torch.from_numpy(np.array(upscaled_bboxes)).cuda()
upscaled_labels_gpu = torch.from_numpy(np.array(upscaled_labels)).cuda()
segm_results_nonscaled = self.simple_test_mask(x, img_metas, nonscaled_bboxes_gpu, nonscaled_labels_gpu, rescale=rescale)
segm_results_refinement = self.simple_test_mask_refinement_v2(x, img_metas, upscaled_bboxes_gpu, upscaled_labels_gpu, rescale=rescale)
if (len(nonscaled_bboxes_gpu) == 0):
det_bboxes = upscaled_bboxes_gpu
det_labels = upscaled_labels_gpu
elif (len(upscaled_bboxes_gpu) == 0):
det_bboxes = nonscaled_bboxes_gpu
det_labels = nonscaled_labels_gpu
else:
det_bboxes = torch.cat((nonscaled_bboxes_gpu, upscaled_bboxes_gpu), 0)
det_labels = torch.cat((nonscaled_labels_gpu, upscaled_labels_gpu), 0)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
for segm_results in segm_results_refinement[0]:
segm_results_nonscaled[0].append(segm_results)
return (bbox_results, segm_results_nonscaled)
else:
segm_results = self.simple_test_mask(x, img_metas, det_bboxes, det_labels, rescale=rescale)
return (bbox_results, segm_results)
|
def simple_test(self, imgs, img_metas, imgs_2, img_metas_2, proposals=None, rescale=False, test_cfg2=None):
assert self.with_bbox, 'Bbox head must be implemented.'
if (test_cfg2 is not None):
test_cfg = test_cfg2
else:
test_cfg = self.test_cfg
img_metas = img_metas[0]
img_metas_2 = img_metas_2[0]
x = self.extract_feat(imgs)
x_2 = self.extract_feat(imgs_2)
proposal_list = (self.simple_test_rpn(x, img_metas, test_cfg.rpn) if (proposals is None) else proposals)
proposal_list_2 = (self.simple_test_rpn_2(x_2, img_metas_2, test_cfg.rpn) if (proposals is None) else proposals)
(bboxes, scores) = self.simple_test_bboxes(x, img_metas, proposal_list, None, rescale=rescale)
(bboxes_2, scores_2) = self.simple_test_bboxes(x_2, img_metas_2, proposal_list_2, None, rescale=rescale)
if self.refinement_head:
bboxes_2_refinement = bboxes_2[:, 6:]
bboxes_2_refinement = [torch.cat((bboxes_2_refinement, scores_2[:, 1, None]), dim=1)]
bboxes_2_refinement = self.simple_test_bbox_refinement(x, img_metas, bboxes_2_refinement, None, rescale=rescale)
bboxes_combined = torch.cat((bboxes, bboxes_2_refinement), 0)
scores_combined = torch.cat((scores, scores_2), 0)
else:
bboxes_combined = torch.cat((bboxes, bboxes_2), 0)
scores_combined = torch.cat((scores, scores_2), 0)
(det_bboxes, det_labels) = multiclass_nms_3d(bboxes_combined, scores_combined, test_cfg.rcnn.score_thr, test_cfg.rcnn.nms, test_cfg.rcnn.max_per_img)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
if test_cfg.return_bbox_only:
return bbox_results
elif self.refinement_mask_head:
det_bboxes_np = det_bboxes.cpu().numpy()
det_labels_np = det_labels.cpu().numpy()
bboxes_np = bboxes_combined.cpu().numpy()
cutoff_between_res1_res2 = len(bboxes)
nonscaled_bboxes = []
nonscaled_labels = []
upscaled_bboxes = []
upscaled_labels = []
for (det_bbox, det_label) in zip(det_bboxes_np, det_labels_np):
for (index, bbox) in enumerate(bboxes_np):
if np.all((det_bbox[:6] == bbox[6:])):
if (index >= cutoff_between_res1_res2):
upscaled_bboxes.append(det_bbox)
upscaled_labels.append(det_label)
else:
nonscaled_bboxes.append(det_bbox)
nonscaled_labels.append(det_label)
nonscaled_bboxes_gpu = torch.from_numpy(np.array(nonscaled_bboxes)).cuda()
nonscaled_labels_gpu = torch.from_numpy(np.array(nonscaled_labels)).cuda()
upscaled_bboxes_gpu = torch.from_numpy(np.array(upscaled_bboxes)).cuda()
upscaled_labels_gpu = torch.from_numpy(np.array(upscaled_labels)).cuda()
segm_results_nonscaled = self.simple_test_mask(x, img_metas, nonscaled_bboxes_gpu, nonscaled_labels_gpu, rescale=rescale)
segm_results_refinement = self.simple_test_mask_refinement_v2(x, img_metas, upscaled_bboxes_gpu, upscaled_labels_gpu, rescale=rescale)
if (len(nonscaled_bboxes_gpu) == 0):
det_bboxes = upscaled_bboxes_gpu
det_labels = upscaled_labels_gpu
elif (len(upscaled_bboxes_gpu) == 0):
det_bboxes = nonscaled_bboxes_gpu
det_labels = nonscaled_labels_gpu
else:
det_bboxes = torch.cat((nonscaled_bboxes_gpu, upscaled_bboxes_gpu), 0)
det_labels = torch.cat((nonscaled_labels_gpu, upscaled_labels_gpu), 0)
bbox_results = bbox2result3D(det_bboxes, det_labels, self.bbox_head.num_classes)
for segm_results in segm_results_refinement[0]:
segm_results_nonscaled[0].append(segm_results)
return (bbox_results, segm_results_nonscaled)
else:
segm_results = self.simple_test_mask(x, img_metas, det_bboxes, det_labels, rescale=rescale)
return (bbox_results, segm_results)<|docstring|>Test without augmentation.<|endoftext|>
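The trickiest bookkeeping in this record is routing each post-NMS detection back to the resolution it came from by exact coordinate matching. The tiny NumPy sketch below illustrates that idea with invented shapes and values; the real code compares det_bbox[:6] with bbox[6:], which suggests each candidate row stores two coordinate sets, whereas the sketch keeps a single set.

import numpy as np

boxes_res1 = np.array([[0., 0., 0., 10., 10., 10.],
                       [5., 5., 5., 15., 15., 15.]])
boxes_res2 = np.array([[20., 20., 20., 30., 30., 30.]])
combined = np.concatenate([boxes_res1, boxes_res2], axis=0)
cutoff = len(boxes_res1)            # plays the role of cutoff_between_res1_res2

kept = combined[[0, 2]]             # pretend NMS kept the first and last box
came_from_res2 = []
for det in kept:
    for index, box in enumerate(combined):
        if np.all(det == box):
            came_from_res2.append(index >= cutoff)
print(came_from_res2)               # [False, True]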
|
bd931965fa1741213b5d57bca320564a05b74b58665ddcaaa5d9d0c681967bad
|
def doInitialization(self):
' actually do the initialization and look up the models '
if (not self.initialized):
parentModel = getModelByName(self.parentModelName)
super(ModelCollectionManager, self).__init__(parentModel)
for childName in self.childModelNames:
childModel = getModelByName(childName)
self.registerChildClass(childModel)
self.initialized = True
|
actually do the initialization and look up the models
|
geocamUtil/models/managers.py
|
doInitialization
|
geocam/geocamUtilWeb
| 4 |
python
|
def doInitialization(self):
' '
if (not self.initialized):
parentModel = getModelByName(self.parentModelName)
super(ModelCollectionManager, self).__init__(parentModel)
for childName in self.childModelNames:
childModel = getModelByName(childName)
self.registerChildClass(childModel)
self.initialized = True
|
def doInitialization(self):
' '
if (not self.initialized):
parentModel = getModelByName(self.parentModelName)
super(ModelCollectionManager, self).__init__(parentModel)
for childName in self.childModelNames:
childModel = getModelByName(childName)
self.registerChildClass(childModel)
self.initialized = True<|docstring|>actually do the initialization and look up the models<|endoftext|>
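A generic sketch of the lazy-lookup pattern used in this record and the LazyQuerySet/LazyModelManager records that follow: defer the model lookup until first use, then flip the initialized flag. The registry dict stands in for Django's getModelByName and is purely illustrative.

class LazyCollectionManager:
    def __init__(self, parent_name, child_names, registry):
        self.parent_name = parent_name
        self.child_names = child_names
        self.registry = registry          # stand-in for getModelByName
        self.initialized = False

    def do_initialization(self):
        if not self.initialized:
            self.parent_model = self.registry[self.parent_name]
            self.child_models = [self.registry[n] for n in self.child_names]
            self.initialized = True

registry = {'Track': object(), 'PhotoTrack': object(), 'GpsTrack': object()}
mgr = LazyCollectionManager('Track', ['PhotoTrack', 'GpsTrack'], registry)
mgr.do_initialization()
print(mgr.initialized, len(mgr.child_models))   # True 2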
|
4c5bac090d976791b29dc1c1f35f2ce17326bac77eba6522d321a81cdcab27a1
|
def __init__(self, model_name_variable=None, query=None, using=None, hints=None, model=None):
"\n Initialize with either the model or the model name variable.\n :param model_name_variable: the name of the variable holding the model name, ie 'settings.bla'\n :param query:\n :param using:\n :param hints:\n :param model: This is just used when cloning or other internal uses, or if you already have the model.\n "
if ((not model_name_variable) and (not model)):
raise Exception('You must define the model name variable or the model')
if model:
self.initialized = True
super(LazyQuerySet, self).__init__(model, query, using, hints)
return
self.name = model_name_variable.split('.')[(- 1)]
self.model_name_variable = model_name_variable
self.model = model
self.initial_query = query
self.initial_using = using
self.initial_hints = hints
self.initialized = False
|
Initialize with either the model or the model name variable.
:param model_name_variable: the name of the variable holding the model name, ie 'settings.bla'
:param query:
:param using:
:param hints:
:param model: This is just used when cloning or other internal uses, or if you already have the model.
|
geocamUtil/models/managers.py
|
__init__
|
geocam/geocamUtilWeb
| 4 |
python
|
def __init__(self, model_name_variable=None, query=None, using=None, hints=None, model=None):
"\n Initialize with either the model or the model name variable.\n :param model_name_variable: the name of the variable holding the model name, ie 'settings.bla'\n :param query:\n :param using:\n :param hints:\n :param model: This is just used when cloning or other internal uses, or if you already have the model.\n "
if ((not model_name_variable) and (not model)):
raise Exception('You must define the model name variable or the model')
if model:
self.initialized = True
super(LazyQuerySet, self).__init__(model, query, using, hints)
return
self.name = model_name_variable.split('.')[(- 1)]
self.model_name_variable = model_name_variable
self.model = model
self.initial_query = query
self.initial_using = using
self.initial_hints = hints
self.initialized = False
|
def __init__(self, model_name_variable=None, query=None, using=None, hints=None, model=None):
"\n Initialize with either the model or the model name variable.\n :param model_name_variable: the name of the variable holding the model name, ie 'settings.bla'\n :param query:\n :param using:\n :param hints:\n :param model: This is just used when cloning or other internal uses, or if you already have the model.\n "
if ((not model_name_variable) and (not model)):
raise Exception('You must define the model name variable or the model')
if model:
self.initialized = True
super(LazyQuerySet, self).__init__(model, query, using, hints)
return
self.name = model_name_variable.split('.')[(- 1)]
self.model_name_variable = model_name_variable
self.model = model
self.initial_query = query
self.initial_using = using
self.initial_hints = hints
self.initialized = False<|docstring|>Initialize with either the model or the model name variable.
:param model_name_variable: the name of the variable holding the model name, ie 'settings.bla'
:param query:
:param using:
:param hints:
:param model: This is just used when cloning or other internal uses, or if you already have the model.<|endoftext|>
|
18ddc6a71377b509fad258d828be6a58924fc51f20c10c262239d267f75f6227
|
def do_initialization(self):
' actually do the initialization and look up the models '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyQuerySet, self).__init__(self.model, self.initial_query, self.initial_using, self.initial_hints)
self.initialized = True
|
actually do the initialization and look up the models
|
geocamUtil/models/managers.py
|
do_initialization
|
geocam/geocamUtilWeb
| 4 |
python
|
def do_initialization(self):
' '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyQuerySet, self).__init__(self.model, self.initial_query, self.initial_using, self.initial_hints)
self.initialized = True
|
def do_initialization(self):
' '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyQuerySet, self).__init__(self.model, self.initial_query, self.initial_using, self.initial_hints)
self.initialized = True<|docstring|>actually do the initialization and look up the models<|endoftext|>
|
05ceebc322b03bd34b689699e07d963d73a95a8fec43b4bfc710998a49370e11
|
def do_initialization(self):
' actually do the initialization and look up the models '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyModelManager, self).__init__()
self.initialized = True
|
actually do the initialization and look up the models
|
geocamUtil/models/managers.py
|
do_initialization
|
geocam/geocamUtilWeb
| 4 |
python
|
def do_initialization(self):
' '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyModelManager, self).__init__()
self.initialized = True
|
def do_initialization(self):
' '
if (not self.initialized):
model_name = eval(self.model_name_variable)
self.model = getModelByName(model_name)
super(LazyModelManager, self).__init__()
self.initialized = True<|docstring|>actually do the initialization and look up the models<|endoftext|>
|
d878b8ba448f3f2c1b85ff2c3cade83248de8ef99c93cd6f0d515ab3d5f66108
|
@staticmethod
def is_scalar(value):
"Return True iff 'value' should be represented by a leaf node."
return (not isinstance(value, (dict, list, tuple, set)))
|
Return True iff 'value' should be represented by a leaf node.
|
browson/node.py
|
is_scalar
|
jherland/browson
| 0 |
python
|
@staticmethod
def is_scalar(value):
return (not isinstance(value, (dict, list, tuple, set)))
|
@staticmethod
def is_scalar(value):
return (not isinstance(value, (dict, list, tuple, set)))<|docstring|>Return True iff 'value' should be represented by a leaf node.<|endoftext|>
|
62b22ff8cdeb336aa2cbab24a17e596c819ca1ad3fda33b0a31d8133ad4767d7
|
@property
def is_leaf(self):
'Return True iff this is a leaf node (i.e. cannot have any children).\n\n This is different from an empty container, i.e. an "internal" node\n whose list of children is empty.'
return (self._children is None)
|
Return True iff this is a leaf node (i.e. cannot have any children).
This is different from an empty container, i.e. an "internal" node
whose list of children is empty.
|
browson/node.py
|
is_leaf
|
jherland/browson
| 0 |
python
|
@property
def is_leaf(self):
'Return True iff this is a leaf node (i.e. cannot have any children).\n\n This is different from an empty container, i.e. an "internal" node\n whose list of children is empty.'
return (self._children is None)
|
@property
def is_leaf(self):
'Return True iff this is a leaf node (i.e. cannot have any children).\n\n This is different from an empty container, i.e. an "internal" node\n whose list of children is empty.'
return (self._children is None)<|docstring|>Return True iff this is a leaf node (i.e. cannot have any children).
This is different from an empty container, i.e. an "internal" node
whose list of children is empty.<|endoftext|>
|
52fbb0e9746a7edf64dad98af2546bead4348dce45a66dc15085d82e298870d0
|
@property
def children(self):
"Return this node's children.\n\n Return an empty list for leaf nodes, as a convenience for callers that\n typically iterated over this methods return value."
return ([] if (self._children is None) else self._children)
|
Return this node's children.
Return an empty list for leaf nodes, as a convenience for callers that
typically iterate over this method's return value.
|
browson/node.py
|
children
|
jherland/browson
| 0 |
python
|
@property
def children(self):
"Return this node's children.\n\n Return an empty list for leaf nodes, as a convenience for callers that\n typically iterated over this methods return value."
return ([] if (self._children is None) else self._children)
|
@property
def children(self):
"Return this node's children.\n\n Return an empty list for leaf nodes, as a convenience for callers that\n typically iterated over this methods return value."
return ([] if (self._children is None) else self._children)<|docstring|>Return this node's children.
Return an empty list for leaf nodes, as a convenience for callers that
typically iterate over this method's return value.<|endoftext|>
|
cf2a0d2c5a5dc197203f83a7b9678d2dc45767fd5adfd73bdf4ba31a6682f790
|
def ancestors(self, include_self=False):
'Yield transitive parents of this node.'
if include_self:
(yield self)
if (self.parent is not None):
(yield from self.parent.ancestors(include_self=True))
|
Yield transitive parents of this node.
|
browson/node.py
|
ancestors
|
jherland/browson
| 0 |
python
|
def ancestors(self, include_self=False):
if include_self:
(yield self)
if (self.parent is not None):
(yield from self.parent.ancestors(include_self=True))
|
def ancestors(self, include_self=False):
if include_self:
(yield self)
if (self.parent is not None):
(yield from self.parent.ancestors(include_self=True))<|docstring|>Yield transitive parents of this node.<|endoftext|>
|
dafcfb61590b8d4dabc9d61c14ee4bc9cf52ed76d0364ad87721ed13547bdb23
|
def dfwalk(self, preorder=yield_node, postorder=None):
'Depth-first walk, yields values yielded from visitor function.'
if (preorder is not None):
(yield from preorder(self))
for child in self.children:
(yield from child.dfwalk(preorder, postorder))
if (postorder is not None):
(yield from postorder(self))
|
Depth-first walk, yields values yielded from visitor function.
|
browson/node.py
|
dfwalk
|
jherland/browson
| 0 |
python
|
def dfwalk(self, preorder=yield_node, postorder=None):
if (preorder is not None):
(yield from preorder(self))
for child in self.children:
(yield from child.dfwalk(preorder, postorder))
if (postorder is not None):
(yield from postorder(self))
|
def dfwalk(self, preorder=yield_node, postorder=None):
if (preorder is not None):
(yield from preorder(self))
for child in self.children:
(yield from child.dfwalk(preorder, postorder))
if (postorder is not None):
(yield from postorder(self))<|docstring|>Depth-first walk, yields values yielded from visitor function.<|endoftext|>
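The last few records (is_leaf, children, ancestors, dfwalk) all belong to the same Node class in browson/node.py. The minimal stand-in below stitches them together so the traversal can be run directly; the constructor and the yield_node default are assumptions, since the records only show the methods.

def yield_node(node):
    yield node

class Node:
    def __init__(self, name, parent=None, children=None):
        self.name = name
        self.parent = parent
        self._children = children          # None marks a leaf node

    @property
    def is_leaf(self):
        return self._children is None

    @property
    def children(self):
        return [] if self._children is None else self._children

    def ancestors(self, include_self=False):
        if include_self:
            yield self
        if self.parent is not None:
            yield from self.parent.ancestors(include_self=True)

    def dfwalk(self, preorder=yield_node, postorder=None):
        if preorder is not None:
            yield from preorder(self)
        for child in self.children:
            yield from child.dfwalk(preorder, postorder)
        if postorder is not None:
            yield from postorder(self)

root = Node('root', children=[])
child = Node('child', parent=root, children=[])
leaf = Node('leaf', parent=child)
child._children.append(leaf)
root._children.append(child)

print([n.name for n in root.dfwalk()])      # ['root', 'child', 'leaf']
print([n.name for n in leaf.ancestors()])   # ['child', 'root']
print(leaf.is_leaf, root.is_leaf)           # True False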
|
b0ef35c3b3efadd90a98d5d781623fea8755adcb0cd7c8dc9eb040cad21f8ee7
|
def hit(self):
'\n Decrease 1 HP and change the color of image and return the remaining HP\n\n @return The remaining HP\n '
self.hp -= 1
self.image = self._create_surface((244, 158, 66))
return self.hp
|
Decrease 1 HP and change the color of the image and return the remaining HP
@return The remaining HP
|
MLGame/games/arkanoid/game/gameobject.py
|
hit
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
| 0 |
python
|
def hit(self):
'\n Decrease 1 HP and change the color of image and return the remaining HP\n\n @return The remaining HP\n '
self.hp -= 1
self.image = self._create_surface((244, 158, 66))
return self.hp
|
def hit(self):
'\n Decrease 1 HP and change the color of image and return the remaining HP\n\n @return The remaining HP\n '
self.hp -= 1
self.image = self._create_surface((244, 158, 66))
return self.hp<|docstring|>Decrease 1 HP and change the color of the image and return the remaining HP
@return The remaining HP<|endoftext|>
|
80b1fa89cc995477d32f5429ab1ee04e078e451a3c6048abab55b9e4d98dd606
|
def _platform_additional_check(self, platform: Platform):
'\n The additional checking for the condition that the ball passes the corner of the platform\n '
if (self.rect.bottom > platform.rect.top):
routine_a = (Vector2(self._last_pos.bottomleft), Vector2(self.rect.bottomleft))
routine_b = (Vector2(self._last_pos.bottomright), Vector2(self.rect.bottomright))
return (physics.rect_collideline(platform.rect, routine_a) or physics.rect_collideline(platform.rect, routine_b))
return False
|
The additional checking for the condition that the ball passes the corner of the platform
|
MLGame/games/arkanoid/game/gameobject.py
|
_platform_additional_check
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
| 0 |
python
|
def _platform_additional_check(self, platform: Platform):
'\n \n '
if (self.rect.bottom > platform.rect.top):
routine_a = (Vector2(self._last_pos.bottomleft), Vector2(self.rect.bottomleft))
routine_b = (Vector2(self._last_pos.bottomright), Vector2(self.rect.bottomright))
return (physics.rect_collideline(platform.rect, routine_a) or physics.rect_collideline(platform.rect, routine_b))
return False
|
def _platform_additional_check(self, platform: Platform):
'\n \n '
if (self.rect.bottom > platform.rect.top):
routine_a = (Vector2(self._last_pos.bottomleft), Vector2(self.rect.bottomleft))
routine_b = (Vector2(self._last_pos.bottomright), Vector2(self.rect.bottomright))
return (physics.rect_collideline(platform.rect, routine_a) or physics.rect_collideline(platform.rect, routine_b))
return False<|docstring|>The additional checking for the condition that the ball passes the corner of the platform<|endoftext|>
|
321351843763018d208f18ddd518983be415e0ed9925d9ef4130629a412b953d
|
def _slice_ball(self, ball_speed_x, platform_speed_x):
'\n Check if the platform slices the ball, and modify the ball speed.\n\n @return The new x speed of the ball after slicing\n '
if (platform_speed_x == 0):
return (7 if (ball_speed_x > 0) else (- 7))
elif ((ball_speed_x * platform_speed_x) > 0):
return (10 if (ball_speed_x > 0) else (- 10))
else:
return ((- 7) if (ball_speed_x > 0) else 7)
|
Check if the platform slices the ball, and modify the ball speed.
@return The new x speed of the ball after slicing
|
MLGame/games/arkanoid/game/gameobject.py
|
_slice_ball
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
| 0 |
python
|
def _slice_ball(self, ball_speed_x, platform_speed_x):
'\n Check if the platform slices the ball, and modify the ball speed.\n\n @return The new x speed of the ball after slicing\n '
if (platform_speed_x == 0):
return (7 if (ball_speed_x > 0) else (- 7))
elif ((ball_speed_x * platform_speed_x) > 0):
return (10 if (ball_speed_x > 0) else (- 10))
else:
return ((- 7) if (ball_speed_x > 0) else 7)
|
def _slice_ball(self, ball_speed_x, platform_speed_x):
'\n Check if the platform slices the ball, and modify the ball speed.\n\n @return The new x speed of the ball after slicing\n '
if (platform_speed_x == 0):
return (7 if (ball_speed_x > 0) else (- 7))
elif ((ball_speed_x * platform_speed_x) > 0):
return (10 if (ball_speed_x > 0) else (- 10))
else:
return ((- 7) if (ball_speed_x > 0) else 7)<|docstring|>Check if the platform slices the ball, and modify the ball speed.
@return The new x speed of the ball after slicing<|endoftext|>
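A standalone restatement of the slicing rule so the three cases can be checked at a glance; the magnitudes 7 and 10 come straight from the record, while the free function is mine.

def slice_ball(ball_speed_x, platform_speed_x):
    if platform_speed_x == 0:
        return 7 if ball_speed_x > 0 else -7      # platform still: normal speed
    if ball_speed_x * platform_speed_x > 0:
        return 10 if ball_speed_x > 0 else -10    # moving with the ball: speed up
    return -7 if ball_speed_x > 0 else 7          # moving against the ball: reverse x

print(slice_ball(7, 0), slice_ball(7, 5), slice_ball(7, -5))   # 7 10 -7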
|
49d3b78919ff711e3eab976eb9ffe4f88653608f987fb23f4de16a4e52e09f3c
|
def check_hit_brick(self, group_brick: pygame.sprite.RenderPlain) -> int:
'\n Check if the ball hits bricks in the `group_brick`.\n The hit bricks will be removed from `group_brick`, but the alive hard brick will not.\n However, if the ball speed is high, the hard brick will be removed with only one hit.\n\n @param group_brick The sprite group containing bricks\n @return The number of destroyed bricks\n '
hit_bricks = pygame.sprite.spritecollide(self, group_brick, 1, physics.collide_or_contact)
num_of_destroyed_brick = len(hit_bricks)
if (num_of_destroyed_brick > 0):
if ((num_of_destroyed_brick == 2) and ((hit_bricks[0].rect.y == hit_bricks[1].rect.y) or (hit_bricks[0].rect.x == hit_bricks[1].rect.x))):
combined_rect = hit_bricks[0].rect.union(hit_bricks[1].rect)
physics.bounce_off_ip(self.rect, self._speed, combined_rect, (0, 0))
else:
physics.bounce_off_ip(self.rect, self._speed, hit_bricks[0].rect, (0, 0))
if (abs(self._speed[0]) == 7):
for brick in hit_bricks:
if (isinstance(brick, HardBrick) and brick.hit()):
group_brick.add((brick,))
num_of_destroyed_brick -= 1
return num_of_destroyed_brick
|
Check if the ball hits bricks in the `group_brick`.
The hit bricks will be removed from `group_brick`, but the alive hard brick will not.
However, if the ball speed is high, the hard brick will be removed with only one hit.
@param group_brick The sprite group containing bricks
@return The number of destroyed bricks
|
MLGame/games/arkanoid/game/gameobject.py
|
check_hit_brick
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
| 0 |
python
|
def check_hit_brick(self, group_brick: pygame.sprite.RenderPlain) -> int:
'\n Check if the ball hits bricks in the `group_brick`.\n The hit bricks will be removed from `group_brick`, but the alive hard brick will not.\n However, if the ball speed is high, the hard brick will be removed with only one hit.\n\n @param group_brick The sprite group containing bricks\n @return The number of destroyed bricks\n '
hit_bricks = pygame.sprite.spritecollide(self, group_brick, 1, physics.collide_or_contact)
num_of_destroyed_brick = len(hit_bricks)
if (num_of_destroyed_brick > 0):
if ((num_of_destroyed_brick == 2) and ((hit_bricks[0].rect.y == hit_bricks[1].rect.y) or (hit_bricks[0].rect.x == hit_bricks[1].rect.x))):
combined_rect = hit_bricks[0].rect.union(hit_bricks[1].rect)
physics.bounce_off_ip(self.rect, self._speed, combined_rect, (0, 0))
else:
physics.bounce_off_ip(self.rect, self._speed, hit_bricks[0].rect, (0, 0))
if (abs(self._speed[0]) == 7):
for brick in hit_bricks:
if (isinstance(brick, HardBrick) and brick.hit()):
group_brick.add((brick,))
num_of_destroyed_brick -= 1
return num_of_destroyed_brick
|
def check_hit_brick(self, group_brick: pygame.sprite.RenderPlain) -> int:
'\n Check if the ball hits bricks in the `group_brick`.\n The hit bricks will be removed from `group_brick`, but the alive hard brick will not.\n However, if the ball speed is high, the hard brick will be removed with only one hit.\n\n @param group_brick The sprite group containing bricks\n @return The number of destroyed bricks\n '
hit_bricks = pygame.sprite.spritecollide(self, group_brick, 1, physics.collide_or_contact)
num_of_destroyed_brick = len(hit_bricks)
if (num_of_destroyed_brick > 0):
if ((num_of_destroyed_brick == 2) and ((hit_bricks[0].rect.y == hit_bricks[1].rect.y) or (hit_bricks[0].rect.x == hit_bricks[1].rect.x))):
combined_rect = hit_bricks[0].rect.union(hit_bricks[1].rect)
physics.bounce_off_ip(self.rect, self._speed, combined_rect, (0, 0))
else:
physics.bounce_off_ip(self.rect, self._speed, hit_bricks[0].rect, (0, 0))
if (abs(self._speed[0]) == 7):
for brick in hit_bricks:
if (isinstance(brick, HardBrick) and brick.hit()):
group_brick.add((brick,))
num_of_destroyed_brick -= 1
return num_of_destroyed_brick<|docstring|>Check if the ball hits bricks in the `group_brick`.
The hit bricks will be removed from `group_brick`, but the alive hard brick will not.
However, if the ball speed is high, the hard brick will be removed with only one hit.
@param group_brick The sprite group containing bricks
@return The number of destroyed bricks<|endoftext|>
|
02525ddedbfcc39d8788c2663778e039a75e08f874d60aeac9b2e5dec25367d5
|
def create_optimizer(loss, learning_rate, num_train_steps, weight_decay_rate=0.0, warmup_steps=0, warmup_proportion=0, lr_decay_power=1.0, layerwise_lr_decay_power=(- 1), n_transformer_layers=None, hvd=None, use_fp16=False, num_accumulation_steps=1, allreduce_post_accumulation=False):
'\n Creates an optimizer and training op.\n '
compression = (Compression.fp16 if use_fp16 else Compression.none)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=lr_decay_power, cycle=False)
warmup_steps = max((num_train_steps * warmup_proportion), warmup_steps)
learning_rate *= tf.minimum(1.0, (tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)))
if (layerwise_lr_decay_power > 0):
learning_rate = _get_layer_lrs(learning_rate, layerwise_lr_decay_power, n_transformer_layers)
optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=weight_decay_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
if ((hvd is not None) and ((num_accumulation_steps == 1) or (not allreduce_post_accumulation))):
optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True, compression=compression)
if use_fp16:
loss_scale_manager = tf_contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=(2 ** 32), incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)
optimizer = tf_contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
tvars = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(((loss * 1.0) / num_accumulation_steps), tvars)
if (num_accumulation_steps > 1):
local_step = tf.get_variable(name='local_step', shape=[], dtype=tf.int32, trainable=False, initializer=tf.zeros_initializer())
batch_finite = tf.get_variable(name='batch_finite', shape=[], dtype=tf.bool, trainable=False, initializer=tf.ones_initializer())
accum_vars = [tf.get_variable(name=(tvar.name.split(':')[0] + '/accum'), shape=tvar.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) for tvar in tvars]
reset_step = tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool)
local_step = tf.cond(reset_step, (lambda : local_step.assign(tf.ones_like(local_step))), (lambda : local_step.assign_add(1)))
(grads, tvars, accum_vars) = zip(*[(g, v, g_acc) for ((g, v), g_acc) in zip(grads_and_vars, accum_vars) if (g is not None)])
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
batch_finite = tf.cond(reset_step, (lambda : batch_finite.assign(tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite))), (lambda : batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite))))
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
accum_vars = tf.cond(reset_step, (lambda : [v.assign(grad) for (v, grad) in zip(accum_vars, clipped_grads)]), (lambda : [v.assign_add(grad) for (v, grad) in zip(accum_vars, clipped_grads)]))
def update(accum_vars):
if (allreduce_post_accumulation and (hvd is not None)):
accum_vars = [(hvd.allreduce(tf.convert_to_tensor(accum_var), compression=compression) if isinstance(accum_var, tf.IndexedSlices) else hvd.allreduce(accum_var, compression=compression)) for accum_var in accum_vars]
return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
update_step = tf.identity(tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool), name='update_step')
update_op = tf.cond(update_step, (lambda : update(accum_vars)), (lambda : tf.no_op()))
new_global_step = tf.cond(tf.math.logical_and(update_step, tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool)), (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for (g, v) in grads_and_vars if (g is not None)]
(grads, tvars) = list(zip(*grads_and_vars))
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
train_op = optimizer.apply_gradients(list(zip(clipped_grads, tvars)), global_step=global_step)
new_global_step = tf.cond(all_are_finite, (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
|
Creates an optimizer and training op.
|
model/optimization.py
|
create_optimizer
|
ololo123321/electra
| 0 |
python
|
def create_optimizer(loss, learning_rate, num_train_steps, weight_decay_rate=0.0, warmup_steps=0, warmup_proportion=0, lr_decay_power=1.0, layerwise_lr_decay_power=(- 1), n_transformer_layers=None, hvd=None, use_fp16=False, num_accumulation_steps=1, allreduce_post_accumulation=False):
'\n \n '
compression = (Compression.fp16 if use_fp16 else Compression.none)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=lr_decay_power, cycle=False)
warmup_steps = max((num_train_steps * warmup_proportion), warmup_steps)
learning_rate *= tf.minimum(1.0, (tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)))
if (layerwise_lr_decay_power > 0):
learning_rate = _get_layer_lrs(learning_rate, layerwise_lr_decay_power, n_transformer_layers)
optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=weight_decay_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
if ((hvd is not None) and ((num_accumulation_steps == 1) or (not allreduce_post_accumulation))):
optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True, compression=compression)
if use_fp16:
loss_scale_manager = tf_contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=(2 ** 32), incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)
optimizer = tf_contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
tvars = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(((loss * 1.0) / num_accumulation_steps), tvars)
if (num_accumulation_steps > 1):
local_step = tf.get_variable(name='local_step', shape=[], dtype=tf.int32, trainable=False, initializer=tf.zeros_initializer())
batch_finite = tf.get_variable(name='batch_finite', shape=[], dtype=tf.bool, trainable=False, initializer=tf.ones_initializer())
accum_vars = [tf.get_variable(name=(tvar.name.split(':')[0] + '/accum'), shape=tvar.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) for tvar in tvars]
reset_step = tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool)
local_step = tf.cond(reset_step, (lambda : local_step.assign(tf.ones_like(local_step))), (lambda : local_step.assign_add(1)))
(grads, tvars, accum_vars) = zip(*[(g, v, g_acc) for ((g, v), g_acc) in zip(grads_and_vars, accum_vars) if (g is not None)])
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
batch_finite = tf.cond(reset_step, (lambda : batch_finite.assign(tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite))), (lambda : batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite))))
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
accum_vars = tf.cond(reset_step, (lambda : [v.assign(grad) for (v, grad) in zip(accum_vars, clipped_grads)]), (lambda : [v.assign_add(grad) for (v, grad) in zip(accum_vars, clipped_grads)]))
def update(accum_vars):
if (allreduce_post_accumulation and (hvd is not None)):
accum_vars = [(hvd.allreduce(tf.convert_to_tensor(accum_var), compression=compression) if isinstance(accum_var, tf.IndexedSlices) else hvd.allreduce(accum_var, compression=compression)) for accum_var in accum_vars]
return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
update_step = tf.identity(tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool), name='update_step')
update_op = tf.cond(update_step, (lambda : update(accum_vars)), (lambda : tf.no_op()))
new_global_step = tf.cond(tf.math.logical_and(update_step, tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool)), (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for (g, v) in grads_and_vars if (g is not None)]
(grads, tvars) = list(zip(*grads_and_vars))
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
train_op = optimizer.apply_gradients(list(zip(clipped_grads, tvars)), global_step=global_step)
new_global_step = tf.cond(all_are_finite, (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
|
def create_optimizer(loss, learning_rate, num_train_steps, weight_decay_rate=0.0, warmup_steps=0, warmup_proportion=0, lr_decay_power=1.0, layerwise_lr_decay_power=(- 1), n_transformer_layers=None, hvd=None, use_fp16=False, num_accumulation_steps=1, allreduce_post_accumulation=False):
'\n \n '
compression = (Compression.fp16 if use_fp16 else Compression.none)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=lr_decay_power, cycle=False)
warmup_steps = max((num_train_steps * warmup_proportion), warmup_steps)
learning_rate *= tf.minimum(1.0, (tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)))
if (layerwise_lr_decay_power > 0):
learning_rate = _get_layer_lrs(learning_rate, layerwise_lr_decay_power, n_transformer_layers)
optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=weight_decay_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
if ((hvd is not None) and ((num_accumulation_steps == 1) or (not allreduce_post_accumulation))):
optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True, compression=compression)
if use_fp16:
loss_scale_manager = tf_contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=(2 ** 32), incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)
optimizer = tf_contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
tvars = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(((loss * 1.0) / num_accumulation_steps), tvars)
if (num_accumulation_steps > 1):
local_step = tf.get_variable(name='local_step', shape=[], dtype=tf.int32, trainable=False, initializer=tf.zeros_initializer())
batch_finite = tf.get_variable(name='batch_finite', shape=[], dtype=tf.bool, trainable=False, initializer=tf.ones_initializer())
accum_vars = [tf.get_variable(name=(tvar.name.split(':')[0] + '/accum'), shape=tvar.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) for tvar in tvars]
reset_step = tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool)
local_step = tf.cond(reset_step, (lambda : local_step.assign(tf.ones_like(local_step))), (lambda : local_step.assign_add(1)))
(grads, tvars, accum_vars) = zip(*[(g, v, g_acc) for ((g, v), g_acc) in zip(grads_and_vars, accum_vars) if (g is not None)])
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
batch_finite = tf.cond(reset_step, (lambda : batch_finite.assign(tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite))), (lambda : batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite))))
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
accum_vars = tf.cond(reset_step, (lambda : [v.assign(grad) for (v, grad) in zip(accum_vars, clipped_grads)]), (lambda : [v.assign_add(grad) for (v, grad) in zip(accum_vars, clipped_grads)]))
def update(accum_vars):
if (allreduce_post_accumulation and (hvd is not None)):
accum_vars = [(hvd.allreduce(tf.convert_to_tensor(accum_var), compression=compression) if isinstance(accum_var, tf.IndexedSlices) else hvd.allreduce(accum_var, compression=compression)) for accum_var in accum_vars]
return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
update_step = tf.identity(tf.cast(tf.math.equal((local_step % num_accumulation_steps), 0), dtype=tf.bool), name='update_step')
update_op = tf.cond(update_step, (lambda : update(accum_vars)), (lambda : tf.no_op()))
new_global_step = tf.cond(tf.math.logical_and(update_step, tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool)), (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for (g, v) in grads_and_vars if (g is not None)]
(grads, tvars) = list(zip(*grads_and_vars))
if use_fp16:
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
else:
all_are_finite = tf.constant(True, dtype=tf.bool)
(clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0, use_norm=tf.cond(all_are_finite, (lambda : tf.global_norm(grads)), (lambda : tf.constant(1.0))))
train_op = optimizer.apply_gradients(list(zip(clipped_grads, tvars)), global_step=global_step)
new_global_step = tf.cond(all_are_finite, (lambda : (global_step + 1)), (lambda : global_step))
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op<|docstring|>Creates an optimizer and training op.<|endoftext|>
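When num_accumulation_steps > 1, the graph above sums clipped micro-batch gradients into accum_vars and only applies them once every num_accumulation_steps local steps (the loss is pre-scaled by 1/num_accumulation_steps, so the summed gradient is effectively an average). A minimal framework-free sketch of that accumulate-then-apply pattern, with illustrative names that are not part of the record:

import numpy as np

def accumulate_and_apply(micro_batch_grads, num_accumulation_steps, apply_fn):
    # Sum gradients over N micro-batches, then perform a single update,
    # mirroring the accum_vars / update_step logic in create_optimizer.
    accum = None
    for step, grad in enumerate(micro_batch_grads, start=1):
        accum = grad.copy() if accum is None else accum + grad
        if step % num_accumulation_steps == 0:
            apply_fn(accum / num_accumulation_steps)  # one "global step"
            accum = None

# usage: accumulate_and_apply((np.ones(3) * i for i in range(8)), 4, print)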
|
355f36dbfd98170c5e518f5a4bba67bad63a658ab2faf69f5cb07010722ba74c
|
def _get_layer_lrs(learning_rate, layer_decay, n_layers):
'Have lower learning rates for layers closer to the input.'
key_to_depths = collections.OrderedDict({'/embeddings/': 0, '/embeddings_project/': 0, 'task_specific/': (n_layers + 2)})
for layer in range(n_layers):
key_to_depths[(('encoder/layer_' + str(layer)) + '/')] = (layer + 1)
return {key: (learning_rate * (layer_decay ** ((n_layers + 2) - depth))) for (key, depth) in key_to_depths.items()}
|
Have lower learning rates for layers closer to the input.
|
model/optimization.py
|
_get_layer_lrs
|
ololo123321/electra
| 0 |
python
|
def _get_layer_lrs(learning_rate, layer_decay, n_layers):
key_to_depths = collections.OrderedDict({'/embeddings/': 0, '/embeddings_project/': 0, 'task_specific/': (n_layers + 2)})
for layer in range(n_layers):
key_to_depths[(('encoder/layer_' + str(layer)) + '/')] = (layer + 1)
return {key: (learning_rate * (layer_decay ** ((n_layers + 2) - depth))) for (key, depth) in key_to_depths.items()}
|
def _get_layer_lrs(learning_rate, layer_decay, n_layers):
key_to_depths = collections.OrderedDict({'/embeddings/': 0, '/embeddings_project/': 0, 'task_specific/': (n_layers + 2)})
for layer in range(n_layers):
key_to_depths[(('encoder/layer_' + str(layer)) + '/')] = (layer + 1)
return {key: (learning_rate * (layer_decay ** ((n_layers + 2) - depth))) for (key, depth) in key_to_depths.items()}<|docstring|>Have lower learning rates for layers closer to the input.<|endoftext|>
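A quick numeric illustration of the scaling above (made-up hyperparameters, not from the record): depth 0 (the embeddings) receives the smallest rate, and the task-specific head at depth n_layers + 2 keeps the full learning rate.

learning_rate, layer_decay, n_layers = 1e-4, 0.8, 4
depths = [0] + list(range(1, n_layers + 1)) + [n_layers + 2]  # embeddings, encoder layers, task head
for depth in depths:
    print(depth, learning_rate * layer_decay ** (n_layers + 2 - depth))
# depth 0 -> ~2.6e-05, depth 4 -> 6.4e-05, depth 6 -> 1.0e-04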
|
24adf298c22794b47c56f2cad97d96d0fe9bd10752682c591a28b0dbca159649
|
def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, name='AdamWeightDecayOptimizer'):
'Constructs a AdamWeightDecayOptimizer.'
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
|
Constructs a AdamWeightDecayOptimizer.
|
model/optimization.py
|
__init__
|
ololo123321/electra
| 0 |
python
|
def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, name='AdamWeightDecayOptimizer'):
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
|
def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, name='AdamWeightDecayOptimizer'):
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay<|docstring|>Constructs a AdamWeightDecayOptimizer.<|endoftext|>
|
d7327d149d782103e88635073d69baa53030a35cf75348614ceefcd08b72441d
|
def _apply_gradients(self, grads_and_vars, learning_rate):
'See base class.'
assignments = []
for (grad, param) in grads_and_vars:
if ((grad is None) or (param is None)):
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=(param_name + '/adam_m'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
v = tf.get_variable(name=(param_name + '/adam_v'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
next_m = (tf.multiply(self.beta_1, m) + tf.multiply((1.0 - self.beta_1), grad))
next_v = (tf.multiply(self.beta_2, v) + tf.multiply((1.0 - self.beta_2), tf.square(grad)))
update = (next_m / (tf.sqrt(next_v) + self.epsilon))
if (self.weight_decay_rate > 0):
if self._do_use_weight_decay(param_name):
update += (self.weight_decay_rate * param)
update_with_lr = (learning_rate * update)
next_param = (param - update_with_lr)
assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
return assignments
|
See base class.
|
model/optimization.py
|
_apply_gradients
|
ololo123321/electra
| 0 |
python
|
def _apply_gradients(self, grads_and_vars, learning_rate):
assignments = []
for (grad, param) in grads_and_vars:
if ((grad is None) or (param is None)):
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=(param_name + '/adam_m'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
v = tf.get_variable(name=(param_name + '/adam_v'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
next_m = (tf.multiply(self.beta_1, m) + tf.multiply((1.0 - self.beta_1), grad))
next_v = (tf.multiply(self.beta_2, v) + tf.multiply((1.0 - self.beta_2), tf.square(grad)))
update = (next_m / (tf.sqrt(next_v) + self.epsilon))
if (self.weight_decay_rate > 0):
if self._do_use_weight_decay(param_name):
update += (self.weight_decay_rate * param)
update_with_lr = (learning_rate * update)
next_param = (param - update_with_lr)
assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
return assignments
|
def _apply_gradients(self, grads_and_vars, learning_rate):
assignments = []
for (grad, param) in grads_and_vars:
if ((grad is None) or (param is None)):
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=(param_name + '/adam_m'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
v = tf.get_variable(name=(param_name + '/adam_v'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
next_m = (tf.multiply(self.beta_1, m) + tf.multiply((1.0 - self.beta_1), grad))
next_v = (tf.multiply(self.beta_2, v) + tf.multiply((1.0 - self.beta_2), tf.square(grad)))
update = (next_m / (tf.sqrt(next_v) + self.epsilon))
if (self.weight_decay_rate > 0):
if self._do_use_weight_decay(param_name):
update += (self.weight_decay_rate * param)
update_with_lr = (learning_rate * update)
next_param = (param - update_with_lr)
assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
return assignments<|docstring|>See base class.<|endoftext|>
|
da0acc64040e539fa03f8439eac3eb576613720c4bd7b4eea35feb7371ae9ea6
|
def _do_use_weight_decay(self, param_name):
'Whether to use L2 weight decay for `param_name`.'
if (not self.weight_decay_rate):
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if (re.search(r, param_name) is not None):
return False
return True
|
Whether to use L2 weight decay for `param_name`.
|
model/optimization.py
|
_do_use_weight_decay
|
ololo123321/electra
| 0 |
python
|
def _do_use_weight_decay(self, param_name):
if (not self.weight_decay_rate):
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if (re.search(r, param_name) is not None):
return False
return True
|
def _do_use_weight_decay(self, param_name):
if (not self.weight_decay_rate):
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if (re.search(r, param_name) is not None):
return False
return True<|docstring|>Whether to use L2 weight decay for `param_name`.<|endoftext|>
|
9bd4d7a00c67683b92306e6f63516a48c730f5e6f02201323d156c1c60096274
|
def _get_variable_name(self, param_name):
'Get the variable name from the tensor name.'
m = re.match('^(.*):\\d+$', param_name)
if (m is not None):
param_name = m.group(1)
return param_name
|
Get the variable name from the tensor name.
|
model/optimization.py
|
_get_variable_name
|
ololo123321/electra
| 0 |
python
|
def _get_variable_name(self, param_name):
m = re.match('^(.*):\\d+$', param_name)
if (m is not None):
param_name = m.group(1)
return param_name
|
def _get_variable_name(self, param_name):
m = re.match('^(.*):\\d+$', param_name)
if (m is not None):
param_name = m.group(1)
return param_name<|docstring|>Get the variable name from the tensor name.<|endoftext|>
|
78174c503a36c91b6dcfe505831b062f5ce01da2d47ed74cc4023f1b9cad3aba
|
def main_cron(no_confirm=True):
'set no_confirm to True for running this script automatically\n without intervention.'
src_dump = get_src_dump()
mart_version = chk_latest_mart_version()
logging.info(('Checking latest mart_version:\t%s' % mart_version))
doc = src_dump.find_one({'_id': 'ensembl'})
if (doc and ('release' in doc) and (mart_version <= doc['release'])):
data_file = os.path.join(doc['data_folder'], 'gene_ensembl__gene__main.txt')
if os.path.exists(data_file):
logging.info('No newer release found. Abort now.')
sys.exit(0)
DATA_FOLDER = os.path.join(ENSEMBL_FOLDER, str(mart_version))
if (not os.path.exists(DATA_FOLDER)):
os.makedirs(DATA_FOLDER)
elif (not (no_confirm or (len(os.listdir(DATA_FOLDER)) == 0) or (ask(('DATA_FOLDER (%s) is not empty. Continue?' % DATA_FOLDER)) == 'Y'))):
sys.exit(0)
logfile = os.path.join(DATA_FOLDER, ('ensembl_mart_%s.log' % mart_version))
setup_logfile(logfile)
doc = {'_id': 'ensembl', 'release': mart_version, 'timestamp': time.strftime('%Y%m%d'), 'data_folder': DATA_FOLDER, 'logfile': logfile, 'status': 'downloading'}
src_dump.save(doc)
t0 = time.time()
try:
BM = BioMart()
BM.species_li = get_all_species(mart_version)
BM.get_gene__main(os.path.join(DATA_FOLDER, 'gene_ensembl__gene__main.txt'))
BM.get_translation__main(os.path.join(DATA_FOLDER, 'gene_ensembl__translation__main.txt'))
BM.get_xref_entrezgene(os.path.join(DATA_FOLDER, 'gene_ensembl__xref_entrezgene__dm.txt'))
BM.get_profile(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_profile__dm.txt'))
BM.get_interpro(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_interpro__dm.txt'))
BM.get_pfam(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_pfam__dm.txt'))
finally:
sys.stdout.close()
_updates = {'status': 'success', 'time': timesofar(t0), 'pending_to_upload': True}
src_dump.update({'_id': 'ensembl'}, {'$set': _updates})
|
set no_confirm to True for running this script automatically
without intervention.
|
src/dataload/data_dump/dl_ensembl_mart.py
|
main_cron
|
karawallace/mygene
| 0 |
python
|
def main_cron(no_confirm=True):
'set no_confirm to True for running this script automatically\n without intervention.'
src_dump = get_src_dump()
mart_version = chk_latest_mart_version()
logging.info(('Checking latest mart_version:\t%s' % mart_version))
doc = src_dump.find_one({'_id': 'ensembl'})
if (doc and ('release' in doc) and (mart_version <= doc['release'])):
data_file = os.path.join(doc['data_folder'], 'gene_ensembl__gene__main.txt')
if os.path.exists(data_file):
logging.info('No newer release found. Abort now.')
sys.exit(0)
DATA_FOLDER = os.path.join(ENSEMBL_FOLDER, str(mart_version))
if (not os.path.exists(DATA_FOLDER)):
os.makedirs(DATA_FOLDER)
elif (not (no_confirm or (len(os.listdir(DATA_FOLDER)) == 0) or (ask(('DATA_FOLDER (%s) is not empty. Continue?' % DATA_FOLDER)) == 'Y'))):
sys.exit(0)
logfile = os.path.join(DATA_FOLDER, ('ensembl_mart_%s.log' % mart_version))
setup_logfile(logfile)
doc = {'_id': 'ensembl', 'release': mart_version, 'timestamp': time.strftime('%Y%m%d'), 'data_folder': DATA_FOLDER, 'logfile': logfile, 'status': 'downloading'}
src_dump.save(doc)
t0 = time.time()
try:
BM = BioMart()
BM.species_li = get_all_species(mart_version)
BM.get_gene__main(os.path.join(DATA_FOLDER, 'gene_ensembl__gene__main.txt'))
BM.get_translation__main(os.path.join(DATA_FOLDER, 'gene_ensembl__translation__main.txt'))
BM.get_xref_entrezgene(os.path.join(DATA_FOLDER, 'gene_ensembl__xref_entrezgene__dm.txt'))
BM.get_profile(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_profile__dm.txt'))
BM.get_interpro(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_interpro__dm.txt'))
BM.get_pfam(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_pfam__dm.txt'))
finally:
sys.stdout.close()
_updates = {'status': 'success', 'time': timesofar(t0), 'pending_to_upload': True}
src_dump.update({'_id': 'ensembl'}, {'$set': _updates})
|
def main_cron(no_confirm=True):
'set no_confirm to True for running this script automatically\n without intervention.'
src_dump = get_src_dump()
mart_version = chk_latest_mart_version()
logging.info(('Checking latest mart_version:\t%s' % mart_version))
doc = src_dump.find_one({'_id': 'ensembl'})
if (doc and ('release' in doc) and (mart_version <= doc['release'])):
data_file = os.path.join(doc['data_folder'], 'gene_ensembl__gene__main.txt')
if os.path.exists(data_file):
logging.info('No newer release found. Abort now.')
sys.exit(0)
DATA_FOLDER = os.path.join(ENSEMBL_FOLDER, str(mart_version))
if (not os.path.exists(DATA_FOLDER)):
os.makedirs(DATA_FOLDER)
elif (not (no_confirm or (len(os.listdir(DATA_FOLDER)) == 0) or (ask(('DATA_FOLDER (%s) is not empty. Continue?' % DATA_FOLDER)) == 'Y'))):
sys.exit(0)
logfile = os.path.join(DATA_FOLDER, ('ensembl_mart_%s.log' % mart_version))
setup_logfile(logfile)
doc = {'_id': 'ensembl', 'release': mart_version, 'timestamp': time.strftime('%Y%m%d'), 'data_folder': DATA_FOLDER, 'logfile': logfile, 'status': 'downloading'}
src_dump.save(doc)
t0 = time.time()
try:
BM = BioMart()
BM.species_li = get_all_species(mart_version)
BM.get_gene__main(os.path.join(DATA_FOLDER, 'gene_ensembl__gene__main.txt'))
BM.get_translation__main(os.path.join(DATA_FOLDER, 'gene_ensembl__translation__main.txt'))
BM.get_xref_entrezgene(os.path.join(DATA_FOLDER, 'gene_ensembl__xref_entrezgene__dm.txt'))
BM.get_profile(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_profile__dm.txt'))
BM.get_interpro(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_interpro__dm.txt'))
BM.get_pfam(os.path.join(DATA_FOLDER, 'gene_ensembl__prot_pfam__dm.txt'))
finally:
sys.stdout.close()
_updates = {'status': 'success', 'time': timesofar(t0), 'pending_to_upload': True}
src_dump.update({'_id': 'ensembl'}, {'$set': _updates})<|docstring|>set no_confirm to True for running this script automatically
without intervention.<|endoftext|>
|
0a02cb030fe1194a48c18c592b889af2423edbd64b23cb83025e813675ca88f8
|
@router.get('', response_model=projects.schemas.template.TemplateList)
async def handle_list_templates(session: Session=Depends(session_scope)):
'\n Handles GET requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.TemplateList\n '
template_controller = TemplateController(session)
templates = template_controller.list_templates()
return templates
|
Handles GET requests to /.
Parameters
----------
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.TemplateList
|
projects/api/templates.py
|
handle_list_templates
|
dnlcesilva/projects
| 0 |
python
|
@router.get('', response_model=projects.schemas.template.TemplateList)
async def handle_list_templates(session: Session=Depends(session_scope)):
'\n Handles GET requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.TemplateList\n '
template_controller = TemplateController(session)
templates = template_controller.list_templates()
return templates
|
@router.get('', response_model=projects.schemas.template.TemplateList)
async def handle_list_templates(session: Session=Depends(session_scope)):
'\n Handles GET requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.TemplateList\n '
template_controller = TemplateController(session)
templates = template_controller.list_templates()
return templates<|docstring|>Handles GET requests to /.
Parameters
----------
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.TemplateList<|endoftext|>
|
33970662e9238be37ee1034084554e7c91dacd265ac4cbc7bb0526253fea74d2
|
@router.post('', response_model=projects.schemas.template.Template)
async def handle_post_templates(template: projects.schemas.template.TemplateCreate, session: Session=Depends(session_scope)):
'\n Handles POST requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n template : projects.schemas.template.TemplateCreate\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.create_template(template=template)
return template
|
Handles POST requests to /.
Parameters
----------
session : sqlalchemy.orm.session.Session
template : projects.schemas.template.TemplateCreate
Returns
-------
projects.schemas.template.Template
|
projects/api/templates.py
|
handle_post_templates
|
dnlcesilva/projects
| 0 |
python
|
@router.post('', response_model=projects.schemas.template.Template)
async def handle_post_templates(template: projects.schemas.template.TemplateCreate, session: Session=Depends(session_scope)):
'\n Handles POST requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n template : projects.schemas.template.TemplateCreate\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.create_template(template=template)
return template
|
@router.post('', response_model=projects.schemas.template.Template)
async def handle_post_templates(template: projects.schemas.template.TemplateCreate, session: Session=Depends(session_scope)):
'\n Handles POST requests to /.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session.Session\n template : projects.schemas.template.TemplateCreate\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.create_template(template=template)
return template<|docstring|>Handles POST requests to /.
Parameters
----------
session : sqlalchemy.orm.session.Session
template : projects.schemas.template.TemplateCreate
Returns
-------
projects.schemas.template.Template<|endoftext|>
|
6851dd80601a6984ddeda54275bfb4f7892cc1468c94ea53da887e0568ce6ce4
|
@router.get('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_get_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles GET requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.get_template(template_id=template_id)
return template
|
Handles GET requests to /<template_id>.
Parameters
----------
template_id : str
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.Template
|
projects/api/templates.py
|
handle_get_template
|
dnlcesilva/projects
| 0 |
python
|
@router.get('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_get_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles GET requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.get_template(template_id=template_id)
return template
|
@router.get('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_get_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles GET requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.get_template(template_id=template_id)
return template<|docstring|>Handles GET requests to /<template_id>.
Parameters
----------
template_id : str
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.Template<|endoftext|>
|
417ed3ac46968bf7b017d3b69ef199f5a917d4db3a6caf9894a98d0c4bbc48d8
|
@router.patch('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_patch_template(template_id: str, template: projects.schemas.template.TemplateUpdate, session: Session=Depends(session_scope)):
'\n Handles PATCH requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n template : projects.schemas.template.TemplateUpdate\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.update_template(template_id=template_id, template=template)
return template
|
Handles PATCH requests to /<template_id>.
Parameters
----------
template_id : str
template : projects.schemas.template.TemplateUpdate
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.Template
|
projects/api/templates.py
|
handle_patch_template
|
dnlcesilva/projects
| 0 |
python
|
@router.patch('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_patch_template(template_id: str, template: projects.schemas.template.TemplateUpdate, session: Session=Depends(session_scope)):
'\n Handles PATCH requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n template : projects.schemas.template.TemplateUpdate\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.update_template(template_id=template_id, template=template)
return template
|
@router.patch('/{template_id}', response_model=projects.schemas.template.Template)
async def handle_patch_template(template_id: str, template: projects.schemas.template.TemplateUpdate, session: Session=Depends(session_scope)):
'\n Handles PATCH requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n template : projects.schemas.template.TemplateUpdate\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.template.Template\n '
template_controller = TemplateController(session)
template = template_controller.update_template(template_id=template_id, template=template)
return template<|docstring|>Handles PATCH requests to /<template_id>.
Parameters
----------
template_id : str
template : projects.schemas.template.TemplateUpdate
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.template.Template<|endoftext|>
|
821f5820c08fe2fcb3746e72b11a790f8d30500a1b09d10f434e2b94257d6b77
|
@router.delete('/{template_id}')
async def handle_delete_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles DELETE requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.message.Message\n '
template_controller = TemplateController(session)
template = template_controller.delete_template(template_id=template_id)
return template
|
Handles DELETE requests to /<template_id>.
Parameters
----------
template_id : str
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.message.Message
|
projects/api/templates.py
|
handle_delete_template
|
dnlcesilva/projects
| 0 |
python
|
@router.delete('/{template_id}')
async def handle_delete_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles DELETE requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.message.Message\n '
template_controller = TemplateController(session)
template = template_controller.delete_template(template_id=template_id)
return template
|
@router.delete('/{template_id}')
async def handle_delete_template(template_id: str, session: Session=Depends(session_scope)):
'\n Handles DELETE requests to /<template_id>.\n\n Parameters\n ----------\n template_id : str\n session : sqlalchemy.orm.session.Session\n\n Returns\n -------\n projects.schemas.message.Message\n '
template_controller = TemplateController(session)
template = template_controller.delete_template(template_id=template_id)
return template<|docstring|>Handles DELETE requests to /<template_id>.
Parameters
----------
template_id : str
session : sqlalchemy.orm.session.Session
Returns
-------
projects.schemas.message.Message<|endoftext|>
|
d11619b5a0d46b298de29c133ef56d59f58d6314891b229ce5af2a34a350a4fc
|
def limit_speed(self, velocity):
'\n Returns a velocity that is no faster than self.speed_limit.\n '
speed = la.norm(velocity)
if (speed < self.speed_limit):
return velocity
else:
return ((self.speed_limit * velocity) / speed)
|
Returns a velocity that is no faster than self.speed_limit.
|
src/ball.py
|
limit_speed
|
dnagarkar/bubble-deflector
| 2 |
python
|
def limit_speed(self, velocity):
'\n \n '
speed = la.norm(velocity)
if (speed < self.speed_limit):
return velocity
else:
return ((self.speed_limit * velocity) / speed)
|
def limit_speed(self, velocity):
'\n \n '
speed = la.norm(velocity)
if (speed < self.speed_limit):
return velocity
else:
return ((self.speed_limit * velocity) / speed)<|docstring|>Returns a velocity that is no faster than self.speed_limit.<|endoftext|>
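For example (a standalone check, not part of the record), a velocity of [6, 8] with speed_limit 5 keeps its direction but is rescaled to length 5:

import numpy as np
import numpy.linalg as la

speed_limit, velocity = 5.0, np.array([6.0, 8.0])  # la.norm(velocity) == 10
clamped = speed_limit * velocity / la.norm(velocity)
print(clamped, la.norm(clamped))  # [3. 4.] 5.0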
|
8caec319c35ccb7961a3d028d71211be971da85fe08288c565b53e7d1b251ae6
|
def apply_force(self, force):
'\n Compute and update the acceleration on this ball.\n Hint: Force = Mass * Acceleration\n '
pass
|
Compute and update the acceleration on this ball.
Hint: Force = Mass * Acceleration
|
src/ball.py
|
apply_force
|
dnagarkar/bubble-deflector
| 2 |
python
|
def apply_force(self, force):
'\n Compute and update the acceleration on this ball.\n Hint: Force = Mass * Acceleration\n '
pass
|
def apply_force(self, force):
'\n Compute and update the acceleration on this ball.\n Hint: Force = Mass * Acceleration\n '
pass<|docstring|>Compute and update the acceleration on this ball.
Hint: Force = Mass * Acceleration<|endoftext|>
|
b5acf983b070c671765ed0bfbcd92d05bc47322dbd4c4e76df836bf5bfc7b6de
|
def update(self, dt):
'\n Update the velocity and position.\n Hint: new_velocity = dt * acceleration + old_velocity\n Hint: new_position = dt * velocity + old_position\n '
self.acceleration = np.array([0, 0], dtype=np.float)
|
Update the velocity and position.
Hint: new_velocity = dt * acceleration + old_velocity
Hint: new_position = dt * velocity + old_position
|
src/ball.py
|
update
|
dnagarkar/bubble-deflector
| 2 |
python
|
def update(self, dt):
'\n Update the velocity and position.\n Hint: new_velocity = dt * acceleration + old_velocity\n Hint: new_position = dt * velocity + old_position\n '
self.acceleration = np.array([0, 0], dtype=np.float)
|
def update(self, dt):
'\n Update the velocity and position.\n Hint: new_velocity = dt * acceleration + old_velocity\n Hint: new_position = dt * velocity + old_position\n '
self.acceleration = np.array([0, 0], dtype=np.float)<|docstring|>Update the velocity and position.
Hint: new_velocity = dt * acceleration + old_velocity
Hint: new_position = dt * velocity + old_position<|endoftext|>
|
bc9d7566fec526d8e9bfd77f8955c3e9742ef12a9666a603d67e9c2cd1f113a4
|
def wall_collision(self, wall, dt):
'\n We first want to check if this ball is colliding with `wall`.\n If so, we want to compute the force on the ball.\n Hint: Take a look at `self.compute_wall_collision_point`\n '
pass
|
We first want to check if this ball is colliding with `wall`.
If so, we want to compute the force on the ball.
Hint: Take a look at `self.compute_wall_collision_point`
|
src/ball.py
|
wall_collision
|
dnagarkar/bubble-deflector
| 2 |
python
|
def wall_collision(self, wall, dt):
'\n We first want to check if this ball is colliding with `wall`.\n If so, we want to compute the force on the ball.\n Hint: Take a look at `self.compute_wall_collision_point`\n '
pass
|
def wall_collision(self, wall, dt):
'\n We first want to check if this ball is colliding with `wall`.\n If so, we want to compute the force on the ball.\n Hint: Take a look at `self.compute_wall_collision_point`\n '
pass<|docstring|>We first want to check if this ball is colliding with `wall`.
If so, we want to compute the force on the ball.
Hint: Take a look at `self.compute_wall_collision_point`<|endoftext|>
|
87d6a63096548fb14b116ae934be8836b2409d7033874192f1c685a896578913
|
def ball_ball_collision(ball_a, ball_b, dt):
'\n We first want to check if `ball_a` and `ball_b` are colliding.\n If so, we need to apply a force to each ball.\n Hint: Take a look at `Ball.reset_ball_collision_positions`\n and `Ball.compute_ball_collision_forces`\n '
pass
|
We first want to check if `ball_a` and `ball_b` are colliding.
If so, we need to apply a force to each ball.
Hint: Take a look at `Ball.reset_ball_collision_positions`
and `Ball.compute_ball_collision_forces`
|
src/ball.py
|
ball_ball_collision
|
dnagarkar/bubble-deflector
| 2 |
python
|
def ball_ball_collision(ball_a, ball_b, dt):
'\n We first want to check if `ball_a` and `ball_b` are colliding.\n If so, we need to apply a force to each ball.\n Hint: Take a look at `Ball.reset_ball_collision_positions`\n and `Ball.compute_ball_collision_forces`\n '
pass
|
def ball_ball_collision(ball_a, ball_b, dt):
'\n We first want to check if `ball_a` and `ball_b` are colliding.\n If so, we need to apply a force to each ball.\n Hint: Take a look at `Ball.reset_ball_collision_positions`\n and `Ball.compute_ball_collision_forces`\n '
pass<|docstring|>We first want to check if `ball_a` and `ball_b` are colliding.
If so, we need to apply a force to each ball.
Hint: Take a look at `Ball.reset_ball_collision_positions`
and `Ball.compute_ball_collision_forces`<|endoftext|>
|
fb70efea6b791203e60943d86c014d8b5f32aa3b18bc45da78ba4cb14836389a
|
def draw(self, screen):
'\n Draw this ball to the screen.\n '
position = self.position.astype(np.int)
screen_bounds = np.array([[0, 0], [screen.get_width(), screen.get_height()]], dtype=np.int)
if (np.all((screen_bounds[0] < position)) and np.all((position < screen_bounds[1]))):
pygame.draw.circle(screen, self.color, position, self.radius, self.thickness)
|
Draw this ball to the screen.
|
src/ball.py
|
draw
|
dnagarkar/bubble-deflector
| 2 |
python
|
def draw(self, screen):
'\n \n '
position = self.position.astype(np.int)
screen_bounds = np.array([[0, 0], [screen.get_width(), screen.get_height()]], dtype=np.int)
if (np.all((screen_bounds[0] < position)) and np.all((position < screen_bounds[1]))):
pygame.draw.circle(screen, self.color, position, self.radius, self.thickness)
|
def draw(self, screen):
'\n \n '
position = self.position.astype(np.int)
screen_bounds = np.array([[0, 0], [screen.get_width(), screen.get_height()]], dtype=np.int)
if (np.all((screen_bounds[0] < position)) and np.all((position < screen_bounds[1]))):
pygame.draw.circle(screen, self.color, position, self.radius, self.thickness)<|docstring|>Draw this ball to the screen.<|endoftext|>
|
b3760ffbc4f3b0db07b1980c4829e4bb70cbd059d99ae442f787ce212ef51629
|
def compute_wall_collision_point(self, wall):
'\n Returns the point where ball and wall intersect or False if they do not\n intersect.\n '
if (la.norm((wall.a - wall.b)) == 0):
return False
d_norm = np.dot(((wall.b - wall.a) / la.norm((wall.b - wall.a))), (self.position - wall.a))
if ((0 <= d_norm) and (d_norm <= la.norm((wall.b - wall.a)))):
d = (((d_norm * (wall.b - wall.a)) / la.norm((wall.b - wall.a))) + wall.a)
elif (((- self.radius) <= d_norm) and (d_norm < 0)):
d = wall.a
elif ((la.norm((wall.b - wall.a)) < d_norm) and (d_norm <= (la.norm((wall.b - wall.a)) + self.radius))):
d = wall.b
else:
return False
if (la.norm((self.position - d)) <= self.radius):
return d
return False
|
Returns the point where ball and wall intersect or False if they do not
intersect.
|
src/ball.py
|
compute_wall_collision_point
|
dnagarkar/bubble-deflector
| 2 |
python
|
def compute_wall_collision_point(self, wall):
'\n Returns the point where ball and wall intersect or False if they do not\n intersect.\n '
if (la.norm((wall.a - wall.b)) == 0):
return False
d_norm = np.dot(((wall.b - wall.a) / la.norm((wall.b - wall.a))), (self.position - wall.a))
if ((0 <= d_norm) and (d_norm <= la.norm((wall.b - wall.a)))):
d = (((d_norm * (wall.b - wall.a)) / la.norm((wall.b - wall.a))) + wall.a)
elif (((- self.radius) <= d_norm) and (d_norm < 0)):
d = wall.a
elif ((la.norm((wall.b - wall.a)) < d_norm) and (d_norm <= (la.norm((wall.b - wall.a)) + self.radius))):
d = wall.b
else:
return False
if (la.norm((self.position - d)) <= self.radius):
return d
return False
|
def compute_wall_collision_point(self, wall):
'\n Returns the point where ball and wall intersect or False if they do not\n intersect.\n '
if (la.norm((wall.a - wall.b)) == 0):
return False
d_norm = np.dot(((wall.b - wall.a) / la.norm((wall.b - wall.a))), (self.position - wall.a))
if ((0 <= d_norm) and (d_norm <= la.norm((wall.b - wall.a)))):
d = (((d_norm * (wall.b - wall.a)) / la.norm((wall.b - wall.a))) + wall.a)
elif (((- self.radius) <= d_norm) and (d_norm < 0)):
d = wall.a
elif ((la.norm((wall.b - wall.a)) < d_norm) and (d_norm <= (la.norm((wall.b - wall.a)) + self.radius))):
d = wall.b
else:
return False
if (la.norm((self.position - d)) <= self.radius):
return d
return False<|docstring|>Returns the point where ball and wall intersect or False if they do not
intersect.<|endoftext|>
|
109e53e69285198f43f4299d5a7963e021b55bef63a7b36e5eb9d7f8710b9628
|
def compute_ball_collision_forces(ball_a, ball_b, dt):
'\n Returns the two forces acting on `ball_a` and `ball_b` when\n they collide.\n '
force_a = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_a.velocity - ball_b.velocity), (ball_a.position - ball_b.position))) / (la.norm((ball_a.position - ball_b.position)) ** 2.0)) * (ball_a.position - ball_b.position))
force_b = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_b.velocity - ball_a.velocity), (ball_b.position - ball_a.position))) / (la.norm((ball_b.position - ball_a.position)) ** 2.0)) * (ball_b.position - ball_a.position))
return (force_a, force_b)
|
Returns the two forces acting on `ball_a` and `ball_b` when
they collide.
|
src/ball.py
|
compute_ball_collision_forces
|
dnagarkar/bubble-deflector
| 2 |
python
|
def compute_ball_collision_forces(ball_a, ball_b, dt):
'\n Returns the two forces acting on `ball_a` and `ball_b` when\n they collide.\n '
force_a = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_a.velocity - ball_b.velocity), (ball_a.position - ball_b.position))) / (la.norm((ball_a.position - ball_b.position)) ** 2.0)) * (ball_a.position - ball_b.position))
force_b = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_b.velocity - ball_a.velocity), (ball_b.position - ball_a.position))) / (la.norm((ball_b.position - ball_a.position)) ** 2.0)) * (ball_b.position - ball_a.position))
return (force_a, force_b)
|
def compute_ball_collision_forces(ball_a, ball_b, dt):
'\n Returns the two forces acting on `ball_a` and `ball_b` when\n they collide.\n '
force_a = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_a.velocity - ball_b.velocity), (ball_a.position - ball_b.position))) / (la.norm((ball_a.position - ball_b.position)) ** 2.0)) * (ball_a.position - ball_b.position))
force_b = ((((((((- 2.0) / dt) * ball_a.mass) * ball_b.mass) / (ball_a.mass + ball_b.mass)) * np.dot((ball_b.velocity - ball_a.velocity), (ball_b.position - ball_a.position))) / (la.norm((ball_b.position - ball_a.position)) ** 2.0)) * (ball_b.position - ball_a.position))
return (force_a, force_b)<|docstring|>Returns the two forces acting on `ball_a` and `ball_b` when
they collide.<|endoftext|>
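Swapping ball_a and ball_b leaves the dot product unchanged but flips the sign of the position difference, so the two forces above are equal and opposite and the pair conserves momentum. A small standalone check with a hypothetical stand-in for the record's Ball class:

import numpy as np
import numpy.linalg as la

class _Ball:  # minimal stand-in; the record's Ball class carries more state
    def __init__(self, mass, position, velocity):
        self.mass = mass
        self.position = np.array(position, dtype=float)
        self.velocity = np.array(velocity, dtype=float)

def impulse(p, q, dt):
    # Same expression as compute_ball_collision_forces, written once per ball.
    rel_v, rel_x = p.velocity - q.velocity, p.position - q.position
    k = (-2.0 / dt) * p.mass * q.mass / (p.mass + q.mass)
    return k * np.dot(rel_v, rel_x) / la.norm(rel_x) ** 2 * rel_x

a, b, dt = _Ball(1.0, [0, 0], [1, 0]), _Ball(2.0, [1, 0], [-1, 0]), 0.01
force_a, force_b = impulse(a, b, dt), impulse(b, a, dt)
print(np.allclose(force_a + force_b, 0))  # True: Newton's third law holds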
|
60ebb56e820fcf30fc5a013e8b95328286905a0176f0d6c5817a96d4d137eed5
|
def reset_ball_collision_positions(ball_a, ball_b):
'\n Set the positions of `ball_a` and `ball_b` so that they intersect at\n exactly one point.\n '
distance = la.norm((ball_a.position - ball_b.position))
error = (((((ball_a.radius + ball_b.radius) - distance) / 2.0) * (ball_a.position - ball_b.position)) / distance)
ball_a.position = (ball_a.position + error)
ball_b.position = (ball_b.position - error)
|
Set the positions of `ball_a` and `ball_b` so that they intersect at
exactly one point.
|
src/ball.py
|
reset_ball_collision_positions
|
dnagarkar/bubble-deflector
| 2 |
python
|
def reset_ball_collision_positions(ball_a, ball_b):
'\n Set the positions of `ball_a` and `ball_b` so that they intersect at\n exactly one point.\n '
distance = la.norm((ball_a.position - ball_b.position))
error = (((((ball_a.radius + ball_b.radius) - distance) / 2.0) * (ball_a.position - ball_b.position)) / distance)
ball_a.position = (ball_a.position + error)
ball_b.position = (ball_b.position - error)
|
def reset_ball_collision_positions(ball_a, ball_b):
'\n Set the positions of `ball_a` and `ball_b` so that they intersect at\n exactly one point.\n '
distance = la.norm((ball_a.position - ball_b.position))
error = (((((ball_a.radius + ball_b.radius) - distance) / 2.0) * (ball_a.position - ball_b.position)) / distance)
ball_a.position = (ball_a.position + error)
ball_b.position = (ball_b.position - error)<|docstring|>Set the positions of `ball_a` and `ball_b` so that they intersect at
exactly one point.<|endoftext|>
|
398bdb3cd8c34c6bd455c9fa918abdb9b2501cf5d02b423e4bdd5e78df6a6c57
|
def _parse(s, handler):
'Simple configuration parser.'
def stmts(obj, next, token):
'Process statements until EOF.'
while (token is not EOF):
token = assignlist(obj, next, token)
def assign(obj, next, token):
if (not isinstance(token, six.string_types)):
raise ParserError(("term expected, got '%s'" % token))
_key = token
token = next()
if (_key.startswith('!') and (token is not EQ) and (token is not OPEN) and (token is not CLOSE)):
if handler:
obj.update(handler(_key, token, 'object'))
elif (token is EQ):
token = next()
obj[_key] = value(obj, next, token)
elif (token is OPEN):
token = next()
subobj = OrderedDict()
while (token is not CLOSE):
token = assignlist(subobj, next, token)
obj[_key] = subobj
else:
raise ParserError(("expected '=' or '{' got '%s'" % token))
return token
def assignlist(obj, next, token):
while True:
assign(obj, next, token)
token = next()
if (type(token) != str):
return token
def value(obj, next, token):
if (token is OPEN):
token = next()
_value = []
while (token is not CLOSE):
if (token is OPEN):
obj = {}
while True:
token = next()
if (token is CLOSE):
break
assign(obj, next, token)
_value.append(obj)
elif token.startswith('!'):
key = token
token = next()
if (token is CLOSE):
raise ParserError("expected token, got '}'")
_value.extend(handler(key, token, 'value'))
else:
_value.append(token)
token = next()
return _value
if (not isinstance(token, six.string_types)):
raise ParserError(('expected string token, got %r' % token))
try:
return json.loads(token)
except ValueError:
return token
lexer = Lexer()
tokenizer = lexer.tokenize(s)
def pop_token():
return next(tokenizer)
token = pop_token()
result = OrderedDict()
stmts(result, pop_token, token)
return result
|
Simple configuration parser.
|
structprop/__init__.py
|
_parse
|
edgeware/structprop
| 1 |
python
|
def _parse(s, handler):
def stmts(obj, next, token):
'Process statements until EOF.'
while (token is not EOF):
token = assignlist(obj, next, token)
def assign(obj, next, token):
if (not isinstance(token, six.string_types)):
raise ParserError(("term expected, got '%s'" % token))
_key = token
token = next()
if (_key.startswith('!') and (token is not EQ) and (token is not OPEN) and (token is not CLOSE)):
if handler:
obj.update(handler(_key, token, 'object'))
elif (token is EQ):
token = next()
obj[_key] = value(obj, next, token)
elif (token is OPEN):
token = next()
subobj = OrderedDict()
while (token is not CLOSE):
token = assignlist(subobj, next, token)
obj[_key] = subobj
else:
raise ParserError(("expected '=' or '{' got '%s'" % token))
return token
def assignlist(obj, next, token):
while True:
assign(obj, next, token)
token = next()
if (type(token) != str):
return token
def value(obj, next, token):
if (token is OPEN):
token = next()
_value = []
while (token is not CLOSE):
if (token is OPEN):
obj = {}
while True:
token = next()
if (token is CLOSE):
break
assign(obj, next, token)
_value.append(obj)
elif token.startswith('!'):
key = token
token = next()
if (token is CLOSE):
raise ParserError("expected token, got '}'")
_value.extend(handler(key, token, 'value'))
else:
_value.append(token)
token = next()
return _value
if (not isinstance(token, six.string_types)):
raise ParserError(('expected string token, got %r' % token))
try:
return json.loads(token)
except ValueError:
return token
lexer = Lexer()
tokenizer = lexer.tokenize(s)
def pop_token():
return next(tokenizer)
token = pop_token()
result = OrderedDict()
stmts(result, pop_token, token)
return result
|
def _parse(s, handler):
def stmts(obj, next, token):
'Process statements until EOF.'
while (token is not EOF):
token = assignlist(obj, next, token)
def assign(obj, next, token):
if (not isinstance(token, six.string_types)):
raise ParserError(("term expected, got '%s'" % token))
_key = token
token = next()
if (_key.startswith('!') and (token is not EQ) and (token is not OPEN) and (token is not CLOSE)):
if handler:
obj.update(handler(_key, token, 'object'))
elif (token is EQ):
token = next()
obj[_key] = value(obj, next, token)
elif (token is OPEN):
token = next()
subobj = OrderedDict()
while (token is not CLOSE):
token = assignlist(subobj, next, token)
obj[_key] = subobj
else:
raise ParserError(("expected '=' or '{' got '%s'" % token))
return token
def assignlist(obj, next, token):
while True:
assign(obj, next, token)
token = next()
if (type(token) != str):
return token
def value(obj, next, token):
if (token is OPEN):
token = next()
_value = []
while (token is not CLOSE):
if (token is OPEN):
obj = {}
while True:
token = next()
if (token is CLOSE):
break
assign(obj, next, token)
_value.append(obj)
elif token.startswith('!'):
key = token
token = next()
if (token is CLOSE):
raise ParserError("expected token, got '}'")
_value.extend(handler(key, token, 'value'))
else:
_value.append(token)
token = next()
return _value
if (not isinstance(token, six.string_types)):
raise ParserError(('expected string token, got %r' % token))
try:
return json.loads(token)
except ValueError:
return token
lexer = Lexer()
tokenizer = lexer.tokenize(s)
def pop_token():
return next(tokenizer)
token = pop_token()
result = OrderedDict()
stmts(result, pop_token, token)
return result<|docstring|>Simple configuration parser.<|endoftext|>
|