body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars) |
---|---|---|---|---|---|---|---|---|---|
6bae0bcc6da8656c9a64ffedc916dbc4baddd38a911b3405e475aa329805c225 | @property
def standard_bias(self) -> int:
'\n Offset in minutes from lBias during standard time. \n\n :return: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._standard_bias | Offset in minutes from lBias during standard time.
:return: The standard_bias of this MapiCalendarTimeZoneInfoDto.
:rtype: int | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | standard_bias | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @property
def standard_bias(self) -> int:
'\n Offset in minutes from lBias during standard time. \n\n :return: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._standard_bias | @property
def standard_bias(self) -> int:
'\n Offset in minutes from lBias during standard time. \n\n :return: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._standard_bias<|docstring|>Offset in minutes from lBias during standard time.
:return: The standard_bias of this MapiCalendarTimeZoneInfoDto.
:rtype: int<|endoftext|> |
4c510c33e812abb8a21974687e3cad9df8b038f6919a22edbe468da48c0644e9 | @standard_bias.setter
def standard_bias(self, standard_bias: int):
'\n Offset in minutes from lBias during standard time. \n\n :param standard_bias: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (standard_bias is None):
raise ValueError('Invalid value for `standard_bias`, must not be `None`')
self._standard_bias = standard_bias | Offset in minutes from lBias during standard time.
:param standard_bias: The standard_bias of this MapiCalendarTimeZoneInfoDto.
:type: int | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | standard_bias | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @standard_bias.setter
def standard_bias(self, standard_bias: int):
'\n Offset in minutes from lBias during standard time. \n\n :param standard_bias: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (standard_bias is None):
raise ValueError('Invalid value for `standard_bias`, must not be `None`')
self._standard_bias = standard_bias | @standard_bias.setter
def standard_bias(self, standard_bias: int):
'\n Offset in minutes from lBias during standard time. \n\n :param standard_bias: The standard_bias of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (standard_bias is None):
raise ValueError('Invalid value for `standard_bias`, must not be `None`')
self._standard_bias = standard_bias<|docstring|>Offset in minutes from lBias during standard time.
:param standard_bias: The standard_bias of this MapiCalendarTimeZoneInfoDto.
:type: int<|endoftext|> |
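The two rows above show the Swagger-generated property pattern used throughout this DTO: a plain getter plus a setter that rejects `None`. A minimal usage sketch, assuming `MapiCalendarTimeZoneInfoDto` can be imported from `AsposeEmailCloudSdk.models` and constructed without arguments (neither is shown in these rows):

```python
# Hedged sketch: the import path and no-argument constructor are assumptions.
from AsposeEmailCloudSdk.models import MapiCalendarTimeZoneInfoDto

info = MapiCalendarTimeZoneInfoDto()
info.standard_bias = -60       # minutes offset from lBias during standard time
print(info.standard_bias)      # -60

try:
    info.standard_bias = None  # the setter validates against None
except ValueError as err:
    print(err)                 # Invalid value for `standard_bias`, must not be `None`
```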
a63342576d12189e4cf57fc71ddf4e4fd37fecebf7fcf4b82328eb12443940a6 | @property
def standard_date(self) -> MapiCalendarTimeZoneRuleDto:
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :return: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :rtype: MapiCalendarTimeZoneRuleDto\n '
return self._standard_date | Date and local time that indicate when to begin using the StandardBias.
:return: The standard_date of this MapiCalendarTimeZoneInfoDto.
:rtype: MapiCalendarTimeZoneRuleDto | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | standard_date | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @property
def standard_date(self) -> MapiCalendarTimeZoneRuleDto:
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :return: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :rtype: MapiCalendarTimeZoneRuleDto\n '
return self._standard_date | @property
def standard_date(self) -> MapiCalendarTimeZoneRuleDto:
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :return: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :rtype: MapiCalendarTimeZoneRuleDto\n '
return self._standard_date<|docstring|>Date and local time that indicate when to begin using the StandardBias.
:return: The standard_date of this MapiCalendarTimeZoneInfoDto.
:rtype: MapiCalendarTimeZoneRuleDto<|endoftext|> |
28c40c46bca4f2ead1ec3bfa2fefea1446a4d3f7d87aacceef349ee1cc65ab96 | @standard_date.setter
def standard_date(self, standard_date: MapiCalendarTimeZoneRuleDto):
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :param standard_date: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :type: MapiCalendarTimeZoneRuleDto\n '
self._standard_date = standard_date | Date and local time that indicate when to begin using the StandardBias.
:param standard_date: The standard_date of this MapiCalendarTimeZoneInfoDto.
:type: MapiCalendarTimeZoneRuleDto | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | standard_date | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @standard_date.setter
def standard_date(self, standard_date: MapiCalendarTimeZoneRuleDto):
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :param standard_date: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :type: MapiCalendarTimeZoneRuleDto\n '
self._standard_date = standard_date | @standard_date.setter
def standard_date(self, standard_date: MapiCalendarTimeZoneRuleDto):
'\n Date and local time that indicate when to begin using the StandardBias. \n\n :param standard_date: The standard_date of this MapiCalendarTimeZoneInfoDto.\n :type: MapiCalendarTimeZoneRuleDto\n '
self._standard_date = standard_date<|docstring|>Date and local time that indicate when to begin using the StandardBias.
:param standard_date: The standard_date of this MapiCalendarTimeZoneInfoDto.
:type: MapiCalendarTimeZoneRuleDto<|endoftext|> |
ebddf98cea11a0812f45c920854de6c10f2b00771ac9d94f366e84f1f7a0e731 | @property
def time_zone_flags(self) -> List[str]:
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :return: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :rtype: list[str]\n '
return self._time_zone_flags | Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg
:return: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.
:rtype: list[str] | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | time_zone_flags | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @property
def time_zone_flags(self) -> List[str]:
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :return: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :rtype: list[str]\n '
return self._time_zone_flags | @property
def time_zone_flags(self) -> List[str]:
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :return: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :rtype: list[str]\n '
return self._time_zone_flags<|docstring|>Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg
:return: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.
:rtype: list[str]<|endoftext|> |
e5baeb6c385d777182eec6b5d488c69694067de3af4523b838fd8bf2e1298826 | @time_zone_flags.setter
def time_zone_flags(self, time_zone_flags: List[str]):
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :param time_zone_flags: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :type: list[str]\n '
self._time_zone_flags = time_zone_flags | Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg
:param time_zone_flags: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.
:type: list[str] | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | time_zone_flags | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @time_zone_flags.setter
def time_zone_flags(self, time_zone_flags: List[str]):
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :param time_zone_flags: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :type: list[str]\n '
self._time_zone_flags = time_zone_flags | @time_zone_flags.setter
def time_zone_flags(self, time_zone_flags: List[str]):
'\n Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg\n\n :param time_zone_flags: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.\n :type: list[str]\n '
self._time_zone_flags = time_zone_flags<|docstring|>Individual bit flags that specify information about this TimeZoneRule. Items: Enumerates the individual bit flags that specify information about TimeZoneRule. Enum, available values: TzRuleFlagRecurCurrentTzReg, TzRuleFlagEffectiveTzReg
:param time_zone_flags: The time_zone_flags of this MapiCalendarTimeZoneInfoDto.
:type: list[str]<|endoftext|> |
54ecca88f3b69ac7fbd23fbff276e66382635f46e58cd1e2221b21cae9e0518e | @property
def year(self) -> int:
'\n Year in which this rule is scheduled to take effect. \n\n :return: The year of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._year | Year in which this rule is scheduled to take effect.
:return: The year of this MapiCalendarTimeZoneInfoDto.
:rtype: int | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | year | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @property
def year(self) -> int:
'\n Year in which this rule is scheduled to take effect. \n\n :return: The year of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._year | @property
def year(self) -> int:
'\n Year in which this rule is scheduled to take effect. \n\n :return: The year of this MapiCalendarTimeZoneInfoDto.\n :rtype: int\n '
return self._year<|docstring|>Year in which this rule is scheduled to take effect.
:return: The year of this MapiCalendarTimeZoneInfoDto.
:rtype: int<|endoftext|> |
b8298b00a8b8d42c185ee6786bf19402ff7bc1606a598d278a763dc369ba5e92 | @year.setter
def year(self, year: int):
'\n Year in which this rule is scheduled to take effect. \n\n :param year: The year of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (year is None):
raise ValueError('Invalid value for `year`, must not be `None`')
self._year = year | Year in which this rule is scheduled to take effect.
:param year: The year of this MapiCalendarTimeZoneInfoDto.
:type: int | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | year | aspose-email-cloud/aspose-email-cloud-python | 1 | python | @year.setter
def year(self, year: int):
'\n Year in which this rule is scheduled to take effect. \n\n :param year: The year of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (year is None):
raise ValueError('Invalid value for `year`, must not be `None`')
self._year = year | @year.setter
def year(self, year: int):
'\n Year in which this rule is scheduled to take effect. \n\n :param year: The year of this MapiCalendarTimeZoneInfoDto.\n :type: int\n '
if (year is None):
raise ValueError('Invalid value for `year`, must not be `None`')
self._year = year<|docstring|>Year in which this rule is scheduled to take effect.
:param year: The year of this MapiCalendarTimeZoneInfoDto.
:type: int<|endoftext|> |
137ba0f026bd6074febc2e7ebe1fec840dba70990f936f32b47eaf0fb048bd4a | def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | Returns the model properties as a dict | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | to_dict | aspose-email-cloud/aspose-email-cloud-python | 1 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
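`to_dict` walks `swagger_types` and serializes recursively: any attribute (or list/dict element) exposing its own `to_dict` is converted in place. A self-contained sketch of the same recursion; the `Rule`/`Info` classes and their fields are invented for illustration, and plain `dict` iteration replaces `six.iteritems` so the sketch has no dependency:

```python
class Rule:
    swagger_types = {'hour': 'int'}

    def __init__(self, hour):
        self.hour = hour

    def to_dict(self):
        return {'hour': self.hour}


class Info:
    swagger_types = {'year': 'int', 'standard_date': 'Rule', 'flags': 'list[str]'}

    def __init__(self):
        self.year = 2024
        self.standard_date = Rule(hour=2)
        self.flags = ['TzRuleFlagEffectiveTzReg']

    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # lists may mix nested models and plain values
                result[attr] = [x.to_dict() if hasattr(x, 'to_dict') else x for x in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()  # nested model: recurse
            else:
                result[attr] = value
            # (the dict branch from the original is omitted; same idea per item)
        return result


print(Info().to_dict())
# {'year': 2024, 'standard_date': {'hour': 2}, 'flags': ['TzRuleFlagEffectiveTzReg']}
```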
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | to_str | aspose-email-cloud/aspose-email-cloud-python | 1 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | __repr__ | aspose-email-cloud/aspose-email-cloud-python | 1 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
94e204bc24f28154a2b602c5c6760071dc3140bcc0c099607cbe3704c06aa551 | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, MapiCalendarTimeZoneInfoDto)):
return False
return (self.__dict__ == other.__dict__) | Returns true if both objects are equal | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | __eq__ | aspose-email-cloud/aspose-email-cloud-python | 1 | python | def __eq__(self, other):
if (not isinstance(other, MapiCalendarTimeZoneInfoDto)):
return False
return (self.__dict__ == other.__dict__) | def __eq__(self, other):
if (not isinstance(other, MapiCalendarTimeZoneInfoDto)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|> |
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42 | def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | Returns true if both objects are not equal | sdk/AsposeEmailCloudSdk/models/mapi_calendar_time_zone_info_dto.py | __ne__ | aspose-email-cloud/aspose-email-cloud-python | 1 | python | def __ne__(self, other):
return (not (self == other)) | def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|> |
2e895bad81962fbe35450fb1fe098ac5b90ea8d0d7135f4b478969459902f677 | def __init__(self, elasticsearch_follow, index, time_delta=60, processor=None):
'\n :param elasticsearch_follow: The instance of ElasticsearchFollow to use for yielding new lines.\n :param index: The index to use to fetch data.\n :param time_delta: Denotes how many seconds to look into the past when fetching lines.\n :param processor: The log processor which should be used to process the lines before yielding them.\n '
self.elasticsearch_follow = elasticsearch_follow
self.index = index
self.time_delta = time_delta
self.processor = processor | :param elasticsearch_follow: The instance of ElasticsearchFollow to use for yielding new lines.
:param index: The index to use to fetch data.
:param time_delta: Denotes how many seconds to look into the past when fetching lines.
:param processor: The log processor which should be used to process the lines before yielding them. | elasticsearch_follow/follower.py | __init__ | mdreem/elasticsearch_follow | 1 | python | def __init__(self, elasticsearch_follow, index, time_delta=60, processor=None):
'\n :param elasticsearch_follow: The instance of ElasticsearchFollow to use for yielding new lines.\n :param index: The index to use to fetch data.\n :param time_delta: Denotes how many seconds to look into the past when fetching lines.\n :param processor: The log processor which should be used to process the lines before yielding them.\n '
self.elasticsearch_follow = elasticsearch_follow
self.index = index
self.time_delta = time_delta
self.processor = processor | def __init__(self, elasticsearch_follow, index, time_delta=60, processor=None):
'\n :param elasticsearch_follow: The instance of ElasticsearchFollow to use for yielding new lines.\n :param index: The index to use to fetch data.\n :param time_delta: Denotes how many seconds to look into the past when fetching lines.\n :param processor: The log processor which should be used to process the lines before yielding them.\n '
self.elasticsearch_follow = elasticsearch_follow
self.index = index
self.time_delta = time_delta
self.processor = processor<|docstring|>:param elasticsearch_follow: The instance of ElasticsearchFollow to use for yielding new lines.
:param index: The index to use to fetch data.
:param time_delta: Denotes how many seconds to look into the past when fetching lines.
:param processor: The log processor which should be used to process the lines before yielding them.<|endoftext|> |
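A construction sketch for `Follower`. The `ElasticsearchFollow(elasticsearch=...)` signature and the package-level names are assumptions based on the `elasticsearch_follow` package, not shown in this row:

```python
from elasticsearch import Elasticsearch
import elasticsearch_follow

es = Elasticsearch(['http://localhost:9200'])
es_follow = elasticsearch_follow.ElasticsearchFollow(elasticsearch=es)

follower = elasticsearch_follow.Follower(
    elasticsearch_follow=es_follow,
    index='logstash-*',   # index pattern is illustrative
    time_delta=60,        # look 60 s into the past on each query
)
```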
5f7c72d62a814046c98402ef1870abaaffcfff0298e1183b499106133eefeb70 | def generator(self):
'\n Creates a generator which will yield new lines until the most recent query has no more lines.\n :return: A generator.\n '
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=tz.UTC)
delta = datetime.timedelta(seconds=self.time_delta)
for line in self.elasticsearch_follow.get_new_lines(self.index, (now - delta)):
self.elasticsearch_follow.prune_before((now - delta))
if self.processor:
processed_line = self.processor.process_line(line)
if processed_line:
(yield processed_line)
else:
(yield line) | Creates a generator which will yield new lines until the most recent query has no more lines.
:return: A generator. | elasticsearch_follow/follower.py | generator | mdreem/elasticsearch_follow | 1 | python | def generator(self):
'\n Creates a generator which will yield new lines until the most recent query has no more lines.\n :return: A generator.\n '
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=tz.UTC)
delta = datetime.timedelta(seconds=self.time_delta)
for line in self.elasticsearch_follow.get_new_lines(self.index, (now - delta)):
self.elasticsearch_follow.prune_before((now - delta))
if self.processor:
processed_line = self.processor.process_line(line)
if processed_line:
(yield processed_line)
else:
(yield line) | def generator(self):
'\n Creates a generator which will yield new lines until the most recent query has no more lines.\n :return: A generator.\n '
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=tz.UTC)
delta = datetime.timedelta(seconds=self.time_delta)
for line in self.elasticsearch_follow.get_new_lines(self.index, (now - delta)):
self.elasticsearch_follow.prune_before((now - delta))
if self.processor:
processed_line = self.processor.process_line(line)
if processed_line:
(yield processed_line)
else:
(yield line)<|docstring|>Creates a generator which will yield new lines until the most recent query has no more lines.
:return: A generator.<|endoftext|> |
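Each `generator()` pass yields whatever arrived within the last `time_delta` seconds and then stops, and it calls `prune_before` to bound the class's internal state. A tail-style consumer therefore polls it in a loop; this sketch reuses the `follower` from the previous snippet, and the one-second sleep is an arbitrary polling interval:

```python
import time

while True:
    for line in follower.generator():  # yields lines from the last time_delta seconds
        print(line)
    time.sleep(1)                      # arbitrary polling interval
```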
463ecfe383bbc673fa3e9e6768ceae76f1bb3742e03161bff9d37e5db175e57b | def __init__(self, origin, vectors):
'Initializes the Axes2D object'
self.origin = asarray(origin)
self.vectors = asarray(vectors) | Initializes the Axes2D object | src/compas_plotters/core/helpers.py | __init__ | tkmmark/compas | 235 | python | def __init__(self, origin, vectors):
self.origin = asarray(origin)
self.vectors = asarray(vectors) | def __init__(self, origin, vectors):
self.origin = asarray(origin)
self.vectors = asarray(vectors)<|docstring|>Initializes the Axes2D object<|endoftext|> |
1ff61910685d3a2257265f32e332dcc79382f4b676dd2fcf71a078e49a931b27 | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n\n '
assert_axes_dimension(axes, 2)
o = self.origin
xy = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(0, 1)])], 'r-')
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(1, 1)])], 'g-') | Plots the axes object
Parameters
----------
axes : object
The matplotlib axes object. | src/compas_plotters/core/helpers.py | plot | tkmmark/compas | 235 | python | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n\n '
assert_axes_dimension(axes, 2)
o = self.origin
xy = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(0, 1)])], 'r-')
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(1, 1)])], 'g-') | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n\n '
assert_axes_dimension(axes, 2)
o = self.origin
xy = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(0, 1)])], 'r-')
axes.plot([o[(0, 0)], (o[(0, 0)] + xy[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xy[(1, 1)])], 'g-')<|docstring|>Plots the axes object
Parameters
----------
axes : object
The matplotlib axes object.<|endoftext|> |
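A usage sketch for `Axes2D`. The `o[0, 0]`-style indexing in `plot` implies `origin` is a 1x2 row vector and `vectors` a 2x2 array of direction vectors; the import path below is inferred from the row's `path` column and is otherwise an assumption:

```python
import matplotlib.pyplot as plt
from compas_plotters.core.helpers import Axes2D

axes2d = Axes2D(origin=[[0.0, 0.0]],
                vectors=[[1.0, 0.0],   # x axis, drawn red
                         [0.0, 1.0]])  # y axis, drawn green

fig, ax = plt.subplots()
axes2d.plot(ax)
ax.set_aspect('equal')
plt.show()
```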
334e028a769c04d93c83996babeb855a5fe8346a255b9f2b86caec0a6a1812b1 | def __init__(self, origin, vectors, colors=None):
'Initializes the Axes3D object'
self.origin = asarray(origin)
self.vectors = asarray(vectors)
if (not colors):
colors = ('r', 'g', 'b')
self.colors = colors | Initializes the Axes3D object | src/compas_plotters/core/helpers.py | __init__ | tkmmark/compas | 235 | python | def __init__(self, origin, vectors, colors=None):
self.origin = asarray(origin)
self.vectors = asarray(vectors)
if (not colors):
colors = ('r', 'g', 'b')
self.colors = colors | def __init__(self, origin, vectors, colors=None):
self.origin = asarray(origin)
self.vectors = asarray(vectors)
if (not colors):
colors = ('r', 'g', 'b')
self.colors = colors<|docstring|>Initializes the Axes3D object<|endoftext|> |
8808f6249620f9eb851de3a9b6ab807d911f907b8c2d550ff75b80d814f4ebb8 | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n '
assert_axes_dimension(axes, 3)
o = self.origin
xyz = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(0, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(0, 2)])], '{0}-'.format(self.colors[0]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(1, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(1, 2)])], '{0}-'.format(self.colors[1]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(2, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(2, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(2, 2)])], '{0}-'.format(self.colors[2]), linewidth=3) | Plots the axes object
Parameters
----------
axes : object
The matplotlib axes object. | src/compas_plotters/core/helpers.py | plot | tkmmark/compas | 235 | python | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n '
assert_axes_dimension(axes, 3)
o = self.origin
xyz = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(0, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(0, 2)])], '{0}-'.format(self.colors[0]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(1, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(1, 2)])], '{0}-'.format(self.colors[1]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(2, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(2, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(2, 2)])], '{0}-'.format(self.colors[2]), linewidth=3) | def plot(self, axes):
'Plots the axes object\n\n Parameters\n ----------\n axes : object\n The matplotlib axes object.\n '
assert_axes_dimension(axes, 3)
o = self.origin
xyz = self.vectors
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(0, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(0, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(0, 2)])], '{0}-'.format(self.colors[0]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(1, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(1, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(1, 2)])], '{0}-'.format(self.colors[1]), linewidth=3)
axes.plot([o[(0, 0)], (o[(0, 0)] + xyz[(2, 0)])], [o[(0, 1)], (o[(0, 1)] + xyz[(2, 1)])], [o[(0, 2)], (o[(0, 2)] + xyz[(2, 2)])], '{0}-'.format(self.colors[2]), linewidth=3)<|docstring|>Plots the axes object
Parameters
----------
axes : object
The matplotlib axes object.<|endoftext|> |
36e725474990a2f4f99464da9c593ba7e2b5aff334440effc6212f4387ffd7e3 | def longestCommonPrefix(self, strs):
'\n :type strs: List[str]\n :rtype: str\n '
comStr = ''
if (len(strs) == 0):
return comStr
firstStr = strs[0]
for i in range(len(firstStr)):
ch = firstStr[i:(i + 1)]
for j in range(len(strs)):
nowStr = strs[j]
if (len(nowStr) < i):
return comStr
if (nowStr[i:(i + 1)] != ch):
return comStr
comStr += ch
return comStr | :type strs: List[str]
:rtype: str | leetcode/0014.py | longestCommonPrefix | mndream/MyOJ | 1 | python | def longestCommonPrefix(self, strs):
'\n :type strs: List[str]\n :rtype: str\n '
comStr =
if (len(strs) == 0):
return comStr
firstStr = strs[0]
for i in range(len(firstStr)):
ch = firstStr[i:(i + 1)]
for j in range(len(strs)):
nowStr = strs[j]
if (len(nowStr) < i):
return comStr
if (nowStr[i:(i + 1)] != ch):
return comStr
comStr += ch
return comStr | def longestCommonPrefix(self, strs):
'\n :type strs: List[str]\n :rtype: str\n '
comStr =
if (len(strs) == 0):
return comStr
firstStr = strs[0]
for i in range(len(firstStr)):
ch = firstStr[i:(i + 1)]
for j in range(len(strs)):
nowStr = strs[j]
if (len(nowStr) < i):
return comStr
if (nowStr[i:(i + 1)] != ch):
return comStr
comStr += ch
return comStr<|docstring|>:type strs: List[str]
:rtype: str<|endoftext|> |
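The solution scans the first string character by character and returns at the first mismatch (or when a shorter string runs out), so it runs in time linear in the total number of characters. A quick usage check, plus the standard-library equivalent for comparison; despite its name, `os.path.commonprefix` computes a character-wise common prefix:

```python
sol = Solution()  # assumes the LeetCode-style wrapper class around the method
print(sol.longestCommonPrefix(['flower', 'flow', 'flight']))  # 'fl'
print(sol.longestCommonPrefix(['dog', 'racecar', 'car']))     # ''
print(sol.longestCommonPrefix([]))                            # ''

import os.path
print(os.path.commonprefix(['flower', 'flow', 'flight']))     # 'fl'
```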
4073adf1eeebac6871eb5ddf064cb233b3c42ac474f8297b1d8106c440673c65 | def _fit(self, dm, binned, cells=None, noncovwarn=False):
'\n Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization\n strength parameter alpha, which is the strength of the ridge regularization term.\n\n Parameters\n ----------\n dm : numpy.ndarray\n Design matrix, in which rows are observations and columns are regressor values. Should\n NOT contain a bias column for the intercept. Scikit-learn handles that.\n binned : numpy.ndarray\n Vector of observed spike counts which we seek to predict. Must be of the same length\n as dm.shape[0]\n alpha : float\n Regularization strength, applied as a multiplicative constant on the ridge regularization.\n cells : list\n List of cell labels for columns in binned. Will default to all cells in model if None\n is passed. Must be of the same length as columns in binned. By default None.\n '
if (cells is None):
cells = self.clu_ids.flatten()
if (cells.shape[0] != binned.shape[1]):
raise ValueError('Length of cells does not match shape of binned')
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
intercepts = pd.Series(index=cells, name='intercepts')
nonconverged = []
for cell in tqdm(cells, 'Fitting units:', leave=False):
cell_idx = np.argwhere((cells == cell))[(0, 0)]
cellbinned = binned[(:, cell_idx)]
with catch_warnings(record=True) as w:
fitobj = PoissonRegressor(alpha=self.alpha, max_iter=300, fit_intercept=self.fit_intercept).fit(dm, cellbinned)
if (len(w) != 0):
nonconverged.append(cell)
coefs.at[cell] = fitobj.coef_
if self.fit_intercept:
intercepts.at[cell] = fitobj.intercept_
else:
intercepts.at[cell] = 0
if noncovwarn:
if (len(nonconverged) != 0):
warn(f'Fitting did not converge for some units: {nonconverged}')
return (coefs, intercepts) | Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization
strength parameter alpha, which is the strength of the ridge regularization term.
Parameters
----------
dm : numpy.ndarray
Design matrix, in which rows are observations and columns are regressor values. Should
NOT contain a bias column for the intercept. Scikit-learn handles that.
binned : numpy.ndarray
Vector of observed spike counts which we seek to predict. Must be of the same length
as dm.shape[0]
alpha : float
Regularization strength, applied as a multiplicative constant on the ridge regularization.
cells : list
List of cell labels for columns in binned. Will default to all cells in model if None
is passed. Must be of the same length as columns in binned. By default None. | brainbox/modeling/poisson.py | _fit | int-brain-lab/ibllib | 38 | python | def _fit(self, dm, binned, cells=None, noncovwarn=False):
'\n Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization\n strength parameter alpha, which is the strength of the ridge regularization term.\n\n Parameters\n ----------\n dm : numpy.ndarray\n Design matrix, in which rows are observations and columns are regressor values. Should\n NOT contain a bias column for the intercept. Scikit-learn handles that.\n binned : numpy.ndarray\n Vector of observed spike counts which we seek to predict. Must be of the same length\n as dm.shape[0]\n alpha : float\n Regularization strength, applied as a multiplicative constant on the ridge regularization.\n cells : list\n List of cell labels for columns in binned. Will default to all cells in model if None\n is passed. Must be of the same length as columns in binned. By default None.\n '
if (cells is None):
cells = self.clu_ids.flatten()
if (cells.shape[0] != binned.shape[1]):
raise ValueError('Length of cells does not match shape of binned')
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
intercepts = pd.Series(index=cells, name='intercepts')
nonconverged = []
for cell in tqdm(cells, 'Fitting units:', leave=False):
cell_idx = np.argwhere((cells == cell))[(0, 0)]
cellbinned = binned[(:, cell_idx)]
with catch_warnings(record=True) as w:
fitobj = PoissonRegressor(alpha=self.alpha, max_iter=300, fit_intercept=self.fit_intercept).fit(dm, cellbinned)
if (len(w) != 0):
nonconverged.append(cell)
coefs.at[cell] = fitobj.coef_
if self.fit_intercept:
intercepts.at[cell] = fitobj.intercept_
else:
intercepts.at[cell] = 0
if noncovwarn:
if (len(nonconverged) != 0):
warn(f'Fitting did not converge for some units: {nonconverged}')
return (coefs, intercepts) | def _fit(self, dm, binned, cells=None, noncovwarn=False):
'\n Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization\n strength parameter alpha, which is the strength of the ridge regularization term.\n\n Parameters\n ----------\n dm : numpy.ndarray\n Design matrix, in which rows are observations and columns are regressor values. Should\n NOT contain a bias column for the intercept. Scikit-learn handles that.\n binned : numpy.ndarray\n Vector of observed spike counts which we seek to predict. Must be of the same length\n as dm.shape[0]\n alpha : float\n Regularization strength, applied as a multiplicative constant on the ridge regularization.\n cells : list\n List of cell labels for columns in binned. Will default to all cells in model if None\n is passed. Must be of the same length as columns in binned. By default None.\n '
if (cells is None):
cells = self.clu_ids.flatten()
if (cells.shape[0] != binned.shape[1]):
raise ValueError('Length of cells does not match shape of binned')
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
intercepts = pd.Series(index=cells, name='intercepts')
nonconverged = []
for cell in tqdm(cells, 'Fitting units:', leave=False):
cell_idx = np.argwhere((cells == cell))[(0, 0)]
cellbinned = binned[(:, cell_idx)]
with catch_warnings(record=True) as w:
fitobj = PoissonRegressor(alpha=self.alpha, max_iter=300, fit_intercept=self.fit_intercept).fit(dm, cellbinned)
if (len(w) != 0):
nonconverged.append(cell)
coefs.at[cell] = fitobj.coef_
if self.fit_intercept:
intercepts.at[cell] = fitobj.intercept_
else:
intercepts.at[cell] = 0
if noncovwarn:
if (len(nonconverged) != 0):
warn(f'Fitting did not converge for some units: {nonconverged}')
return (coefs, intercepts)<|docstring|>Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization
strength parameter alpha, which is the strength of the ridge regularization term.
Parameters
----------
dm : numpy.ndarray
Design matrix, in which rows are observations and columns are regressor values. Should
NOT contain a bias column for the intercept. Scikit-learn handles that.
binned : numpy.ndarray
Vector of observed spike counts which we seek to predict. Must be of the same length
as dm.shape[0]
alpha : float
Regularization strength, applied as a multiplicative constant on the ridge regularization.
cells : list
List of cell labels for columns in binned. Will default to all cells in model if None
is passed. Must be of the same length as columns in binned. By default None.<|endoftext|> |
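`_fit` fits one ridge-regularized Poisson GLM per column of `binned`. A standalone sketch of a single such fit on synthetic data; the design matrix, weights, and rates below are invented, while `PoissonRegressor` is the real scikit-learn estimator with the same `alpha`, `max_iter`, and `fit_intercept` arguments used above:

```python
import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.default_rng(0)
dm = rng.normal(size=(1000, 5))                 # design matrix, no bias column
true_w = np.array([0.4, -0.3, 0.2, 0.0, 0.1])
rates = np.exp(dm @ true_w - 1.0)               # Poisson GLM uses a log link
binned = rng.poisson(rates)                     # synthetic spike counts for one unit

fitobj = PoissonRegressor(alpha=1.0, max_iter=300, fit_intercept=True)
fitobj.fit(dm, binned)
print(fitobj.coef_)       # should roughly recover true_w
print(fitobj.intercept_)  # should be near -1.0
```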
42692e9b885c7b270db5798d4425b4dabb17778ff8f999b5441729794a866d6a | def score(self, metric='dsq', **kwargs):
'\n Utility function for computing D^2 (pseudo R^2) on a given set of weights and\n intercepts. It is used in both model subsetting and the mother score() function of the GLM.\n\n Parameters\n ----------\n weights : pd.Series\n Series in which entries are numpy arrays containing the weights for a given cell.\n Indices should be cluster ids.\n intercepts : pd.Series\n Series in which elements are the intercept fit to each cell. Indices should match\n weights.\n dm : numpy.ndarray\n Design matrix. Should not contain the bias column. dm.shape[1] should be the same as\n the length of an element in weights.\n binned : numpy.ndarray\n nT x nCells array, in which each column is the binned spike train for a single unit.\n Should be the same number of rows as dm.\n\n Compute the squared deviance of the model, i.e. how much variance beyond the null model\n (a Poisson process with the same mean, defined by the intercept, at every time step) the\n model which was fit explains.\n For a detailed explanation see https://bookdown.org/egarpor/PM-UC3M/glm-deviance.html\n\n Returns\n -------\n pandas.Series\n A series in which the indices are cluster IDs and each entry is the D^2 for the model fit\n to that cluster\n '
assert (metric in ['dsq', 'rsq', 'negLog']), 'metric must be dsq, rsq or negLog'
assert ((len(kwargs) == 0) or (len(kwargs) == 4)), 'wrong input specification in score'
if ((not hasattr(self, 'coefs')) and ('weights' not in kwargs.keys())):
raise AttributeError('Fit was not run. Please run fit first.')
if hasattr(self, 'submodel_scores'):
return self.submodel_scores
if (len(kwargs) == 4):
(weights, intercepts, dm, binned) = (kwargs['weights'], kwargs['intercepts'], kwargs['dm'], kwargs['binned'])
else:
testmask = np.isin(self.trlabels, self.testinds).flatten()
(weights, intercepts, dm, binned) = (self.coefs, self.intercepts, self.dm[(testmask, :)], self.binnedspikes[testmask])
scores = pd.Series(index=weights.index, name='scores')
for cell in weights.index:
cell_idx = np.argwhere((self.clu_ids == cell))[(0, 0)]
wt = weights.loc[cell].reshape((- 1), 1)
bias = intercepts.loc[cell]
y = binned[(:, cell_idx)]
scores.at[cell] = self._scorer(wt, bias, dm, y)
return scores | Utility function for computing D^2 (pseudo R^2) on a given set of weights and
intercepts. It is used in both model subsetting and the mother score() function of the GLM.
Parameters
----------
weights : pd.Series
Series in which entries are numpy arrays containing the weights for a given cell.
Indices should be cluster ids.
intercepts : pd.Series
Series in which elements are the intercept fit to each cell. Indices should match
weights.
dm : numpy.ndarray
Design matrix. Should not contain the bias column. dm.shape[1] should be the same as
the length of an element in weights.
binned : numpy.ndarray
nT x nCells array, in which each column is the binned spike train for a single unit.
Should be the same number of rows as dm.
Compute the squared deviance of the model, i.e. how much variance beyond the null model
(a Poisson process with the same mean, defined by the intercept, at every time step) the
model which was fit explains.
For a detailed explanation see https://bookdown.org/egarpor/PM-UC3M/glm-deviance.html
Returns
-------
pandas.Series
A series in which the indices are cluster IDs and each entry is the D^2 for the model fit
to that cluster | brainbox/modeling/poisson.py | score | int-brain-lab/ibllib | 38 | python | def score(self, metric='dsq', **kwargs):
'\n Utility function for computing D^2 (pseudo R^2) on a given set of weights and\n intercepts. It is used in both model subsetting and the mother score() function of the GLM.\n\n Parameters\n ----------\n weights : pd.Series\n Series in which entries are numpy arrays containing the weights for a given cell.\n Indices should be cluster ids.\n intercepts : pd.Series\n Series in which elements are the intercept fit to each cell. Indices should match\n weights.\n dm : numpy.ndarray\n Design matrix. Should not contain the bias column. dm.shape[1] should be the same as\n the length of an element in weights.\n binned : numpy.ndarray\n nT x nCells array, in which each column is the binned spike train for a single unit.\n Should be the same number of rows as dm.\n\n Compute the squared deviance of the model, i.e. how much variance beyond the null model\n (a Poisson process with the same mean, defined by the intercept, at every time step) the\n model which was fit explains.\n For a detailed explanation see https://bookdown.org/egarpor/PM-UC3M/glm-deviance.html\n\n Returns\n -------\n pandas.Series\n A series in which the indices are cluster IDs and each entry is the D^2 for the model fit\n to that cluster\n '
assert (metric in ['dsq', 'rsq', 'negLog']), 'metric must be dsq, rsq or negLog'
assert ((len(kwargs) == 0) or (len(kwargs) == 4)), 'wrong input specification in score'
if ((not hasattr(self, 'coefs')) and ('weights' not in kwargs.keys())):
raise AttributeError('Fit was not run. Please run fit first.')
if hasattr(self, 'submodel_scores'):
return self.submodel_scores
if (len(kwargs) == 4):
(weights, intercepts, dm, binned) = (kwargs['weights'], kwargs['intercepts'], kwargs['dm'], kwargs['binned'])
else:
testmask = np.isin(self.trlabels, self.testinds).flatten()
(weights, intercepts, dm, binned) = (self.coefs, self.intercepts, self.dm[(testmask, :)], self.binnedspikes[testmask])
scores = pd.Series(index=weights.index, name='scores')
for cell in weights.index:
cell_idx = np.argwhere((self.clu_ids == cell))[(0, 0)]
wt = weights.loc[cell].reshape((- 1), 1)
bias = intercepts.loc[cell]
y = binned[(:, cell_idx)]
scores.at[cell] = self._scorer(wt, bias, dm, y)
return scores | def score(self, metric='dsq', **kwargs):
'\n Utility function for computing D^2 (pseudo R^2) on a given set of weights and\n intercepts. It is used in both model subsetting and the mother score() function of the GLM.\n\n Parameters\n ----------\n weights : pd.Series\n Series in which entries are numpy arrays containing the weights for a given cell.\n Indices should be cluster ids.\n intercepts : pd.Series\n Series in which elements are the intercept fit to each cell. Indices should match\n weights.\n dm : numpy.ndarray\n Design matrix. Should not contain the bias column. dm.shape[1] should be the same as\n the length of an element in weights.\n binned : numpy.ndarray\n nT x nCells array, in which each column is the binned spike train for a single unit.\n Should be the same number of rows as dm.\n\n Compute the squared deviance of the model, i.e. how much variance beyond the null model\n (a Poisson process with the same mean, defined by the intercept, at every time step) the\n model which was fit explains.\n For a detailed explanation see https://bookdown.org/egarpor/PM-UC3M/glm-deviance.html\n\n Returns\n -------\n pandas.Series\n A series in which the indices are cluster IDs and each entry is the D^2 for the model fit\n to that cluster\n '
assert (metric in ['dsq', 'rsq', 'negLog']), 'metric must be dsq, rsq or negLog'
assert ((len(kwargs) == 0) or (len(kwargs) == 4)), 'wrong input specification in score'
if ((not hasattr(self, 'coefs')) and ('weights' not in kwargs.keys())):
raise AttributeError('Fit was not run. Please run fit first.')
if hasattr(self, 'submodel_scores'):
return self.submodel_scores
if (len(kwargs) == 4):
(weights, intercepts, dm, binned) = (kwargs['weights'], kwargs['intercepts'], kwargs['dm'], kwargs['binned'])
else:
testmask = np.isin(self.trlabels, self.testinds).flatten()
(weights, intercepts, dm, binned) = (self.coefs, self.intercepts, self.dm[(testmask, :)], self.binnedspikes[testmask])
scores = pd.Series(index=weights.index, name='scores')
for cell in weights.index:
cell_idx = np.argwhere((self.clu_ids == cell))[(0, 0)]
wt = weights.loc[cell].reshape((- 1), 1)
bias = intercepts.loc[cell]
y = binned[(:, cell_idx)]
scores.at[cell] = self._scorer(wt, bias, dm, y)
return scores<|docstring|>Utility function for computing D^2 (pseudo R^2) on a given set of weights and
intercepts. It is used in both model subsetting and the mother score() function of the GLM.
Parameters
----------
weights : pd.Series
Series in which entries are numpy arrays containing the weights for a given cell.
Indices should be cluster ids.
intercepts : pd.Series
Series in which elements are the intercept fit to each cell. Indices should match
weights.
dm : numpy.ndarray
Design matrix. Should not contain the bias column. dm.shape[1] should be the same as
the length of an element in weights.
binned : numpy.ndarray
nT x nCells array, in which each column is the binned spike train for a single unit.
Should be the same number of rows as dm.
Compute the squared deviance of the model, i.e. how much variance beyond the null model
(a Poisson process with the same mean, defined by the intercept, at every time step) the
model which was fit explains.
For a detailed explanation see https://bookdown.org/egarpor/PM-UC3M/glm-deviance.html
Returns
-------
pandas.Series
A series in which the indices are cluster IDs and each entry is the D^2 for the model fit
to that cluster<|endoftext|> |
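The `self._scorer` the method delegates to is not shown in these rows; the following is a hedged sketch of the D^2 ("dsq") metric the docstring defines, one minus the ratio of the fitted model's Poisson deviance to that of the null, constant-mean model:

```python
import numpy as np

def poisson_dsq(w, b, dm, y):
    """Sketch of D^2 = 1 - dev(model) / dev(null) for a Poisson GLM (log link)."""
    mu = np.exp(dm @ w + b)             # fitted rate per time bin
    mu0 = np.full_like(mu, y.mean())    # null model: one rate for every bin
    def deviance(y, mu):
        # Poisson deviance; the y*log(y/mu) term is 0 by convention where y == 0
        term = np.where(y > 0, y * np.log(np.where(y > 0, y / mu, 1.0)), 0.0)
        return 2.0 * np.sum(term - (y - mu))
    return 1.0 - deviance(y, mu) / deviance(y, mu0)
```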
6ef67707d19279ae74bfe9c59e8931afdfd27efea5afbcf77523df935bc358e8 | def set_axes_equal(fignum):
"\n Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n "
fig = plt.figure(fignum)
ax = fig.gca(projection='3d')
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = (0.5 * np.max(np.abs((limits[(:, 1)] - limits[(:, 0)]))))
ax.set_xlim3d([(origin[0] - radius), (origin[0] + radius)])
ax.set_ylim3d([(origin[1] - radius), (origin[1] + radius)])
ax.set_zlim3d([(origin[2] - radius), (origin[2] + radius)]) | Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca(). | cython/gtsam/utils/plot.py | set_axes_equal | berndpfrommer/gtsam | 1 | python | def set_axes_equal(fignum):
"\n Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n "
fig = plt.figure(fignum)
ax = fig.gca(projection='3d')
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = (0.5 * np.max(np.abs((limits[(:, 1)] - limits[(:, 0)]))))
ax.set_xlim3d([(origin[0] - radius), (origin[0] + radius)])
ax.set_ylim3d([(origin[1] - radius), (origin[1] + radius)])
ax.set_zlim3d([(origin[2] - radius), (origin[2] + radius)]) | def set_axes_equal(fignum):
"\n Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n "
fig = plt.figure(fignum)
ax = fig.gca(projection='3d')
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = (0.5 * np.max(np.abs((limits[(:, 1)] - limits[(:, 0)]))))
ax.set_xlim3d([(origin[0] - radius), (origin[0] + radius)])
ax.set_ylim3d([(origin[1] - radius), (origin[1] + radius)])
ax.set_zlim3d([(origin[2] - radius), (origin[2] + radius)])<|docstring|>Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().<|endoftext|> |
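Typical use: draw on a 3D figure, then call `set_axes_equal` with the figure number rather than the Axes object. Note the helper relies on `fig.gca(projection='3d')`, an API that was removed in matplotlib 3.6, so this sketch assumes an older matplotlib:

```python
import matplotlib.pyplot as plt

fig = plt.figure(1)
ax = fig.gca(projection='3d')     # older-matplotlib API, as in the helper itself
ax.plot([0, 1], [0, 2], [0, 3])   # deliberately unequal extents
set_axes_equal(1)                 # rescales all three axes to a common radius
plt.show()
```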
029a4d0fca47c0ba2499aececf837e730e99b94a4b9bb3cc76f3ccdb63e39369 | def ellipsoid(xc, yc, zc, rx, ry, rz, n):
"Numpy equivalent of Matlab's ellipsoid function"
u = np.linspace(0, (2 * np.pi), (n + 1))
v = np.linspace(0, np.pi, (n + 1))
x = ((- rx) * np.outer(np.cos(u), np.sin(v)).T)
y = ((- ry) * np.outer(np.sin(u), np.sin(v)).T)
z = ((- rz) * np.outer(np.ones_like(u), np.cos(v)).T)
return (x, y, z) | Numpy equivalent of Matlab's ellipsoid function | cython/gtsam/utils/plot.py | ellipsoid | berndpfrommer/gtsam | 1 | python | def ellipsoid(xc, yc, zc, rx, ry, rz, n):
u = np.linspace(0, (2 * np.pi), (n + 1))
v = np.linspace(0, np.pi, (n + 1))
x = ((- rx) * np.outer(np.cos(u), np.sin(v)).T)
y = ((- ry) * np.outer(np.sin(u), np.sin(v)).T)
z = ((- rz) * np.outer(np.ones_like(u), np.cos(v)).T)
return (x, y, z) | def ellipsoid(xc, yc, zc, rx, ry, rz, n):
u = np.linspace(0, (2 * np.pi), (n + 1))
v = np.linspace(0, np.pi, (n + 1))
x = ((- rx) * np.outer(np.cos(u), np.sin(v)).T)
y = ((- ry) * np.outer(np.sin(u), np.sin(v)).T)
z = ((- rz) * np.outer(np.ones_like(u), np.cos(v)).T)
return (x, y, z)<|docstring|>Numpy equivalent of Matlab's ellipsoid function<|endoftext|> |
88a72916de6ae5bd5a2d344843ae2376eeb802442dae53412345408c52d65785 | def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5):
'\n Plots a Gaussian as an uncertainty ellipse\n\n Based on Maybeck Vol 1, page 366\n k=2.296 corresponds to 1 std, 68.26% of all probability\n k=11.82 corresponds to 3 std, 99.74% of all probability\n '
k = 11.82
(U, S, _) = np.linalg.svd(P)
radii = (k * np.sqrt(S))
radii = (radii * scale)
(rx, ry, rz) = radii
(xc, yc, zc) = ellipsoid(0, 0, 0, rx, ry, rz, n)
data = ((np.kron(U[(:, 0:1)], xc) + np.kron(U[(:, 1:2)], yc)) + np.kron(U[(:, 2:3)], zc))
n = data.shape[1]
x = (data[(0:n, :)] + origin[0])
y = (data[(n:(2 * n), :)] + origin[1])
z = (data[((2 * n):, :)] + origin[2])
axes.plot_surface(x, y, z, alpha=alpha, cmap='hot') | Plots a Gaussian as an uncertainty ellipse
Based on Maybeck Vol 1, page 366
k=2.296 corresponds to 1 std, 68.26% of all probability
k=11.82 corresponds to 3 std, 99.74% of all probability | cython/gtsam/utils/plot.py | plot_covariance_ellipse_3d | berndpfrommer/gtsam | 1 | python | def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5):
'\n Plots a Gaussian as an uncertainty ellipse\n\n Based on Maybeck Vol 1, page 366\n k=2.296 corresponds to 1 std, 68.26% of all probability\n k=11.82 corresponds to 3 std, 99.74% of all probability\n '
k = 11.82
(U, S, _) = np.linalg.svd(P)
radii = (k * np.sqrt(S))
radii = (radii * scale)
(rx, ry, rz) = radii
(xc, yc, zc) = ellipsoid(0, 0, 0, rx, ry, rz, n)
data = ((np.kron(U[(:, 0:1)], xc) + np.kron(U[(:, 1:2)], yc)) + np.kron(U[(:, 2:3)], zc))
n = data.shape[1]
x = (data[(0:n, :)] + origin[0])
y = (data[(n:(2 * n), :)] + origin[1])
z = (data[((2 * n):, :)] + origin[2])
axes.plot_surface(x, y, z, alpha=alpha, cmap='hot') | def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5):
'\n Plots a Gaussian as an uncertainty ellipse\n\n Based on Maybeck Vol 1, page 366\n k=2.296 corresponds to 1 std, 68.26% of all probability\n k=11.82 corresponds to 3 std, 99.74% of all probability\n '
k = 11.82
(U, S, _) = np.linalg.svd(P)
radii = (k * np.sqrt(S))
radii = (radii * scale)
(rx, ry, rz) = radii
(xc, yc, zc) = ellipsoid(0, 0, 0, rx, ry, rz, n)
data = ((np.kron(U[(:, 0:1)], xc) + np.kron(U[(:, 1:2)], yc)) + np.kron(U[(:, 2:3)], zc))
n = data.shape[1]
x = (data[(0:n, :)] + origin[0])
y = (data[(n:(2 * n), :)] + origin[1])
z = (data[((2 * n):, :)] + origin[2])
axes.plot_surface(x, y, z, alpha=alpha, cmap='hot')<|docstring|>Plots a Gaussian as an uncertainty ellipse
Based on Maybeck Vol 1, page 366
k=2.296 corresponds to 1 std, 68.26% of all probability
k=11.82 corresponds to 3 std, 99.74% of all probability<|endoftext|> |
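A usage sketch with a toy diagonal covariance (values invented here); the function adds a surface to an existing 3D axes, so pair it with `set_axes_equal` to keep the ellipsoid's proportions honest:

```python
import numpy as np
import matplotlib.pyplot as plt

P = np.diag([0.04, 0.01, 0.09])        # example 3x3 covariance
origin = np.array([0.0, 0.0, 0.0])

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
plot_covariance_ellipse_3d(ax, origin, P)
set_axes_equal(fig.number)             # keep the ellipsoid from looking skewed
plt.show()
```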
4aefbb97782313c6fbbc25675bc031e3969120db0cd94f3669f60ebba45180a0 | def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
"Plot a 2D pose on given axis 'axes' with given 'axis_length'."
gRp = pose.rotation().matrix()
t = pose.translation()
origin = np.array([t.x(), t.y()])
x_axis = (origin + (gRp[(:, 0)] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'r-')
y_axis = (origin + (gRp[(:, 1)] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'g-')
if (covariance is not None):
pPp = covariance[(0:2, 0:2)]
gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
(w, v) = np.linalg.eig(gPp)
k = 5.0
angle = np.arctan2(v[(1, 0)], v[(0, 0)])
e1 = patches.Ellipse(origin, np.sqrt((w[0] * k)), np.sqrt((w[1] * k)), np.rad2deg(angle), fill=False)
axes.add_patch(e1) | Plot a 2D pose on given axis 'axes' with given 'axis_length'. | cython/gtsam/utils/plot.py | plot_pose2_on_axes | berndpfrommer/gtsam | 1 | python | def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
gRp = pose.rotation().matrix()
t = pose.translation()
origin = np.array([t.x(), t.y()])
x_axis = (origin + (gRp[(:, 0)] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'r-')
y_axis = (origin + (gRp[(:, 1)] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'g-')
if (covariance is not None):
pPp = covariance[(0:2, 0:2)]
gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
(w, v) = np.linalg.eig(gPp)
k = 5.0
angle = np.arctan2(v[(1, 0)], v[(0, 0)])
e1 = patches.Ellipse(origin, np.sqrt((w[0] * k)), np.sqrt((w[1] * k)), np.rad2deg(angle), fill=False)
axes.add_patch(e1) | def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
gRp = pose.rotation().matrix()
t = pose.translation()
origin = np.array([t.x(), t.y()])
x_axis = (origin + (gRp[(:, 0)] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'r-')
y_axis = (origin + (gRp[(:, 1)] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[(:, 0)], line[(:, 1)], 'g-')
if (covariance is not None):
pPp = covariance[(0:2, 0:2)]
gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
(w, v) = np.linalg.eig(gPp)
k = 5.0
angle = np.arctan2(v[(1, 0)], v[(0, 0)])
e1 = patches.Ellipse(origin, np.sqrt((w[0] * k)), np.sqrt((w[1] * k)), np.rad2deg(angle), fill=False)
axes.add_patch(e1)<|docstring|>Plot a 2D pose on given axis 'axes' with given 'axis_length'.<|endoftext|> |
ffe45dc22eae8dc6c62f49afd5e1b4e9c6048477b7f0a0a16850055591085127 | def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
"Plot a 2D pose on given figure with given 'axis_length'."
fig = plt.figure(fignum)
axes = fig.gca()
plot_pose2_on_axes(axes, pose, axis_length, covariance) | Plot a 2D pose on given figure with given 'axis_length'. | cython/gtsam/utils/plot.py | plot_pose2 | berndpfrommer/gtsam | 1 | python | def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
fig = plt.figure(fignum)
axes = fig.gca()
plot_pose2_on_axes(axes, pose, axis_length, covariance) | def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
fig = plt.figure(fignum)
axes = fig.gca()
plot_pose2_on_axes(axes, pose, axis_length, covariance)<|docstring|>Plot a 2D pose on given figure with given 'axis_length'.<|endoftext|> |
5165aa8b04a97eed7582110f266449596e2566401df6880f55b92cfb237ed170 | def plot_point3_on_axes(axes, point, linespec, P=None):
"Plot a 3D point on given axis 'axes' with given 'linespec'."
axes.plot([point.x()], [point.y()], [point.z()], linespec)
if (P is not None):
plot_covariance_ellipse_3d(axes, point.vector(), P) | Plot a 3D point on given axis 'axes' with given 'linespec'. | cython/gtsam/utils/plot.py | plot_point3_on_axes | berndpfrommer/gtsam | 1 | python | def plot_point3_on_axes(axes, point, linespec, P=None):
axes.plot([point.x()], [point.y()], [point.z()], linespec)
if (P is not None):
plot_covariance_ellipse_3d(axes, point.vector(), P) | def plot_point3_on_axes(axes, point, linespec, P=None):
axes.plot([point.x()], [point.y()], [point.z()], linespec)
if (P is not None):
plot_covariance_ellipse_3d(axes, point.vector(), P)<|docstring|>Plot a 3D point on given axis 'axes' with given 'linespec'.<|endoftext|> |
b53a0915a94b6c3dca4efb867067531ec71a5572460a0f855f7453186dd825fa | def plot_point3(fignum, point, linespec, P=None):
"Plot a 3D point on given figure with given 'linespec'."
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_point3_on_axes(axes, point, linespec, P) | Plot a 3D point on given figure with given 'linespec'. | cython/gtsam/utils/plot.py | plot_point3 | berndpfrommer/gtsam | 1 | python | def plot_point3(fignum, point, linespec, P=None):
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_point3_on_axes(axes, point, linespec, P) | def plot_point3(fignum, point, linespec, P=None):
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_point3_on_axes(axes, point, linespec, P)<|docstring|>Plot a 3D point on given figure with given 'linespec'.<|endoftext|> |
bed2d9dbc86e3fd6c0e9657a589bf47a3820e1f923995fbdeac6457c86b720b6 | def plot_3d_points(fignum, values, linespec='g*', marginals=None):
"\n Plots the Point3s in 'values', with optional covariances.\n Finds all the Point3 objects in the given Values object and plots them.\n If a Marginals object is given, this function will also plot marginal\n covariance ellipses for each point.\n "
keys = values.keys()
for i in range(keys.size()):
try:
key = keys.at(i)
point = values.atPoint3(key)
if (marginals is not None):
P = marginals.marginalCovariance(key)
else:
P = None
plot_point3(fignum, point, linespec, P)
except RuntimeError:
continue | Plots the Point3s in 'values', with optional covariances.
Finds all the Point3 objects in the given Values object and plots them.
If a Marginals object is given, this function will also plot marginal
covariance ellipses for each point. | cython/gtsam/utils/plot.py | plot_3d_points | berndpfrommer/gtsam | 1 | python | def plot_3d_points(fignum, values, linespec='g*', marginals=None):
"\n Plots the Point3s in 'values', with optional covariances.\n Finds all the Point3 objects in the given Values object and plots them.\n If a Marginals object is given, this function will also plot marginal\n covariance ellipses for each point.\n "
keys = values.keys()
for i in range(keys.size()):
try:
key = keys.at(i)
point = values.atPoint3(key)
if (marginals is not None):
P = marginals.marginalCovariance(key)
else:
P = None
plot_point3(fignum, point, linespec, P)
except RuntimeError:
continue | def plot_3d_points(fignum, values, linespec='g*', marginals=None):
"\n Plots the Point3s in 'values', with optional covariances.\n Finds all the Point3 objects in the given Values object and plots them.\n If a Marginals object is given, this function will also plot marginal\n covariance ellipses for each point.\n "
keys = values.keys()
for i in range(keys.size()):
try:
key = keys.at(i)
point = values.atPoint3(key)
if (marginals is not None):
P = marginals.marginalCovariance(key)
else:
P = None
plot_point3(fignum, point, linespec, P)
except RuntimeError:
continue<|docstring|>Plots the Point3s in 'values', with optional covariances.
Finds all the Point3 objects in the given Values object and plots them.
If a Marginals object is given, this function will also plot marginal
covariance ellipses for each point.<|endoftext|> |
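A short usage sketch for plot_3d_points, assuming gtsam is available. The keys and coordinates are illustrative; without a Marginals object, no covariance ellipses are drawn:

import gtsam

values = gtsam.Values()
values.insert(0, gtsam.Point3(1.0, 2.0, 3.0))
values.insert(1, gtsam.Point3(4.0, 5.0, 6.0))
plot_3d_points(1, values, linespec='g*')  # both points drawn as green stars on figure 1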
24bdc00ba49fda296b4b5fc413179191297c9617824e1e7ccd46efe1c7fdb796 | def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
"Plot a 3D pose on given axis 'axes' with given 'axis_length'."
gRp = pose.rotation().matrix()
origin = pose.translation().vector()
x_axis = (origin + (gRp[:, 0] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-')
y_axis = (origin + (gRp[:, 1] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-')
z_axis = (origin + (gRp[:, 2] * axis_length))
line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-')
if (P is not None):
pPp = P[3:6, 3:6]
gPp = ((gRp @ pPp) @ gRp.T)
plot_covariance_ellipse_3d(axes, origin, gPp) | Plot a 3D pose on given axis 'axes' with given 'axis_length'. | cython/gtsam/utils/plot.py | plot_pose3_on_axes | berndpfrommer/gtsam | 1 | python | def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
gRp = pose.rotation().matrix()
origin = pose.translation().vector()
x_axis = (origin + (gRp[:, 0] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-')
y_axis = (origin + (gRp[:, 1] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-')
z_axis = (origin + (gRp[:, 2] * axis_length))
line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-')
if (P is not None):
pPp = P[3:6, 3:6]
gPp = ((gRp @ pPp) @ gRp.T)
plot_covariance_ellipse_3d(axes, origin, gPp) | def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
gRp = pose.rotation().matrix()
origin = pose.translation().vector()
x_axis = (origin + (gRp[:, 0] * axis_length))
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-')
y_axis = (origin + (gRp[:, 1] * axis_length))
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-')
z_axis = (origin + (gRp[:, 2] * axis_length))
line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-')
if (P is not None):
pPp = P[3:6, 3:6]
gPp = ((gRp @ pPp) @ gRp.T)
plot_covariance_ellipse_3d(axes, origin, gPp)<|docstring|>Plot a 3D pose on given axis 'axes' with given 'axis_length'.<|endoftext|> |
0b74907837bd24e4c158b060bac6208460039c0673de40b18ea676a0c6f3f5ae | def plot_pose3(fignum, pose, axis_length=0.1, P=None):
"Plot a 3D pose on given figure with given 'axis_length'."
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length) | Plot a 3D pose on given figure with given 'axis_length'. | cython/gtsam/utils/plot.py | plot_pose3 | berndpfrommer/gtsam | 1 | python | def plot_pose3(fignum, pose, axis_length=0.1, P=None):
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length) | def plot_pose3(fignum, pose, axis_length=0.1, P=None):
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length)<|docstring|>Plot a 3D pose on given figure with given 'axis_length'.<|endoftext|> |
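And a hypothetical call for the 3D pose helper, drawing the identity pose with RGB axis lines of length 0.3 (assumes a 3D-capable matplotlib backend):

import gtsam
import matplotlib.pyplot as plt

plot_pose3(2, gtsam.Pose3(), axis_length=0.3)  # identity pose at the origin
plt.show()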
1cca7dddf03a2cd2592c27cf5b80e007f0275fddeca244cdc2403a0d7405d2e6 | def _get_album_info(album_hash):
'\n Will obtain and return the image hashes along with the image file types by\n requesting with the Imgur API where the user tokens are found in a local\n .ini file.\n '
config = ConfigParser()
config.read('imgur_api_info.ini')
info = config['GENERAL']
url = 'https://api.imgur.com/3/album/{}/images'.format(album_hash)
auth = 'Bearer {}'.format(info['access_token'])
imgs = requests.get(url, headers={'Authorization': auth})
return [(i['link'][(i['link'].index('imgur.com/') + len('imgur.com/')):(- 4)], i['link'][(- 4):]) for i in imgs.json()['data']] | Will obtain and return the image hashes along with the image file types by
requesting with the Imgur API where the user tokens are found in a local
.ini file. | scripts/checks.py | _get_album_info | crumpstrr33/imgur_album_downloader | 0 | python | def _get_album_info(album_hash):
'\n Will obtain and return the image hashes along with the image file types by\n requesting with the Imgur API where the user tokens are found in a local\n .ini file.\n '
config = ConfigParser()
config.read('imgur_api_info.ini')
info = config['GENERAL']
url = 'https://api.imgur.com/3/album/{}/images'.format(album_hash)
auth = 'Bearer {}'.format(info['access_token'])
imgs = requests.get(url, headers={'Authorization': auth})
return [(i['link'][(i['link'].index('imgur.com/') + len('imgur.com/')):(- 4)], i['link'][(- 4):]) for i in imgs.json()['data']] | def _get_album_info(album_hash):
'\n Will obtain and return the image hashes along with the image file types by\n requesting with the Imgur API where the user tokens are found in a local\n .ini file.\n '
config = ConfigParser()
config.read('imgur_api_info.ini')
info = config['GENERAL']
url = 'https://api.imgur.com/3/album/{}/images'.format(album_hash)
auth = 'Bearer {}'.format(info['access_token'])
imgs = requests.get(url, headers={'Authorization': auth})
return [(i['link'][(i['link'].index('imgur.com/') + len('imgur.com/')):(- 4)], i['link'][(- 4):]) for i in imgs.json()['data']]<|docstring|>Will obtain and return the image hashes along with the image file types by
requesting with the Imgur API where the user tokens are found in a local
.ini file.<|endoftext|> |
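_get_album_info expects a local imgur_api_info.ini holding the API tokens under a GENERAL section. A sketch that writes a compatible file; the token value is a placeholder, not a real credential:

from configparser import ConfigParser

config = ConfigParser()
config['GENERAL'] = {'access_token': 'YOUR_IMGUR_ACCESS_TOKEN'}  # placeholder token
with open('imgur_api_info.ini', 'w') as f:
    config.write(f)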
25cfd8259cb414c9b5a583774fd01a05dcc0689d9ba793bcce06753186f7c408 | def check_info(new_dir, empty_dir, img_dir, album_hash):
'\n Makes checks on the hash/options chosen/directories chosen, etc. Look at\n the docstring of check.py in app.py for more info.\n '
if (len(album_hash) > 7):
return ('wrong_len_hash', None)
if (requests.head(('https://imgur.com/a/' + album_hash)).status_code != 200):
return ('dne_album', None)
img_list = _get_album_info(album_hash)
if (not len(img_list)):
return ('zero_imgs', None)
if (not os.path.isfile(os.path.join(os.getcwd(), 'imgur_api_info.ini'))):
return ('dne_ini', None)
if new_dir:
try:
os.makedirs(img_dir)
except FileExistsError:
return ('new_dir_exists', None)
elif (not os.path.isdir(img_dir)):
return ('dne_dir', None)
elif (empty_dir and os.listdir(img_dir)):
return ('nonempty_dir', None)
return ('', img_list) | Makes checks on the hash/options chosen/directories chosen, etc. Look at
the docstring of check.py in app.py for more info. | scripts/checks.py | check_info | crumpstrr33/imgur_album_downloader | 0 | python | def check_info(new_dir, empty_dir, img_dir, album_hash):
'\n Makes checks on the hash/options chosen/directories chosen, etc. Look at\n the docstring of check.py in app.py for more info.\n '
if (len(album_hash) > 7):
return ('wrong_len_hash', None)
if (requests.head(('https://imgur.com/a/' + album_hash)).status_code != 200):
return ('dne_album', None)
img_list = _get_album_info(album_hash)
if (not len(img_list)):
return ('zero_imgs', None)
if (not os.path.isfile(os.path.join(os.getcwd(), 'imgur_api_info.ini'))):
return ('dne_ini', None)
if new_dir:
try:
os.makedirs(img_dir)
except FileExistsError:
return ('new_dir_exists', None)
elif (not os.path.isdir(img_dir)):
return ('dne_dir', None)
elif (empty_dir and os.listdir(img_dir)):
return ('nonempty_dir', None)
return (, img_list) | def check_info(new_dir, empty_dir, img_dir, album_hash):
'\n Makes checks on the hash/options chosen/directories chosen, etc. Look at\n the docstring of check.py in app.py for more info.\n '
if (len(album_hash) > 7):
return ('wrong_len_hash', None)
if (requests.head(('https://imgur.com/a/' + album_hash)).status_code != 200):
return ('dne_album', None)
img_list = _get_album_info(album_hash)
if (not len(img_list)):
return ('zero_imgs', None)
if (not os.path.isfile(os.path.join(os.getcwd(), 'imgur_api_info.ini'))):
return ('dne_ini', None)
if new_dir:
try:
os.makedirs(img_dir)
except FileExistsError:
return ('new_dir_exists', None)
elif (not os.path.isdir(img_dir)):
return ('dne_dir', None)
elif (empty_dir and os.listdir(img_dir)):
return ('nonempty_dir', None)
return ('', img_list)<|docstring|>Makes checks on the hash/options chosen/directories chosen, etc. Look at
the docstring of check.py in app.py for more info.<|endoftext|> |
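A usage sketch for check_info; the directory name and seven-character album hash are illustrative. An empty error string means every check passed:

error, img_list = check_info(new_dir=False, empty_dir=True,
                             img_dir='downloads', album_hash='a1b2c3d')
if error:
    print('Check failed:', error)
else:
    print('Album contains', len(img_list), 'images')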
b3f88abc2270eb7c4676ee40e2775918824450eb73ac80a44150969f4c5b8d86 | @pytest.fixture
def mock_publish(hass):
'Initialize components.'
(yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))) | Initialize components. | tests/components/vacuum/test_mqtt.py | mock_publish | stealthhacker/home-assistant | 3 | python | @pytest.fixture
def mock_publish(hass):
(yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))) | @pytest.fixture
def mock_publish(hass):
(yield hass.loop.run_until_complete(async_mock_mqtt_component(hass)))<|docstring|>Initialize components.<|endoftext|> |
8df6d3dfaed182408b246b0d6a25295751947d5202dc1dd32a387e35206f32cd | async def test_default_supported_features(hass, mock_publish):
'Test that the correct supported features are set.'
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
entity = hass.states.get('vacuum.mqtttest')
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert (sorted(mqttvacuum.services_to_strings(entity_features)) == sorted(['turn_on', 'turn_off', 'stop', 'return_home', 'battery', 'status', 'clean_spot'])) | Test that the correct supported features are set. | tests/components/vacuum/test_mqtt.py | test_default_supported_features | stealthhacker/home-assistant | 3 | python | async def test_default_supported_features(hass, mock_publish):
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
entity = hass.states.get('vacuum.mqtttest')
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert (sorted(mqttvacuum.services_to_strings(entity_features)) == sorted(['turn_on', 'turn_off', 'stop', 'return_home', 'battery', 'status', 'clean_spot'])) | async def test_default_supported_features(hass, mock_publish):
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
entity = hass.states.get('vacuum.mqtttest')
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert (sorted(mqttvacuum.services_to_strings(entity_features)) == sorted(['turn_on', 'turn_off', 'stop', 'return_home', 'battery', 'status', 'clean_spot']))<|docstring|>Test that the correct supported features are set.<|endoftext|>
31168b65b22f8c6ed9723ce3742b6a66d12ea82f3997e77497564c377cd92aa8 | async def test_all_commands(hass, mock_publish):
'Test simple commands to the vacuum.'
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
common.turn_on(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_on', 0, False)
mock_publish.async_publish.reset_mock()
common.turn_off(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_off', 0, False)
mock_publish.async_publish.reset_mock()
common.stop(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'stop', 0, False)
mock_publish.async_publish.reset_mock()
common.clean_spot(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'clean_spot', 0, False)
mock_publish.async_publish.reset_mock()
common.locate(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'locate', 0, False)
mock_publish.async_publish.reset_mock()
common.start_pause(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'start_pause', 0, False)
mock_publish.async_publish.reset_mock()
common.return_to_base(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'return_to_base', 0, False)
mock_publish.async_publish.reset_mock()
common.set_fan_speed(hass, 'high', 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/set_fan_speed', 'high', 0, False)
mock_publish.async_publish.reset_mock()
common.send_command(hass, '44 FE 93', entity_id='vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/send_command', '44 FE 93', 0, False) | Test simple commands to the vacuum. | tests/components/vacuum/test_mqtt.py | test_all_commands | stealthhacker/home-assistant | 3 | python | async def test_all_commands(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
common.turn_on(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_on', 0, False)
mock_publish.async_publish.reset_mock()
common.turn_off(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_off', 0, False)
mock_publish.async_publish.reset_mock()
common.stop(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'stop', 0, False)
mock_publish.async_publish.reset_mock()
common.clean_spot(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'clean_spot', 0, False)
mock_publish.async_publish.reset_mock()
common.locate(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'locate', 0, False)
mock_publish.async_publish.reset_mock()
common.start_pause(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'start_pause', 0, False)
mock_publish.async_publish.reset_mock()
common.return_to_base(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'return_to_base', 0, False)
mock_publish.async_publish.reset_mock()
common.set_fan_speed(hass, 'high', 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/set_fan_speed', 'high', 0, False)
mock_publish.async_publish.reset_mock()
common.send_command(hass, '44 FE 93', entity_id='vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/send_command', '44 FE 93', 0, False) | async def test_all_commands(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
common.turn_on(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_on', 0, False)
mock_publish.async_publish.reset_mock()
common.turn_off(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'turn_off', 0, False)
mock_publish.async_publish.reset_mock()
common.stop(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'stop', 0, False)
mock_publish.async_publish.reset_mock()
common.clean_spot(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'clean_spot', 0, False)
mock_publish.async_publish.reset_mock()
common.locate(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'locate', 0, False)
mock_publish.async_publish.reset_mock()
common.start_pause(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'start_pause', 0, False)
mock_publish.async_publish.reset_mock()
common.return_to_base(hass, 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/command', 'return_to_base', 0, False)
mock_publish.async_publish.reset_mock()
common.set_fan_speed(hass, 'high', 'vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/set_fan_speed', 'high', 0, False)
mock_publish.async_publish.reset_mock()
common.send_command(hass, '44 FE 93', entity_id='vacuum.mqtttest')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('vacuum/send_command', '44 FE 93', 0, False)<|docstring|>Test simple commands to the vacuum.<|endoftext|> |
fcaf938661eaac51eeecf12d8d92cec365f8fc95dc8c0e6904cceb64c52c286c | async def test_status(hass, mock_publish):
'Test status updates from the vacuum.'
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
message = '{\n "battery_level": 54,\n "cleaning": true,\n "docked": false,\n "charging": false,\n "fan_speed": "max"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_ON == state.state)
assert ('mdi:battery-50' == state.attributes.get(ATTR_BATTERY_ICON))
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('max' == state.attributes.get(ATTR_FAN_SPEED))
message = '{\n "battery_level": 61,\n "docked": true,\n "cleaning": false,\n "charging": true,\n "fan_speed": "min"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('mdi:battery-charging-60' == state.attributes.get(ATTR_BATTERY_ICON))
assert (61 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('min' == state.attributes.get(ATTR_FAN_SPEED)) | Test status updates from the vacuum. | tests/components/vacuum/test_mqtt.py | test_status | stealthhacker/home-assistant | 3 | python | async def test_status(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
message = '{\n "battery_level": 54,\n "cleaning": true,\n "docked": false,\n "charging": false,\n "fan_speed": "max"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_ON == state.state)
assert ('mdi:battery-50' == state.attributes.get(ATTR_BATTERY_ICON))
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('max' == state.attributes.get(ATTR_FAN_SPEED))
message = '{\n "battery_level": 61,\n "docked": true,\n "cleaning": false,\n "charging": true,\n "fan_speed": "min"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('mdi:battery-charging-60' == state.attributes.get(ATTR_BATTERY_ICON))
assert (61 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('min' == state.attributes.get(ATTR_FAN_SPEED)) | async def test_status(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
message = '{\n "battery_level": 54,\n "cleaning": true,\n "docked": false,\n "charging": false,\n "fan_speed": "max"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_ON == state.state)
assert ('mdi:battery-50' == state.attributes.get(ATTR_BATTERY_ICON))
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('max' == state.attributes.get(ATTR_FAN_SPEED))
message = '{\n "battery_level": 61,\n "docked": true,\n "cleaning": false,\n "charging": true,\n "fan_speed": "min"\n }'
async_fire_mqtt_message(hass, 'vacuum/state', message)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('mdi:battery-charging-60' == state.attributes.get(ATTR_BATTERY_ICON))
assert (61 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert ('min' == state.attributes.get(ATTR_FAN_SPEED))<|docstring|>Test status updates from the vacuum.<|endoftext|> |
76e1d7c804cf068f6fbf97ed58457c0905d1a81612cfc6c0c39291048527bd83 | async def test_battery_template(hass, mock_publish):
'Test that you can use non-default templates for battery_level.'
default_config.update({mqttvacuum.CONF_SUPPORTED_FEATURES: mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES), mqttvacuum.CONF_BATTERY_LEVEL_TOPIC: 'retroroomba/battery_level', mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE: '{{ value }}'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'retroroomba/battery_level', '54')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') | Test that you can use non-default templates for battery_level. | tests/components/vacuum/test_mqtt.py | test_battery_template | stealthhacker/home-assistant | 3 | python | async def test_battery_template(hass, mock_publish):
default_config.update({mqttvacuum.CONF_SUPPORTED_FEATURES: mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES), mqttvacuum.CONF_BATTERY_LEVEL_TOPIC: 'retroroomba/battery_level', mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE: '{{ value }}'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'retroroomba/battery_level', '54')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') | async def test_battery_template(hass, mock_publish):
default_config.update({mqttvacuum.CONF_SUPPORTED_FEATURES: mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES), mqttvacuum.CONF_BATTERY_LEVEL_TOPIC: 'retroroomba/battery_level', mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE: '{{ value }}'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'retroroomba/battery_level', '54')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (54 == state.attributes.get(ATTR_BATTERY_LEVEL))
assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50')<|docstring|>Test that you can use non-default templates for battery_level.<|endoftext|> |
d65092d71dd060814dbfdbd10da1e52abae138f7e2255005d496922fe0cc1f07 | async def test_status_invalid_json(hass, mock_publish):
'Test to make sure nothing breaks if the vacuum sends bad JSON.'
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'vacuum/state', '{"asdfasas false}')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('Stopped' == state.attributes.get(ATTR_STATUS)) | Test to make sure nothing breaks if the vacuum sends bad JSON. | tests/components/vacuum/test_mqtt.py | test_status_invalid_json | stealthhacker/home-assistant | 3 | python | async def test_status_invalid_json(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'vacuum/state', '{"asdfasas false}')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('Stopped' == state.attributes.get(ATTR_STATUS)) | async def test_status_invalid_json(hass, mock_publish):
default_config[mqttvacuum.CONF_SUPPORTED_FEATURES] = mqttvacuum.services_to_strings(mqttvacuum.ALL_SERVICES)
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
async_fire_mqtt_message(hass, 'vacuum/state', '{"asdfasas false}')
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_OFF == state.state)
assert ('Stopped' == state.attributes.get(ATTR_STATUS))<|docstring|>Test to make sure nothing breaks if the vacuum sends bad JSON.<|endoftext|> |
9f63732cda4354a4e7a0ed6114d8ae7290c5078fa120ec6c237ab43d9fd97e9c | async def test_default_availability_payload(hass, mock_publish):
'Test availability by default payload with defined topic.'
default_config.update({'availability_topic': 'availability-topic'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'offline')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state) | Test availability by default payload with defined topic. | tests/components/vacuum/test_mqtt.py | test_default_availability_payload | stealthhacker/home-assistant | 3 | python | async def test_default_availability_payload(hass, mock_publish):
default_config.update({'availability_topic': 'availability-topic'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'offline')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state) | async def test_default_availability_payload(hass, mock_publish):
default_config.update({'availability_topic': 'availability-topic'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'offline')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)<|docstring|>Test availability by default payload with defined topic.<|endoftext|> |
4f9ce7748c4c7b0e85298ad427b39f0bd2b9c710d1790156b39e666b9ff9f32a | async def test_custom_availability_payload(hass, mock_publish):
'Test availability by custom payload with defined topic.'
default_config.update({'availability_topic': 'availability-topic', 'payload_available': 'good', 'payload_not_available': 'nogood'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'good')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'nogood')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state) | Test availability by custom payload with defined topic. | tests/components/vacuum/test_mqtt.py | test_custom_availability_payload | stealthhacker/home-assistant | 3 | python | async def test_custom_availability_payload(hass, mock_publish):
default_config.update({'availability_topic': 'availability-topic', 'payload_available': 'good', 'payload_not_available': 'nogood'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'good')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'nogood')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state) | async def test_custom_availability_payload(hass, mock_publish):
default_config.update({'availability_topic': 'availability-topic', 'payload_available': 'good', 'payload_not_available': 'nogood'})
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: default_config}))
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'good')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE != state.state)
async_fire_mqtt_message(hass, 'availability-topic', 'nogood')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.mqtttest')
assert (STATE_UNAVAILABLE == state.state)<|docstring|>Test availability by custom payload with defined topic.<|endoftext|> |
1e72b20f46f6cac488bef5058ed78753aaaf8afaa8705a9407e3f2dbc994968e | async def test_discovery_removal_vacuum(hass, mock_publish):
'Test removal of discovered vacuum.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data = '{ "name": "Beer", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', '')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is None) | Test removal of discovered vacuum. | tests/components/vacuum/test_mqtt.py | test_discovery_removal_vacuum | stealthhacker/home-assistant | 3 | python | async def test_discovery_removal_vacuum(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data = '{ "name": "Beer", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', '')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is None) | async def test_discovery_removal_vacuum(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data = '{ "name": "Beer", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', '')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is None)<|docstring|>Test removal of discovered vacuum.<|endoftext|> |
b514c791659cfb5b66baa78a29967f9895925e0227da5e644a8c5017e232104c | async def test_discovery_update_vacuum(hass, mock_publish):
'Test update of discovered vacuum.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('vacuum.milk')
assert (state is None) | Test update of discovered vacuum. | tests/components/vacuum/test_mqtt.py | test_discovery_update_vacuum | stealthhacker/home-assistant | 3 | python | async def test_discovery_update_vacuum(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('vacuum.milk')
assert (state is None) | async def test_discovery_update_vacuum(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('vacuum.beer')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('vacuum.milk')
assert (state is None)<|docstring|>Test update of discovered vacuum.<|endoftext|> |
ddacfa2c98ae55c89199cb518c1cd5431993a4609320e5ffce0a99e293892089 | async def test_unique_id(hass, mock_publish):
'Test unique id option only creates one vacuum per unique_id.'
(await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
async_fire_mqtt_message(hass, 'test-topic', 'payload')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 2) | Test unique id option only creates one vacuum per unique_id. | tests/components/vacuum/test_mqtt.py | test_unique_id | stealthhacker/home-assistant | 3 | python | async def test_unique_id(hass, mock_publish):
(await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
async_fire_mqtt_message(hass, 'test-topic', 'payload')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 2) | async def test_unique_id(hass, mock_publish):
(await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
async_fire_mqtt_message(hass, 'test-topic', 'payload')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 2)<|docstring|>Test unique id option only creates one vacuum per unique_id.<|endoftext|> |
4f2bcba99268b8d63fa094fde7a4a86714113bd0654a286f18915be4aefd112d | async def test_entity_device_info_with_identifier(hass, mock_publish):
'Test MQTT vacuum device registry integration.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'})
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.identifiers == {('mqtt', 'helloworld')})
assert (device.connections == {('mac', '02:5b:26:a8:dc:12')})
assert (device.manufacturer == 'Whatever')
assert (device.name == 'Beer')
assert (device.model == 'Glass')
assert (device.sw_version == '0.1-beta') | Test MQTT vacuum device registry integration. | tests/components/vacuum/test_mqtt.py | test_entity_device_info_with_identifier | stealthhacker/home-assistant | 3 | python | async def test_entity_device_info_with_identifier(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'})
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.identifiers == {('mqtt', 'helloworld')})
assert (device.connections == {('mac', '02:5b:26:a8:dc:12')})
assert (device.manufacturer == 'Whatever')
assert (device.name == 'Beer')
assert (device.model == 'Glass')
assert (device.sw_version == '0.1-beta') | async def test_entity_device_info_with_identifier(hass, mock_publish):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'})
async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.identifiers == {('mqtt', 'helloworld')})
assert (device.connections == {('mac', '02:5b:26:a8:dc:12')})
assert (device.manufacturer == 'Whatever')
assert (device.name == 'Beer')
assert (device.model == 'Glass')
assert (device.sw_version == '0.1-beta')<|docstring|>Test MQTT vacuum device registry integration.<|endoftext|> |
08d60ce6138c1e3a1edd156d9ca5a8190e38097006ed3b7c865ef81c87493f21 | def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
'Validate a GraphQL schema.\n\n Implements the "Type Validation" sub-sections of the specification\'s "Type System"\n section.\n\n Validation runs synchronously, returning a list of encountered errors, or an empty\n list if no errors were encountered and the Schema is valid.\n '
assert_schema(schema)
errors = schema._validation_errors
if (errors is None):
context = SchemaValidationContext(schema)
context.validate_root_types()
context.validate_directives()
context.validate_types()
errors = context.errors
schema._validation_errors = errors
return errors | Validate a GraphQL schema.
Implements the "Type Validation" sub-sections of the specification's "Type System"
section.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the Schema is valid. | src/graphql/type/validate.py | validate_schema | wuyuanyi135/graphql-core | 1 | python | def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
'Validate a GraphQL schema.\n\n Implements the "Type Validation" sub-sections of the specification\'s "Type System"\n section.\n\n Validation runs synchronously, returning a list of encountered errors, or an empty\n list if no errors were encountered and the Schema is valid.\n '
assert_schema(schema)
errors = schema._validation_errors
if (errors is None):
context = SchemaValidationContext(schema)
context.validate_root_types()
context.validate_directives()
context.validate_types()
errors = context.errors
schema._validation_errors = errors
return errors | def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
'Validate a GraphQL schema.\n\n Implements the "Type Validation" sub-sections of the specification\'s "Type System"\n section.\n\n Validation runs synchronously, returning a list of encountered errors, or an empty\n list if no errors were encountered and the Schema is valid.\n '
assert_schema(schema)
errors = schema._validation_errors
if (errors is None):
context = SchemaValidationContext(schema)
context.validate_root_types()
context.validate_directives()
context.validate_types()
errors = context.errors
schema._validation_errors = errors
return errors<|docstring|>Validate a GraphQL schema.
Implements the "Type Validation" sub-sections of the specification's "Type System"
section.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the Schema is valid.<|endoftext|> |
bf09ac97ae745699479c34304ada6e37cd7a4a2ef9af5971e65910971c25319a | def assert_valid_schema(schema: GraphQLSchema) -> None:
'Utility function which asserts a schema is valid.\n\n Throws a TypeError if the schema is invalid.\n '
errors = validate_schema(schema)
if errors:
raise TypeError('\n\n'.join((error.message for error in errors))) | Utility function which asserts a schema is valid.
Throws a TypeError if the schema is invalid. | src/graphql/type/validate.py | assert_valid_schema | wuyuanyi135/graphql-core | 1 | python | def assert_valid_schema(schema: GraphQLSchema) -> None:
'Utility function which asserts a schema is valid.\n\n Throws a TypeError if the schema is invalid.\n '
errors = validate_schema(schema)
if errors:
raise TypeError('\n\n'.join((error.message for error in errors))) | def assert_valid_schema(schema: GraphQLSchema) -> None:
'Utility function which asserts a schema is valid.\n\n Throws a TypeError if the schema is invalid.\n '
errors = validate_schema(schema)
if errors:
raise TypeError('\n\n'.join((error.message for error in errors)))<|docstring|>Utility function which asserts a schema is valid.
Throws a TypeError if the schema is invalid.<|endoftext|> |
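A minimal sketch exercising both validators on a trivially valid schema, built with graphql-core's public constructors:

from graphql import (GraphQLField, GraphQLObjectType, GraphQLSchema,
                     GraphQLString)

schema = GraphQLSchema(
    query=GraphQLObjectType('Query', {'hello': GraphQLField(GraphQLString)}))
assert validate_schema(schema) == []  # no errors collected
assert_valid_schema(schema)           # raises TypeError only when invalid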
a046009f8bf54be25bfdd2047430f3af3d62ecca96dc89febe7942cb99883623 | def __call__(self, input_obj: GraphQLInputObjectType) -> None:
'Detect cycles recursively.'
name = input_obj.name
if (name in self.visited_types):
return
self.visited_types.add(name)
self.field_path_index_by_type_name[name] = len(self.field_path)
for (field_name, field) in input_obj.fields.items():
if (is_non_null_type(field.type) and is_input_object_type(field.type.of_type)):
field_type = cast(GraphQLInputObjectType, field.type.of_type)
cycle_index = self.field_path_index_by_type_name.get(field_type.name)
self.field_path.append((field_name, field))
if (cycle_index is None):
self(field_type)
else:
cycle_path = self.field_path[cycle_index:]
field_names = map(itemgetter(0), cycle_path)
self.context.report_error(f"Cannot reference Input Object '{field_type.name}' within itself through a series of non-null fields: '{'.'.join(field_names)}'.", cast(Collection[Node], map(attrgetter('ast_node'), map(itemgetter(1), cycle_path))))
self.field_path.pop()
del self.field_path_index_by_type_name[name] | Detect cycles recursively. | src/graphql/type/validate.py | __call__ | wuyuanyi135/graphql-core | 1 | python | def __call__(self, input_obj: GraphQLInputObjectType) -> None:
name = input_obj.name
if (name in self.visited_types):
return
self.visited_types.add(name)
self.field_path_index_by_type_name[name] = len(self.field_path)
for (field_name, field) in input_obj.fields.items():
if (is_non_null_type(field.type) and is_input_object_type(field.type.of_type)):
field_type = cast(GraphQLInputObjectType, field.type.of_type)
cycle_index = self.field_path_index_by_type_name.get(field_type.name)
self.field_path.append((field_name, field))
if (cycle_index is None):
self(field_type)
else:
cycle_path = self.field_path[cycle_index:]
field_names = map(itemgetter(0), cycle_path)
self.context.report_error(f"Cannot reference Input Object '{field_type.name}' within itself through a series of non-null fields: '{'.'.join(field_names)}'.", cast(Collection[Node], map(attrgetter('ast_node'), map(itemgetter(1), cycle_path))))
self.field_path.pop()
del self.field_path_index_by_type_name[name] | def __call__(self, input_obj: GraphQLInputObjectType) -> None:
name = input_obj.name
if (name in self.visited_types):
return
self.visited_types.add(name)
self.field_path_index_by_type_name[name] = len(self.field_path)
for (field_name, field) in input_obj.fields.items():
if (is_non_null_type(field.type) and is_input_object_type(field.type.of_type)):
field_type = cast(GraphQLInputObjectType, field.type.of_type)
cycle_index = self.field_path_index_by_type_name.get(field_type.name)
self.field_path.append((field_name, field))
if (cycle_index is None):
self(field_type)
else:
cycle_path = self.field_path[cycle_index:]
field_names = map(itemgetter(0), cycle_path)
self.context.report_error(f"Cannot reference Input Object '{field_type.name}' within itself through a series of non-null fields: '{'.'.join(field_names)}'.", cast(Collection[Node], map(attrgetter('ast_node'), map(itemgetter(1), cycle_path))))
self.field_path.pop()
del self.field_path_index_by_type_name[name]<|docstring|>Detect cycles recursively.<|endoftext|> |
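An illustrative schema that trips the circular-reference check above, via a non-null self-reference in an input type; the SDL and type names are made up:

from graphql import build_schema, validate_schema

schema = build_schema('''
    type Query { f(arg: Example): String }
    input Example { self: Example! }
''')
errors = validate_schema(schema)
# Expect one error along the lines of:
# "Cannot reference Input Object 'Example' within itself
#  through a series of non-null fields: 'self'."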
7987e3c6f87c3e3ebcdd74f69e0a03f9b459c70255fcd992e4740c496a3651d3 | def dump_bins(df_bins: pd.DataFrame, contig_fasta: Path, operating_dir: Path):
'\n Dump bins to a set of fasta files, each containing contigs of that bin.\n\n :param df_bins: Binning result dataset with BIN and CONTIG_NAME columns.\n :param contig_fasta: Contig file used for feature generation.\n :param operating_dir: Directory to dump bins.\n '
num_clusters: List[int] = df_bins['BIN'].unique()
bin_assignments: Dict[(str, int)] = df_bins.set_index('CONTIG_NAME').T.to_dict('records')[0]
bin_fasta_files = {i: open((operating_dir / f'bin_{i}.fasta'), 'w') for i in num_clusters}
try:
with open(contig_fasta, mode='r') as fr:
for record in SeqIO.parse(fr, 'fasta'):
identifier = str(record.id)
if (identifier in bin_assignments):
assigned_bin = bin_assignments[identifier]
SeqIO.write(record, bin_fasta_files[assigned_bin], 'fasta')
finally:
for file in bin_fasta_files.values():
file.close() | Dump bins to a set of fasta files, each containing contigs of that bin.
:param df_bins: Binning result dataset with BIN and CONTIG_NAME columns.
:param contig_fasta: Contig file used for feature generation.
:param operating_dir: Directory to dump bins. | ch_bin/core/clustering/dump_bins.py | dump_bins | kdsuneraavinash/CH-Bin | 0 | python | def dump_bins(df_bins: pd.DataFrame, contig_fasta: Path, operating_dir: Path):
'\n Dump bins to a set of fasta files, each containing contigs of that bin.\n\n :param df_bins: Binning result dataset with BIN and CONTIG_NAME columns.\n :param contig_fasta: Contig file used for feature generation.\n :param operating_dir: Directory to dump bins.\n '
num_clusters: List[int] = df_bins['BIN'].unique()
bin_assignments: Dict[(str, int)] = df_bins.set_index('CONTIG_NAME').T.to_dict('records')[0]
bin_fasta_files = {i: open((operating_dir / f'bin_{i}.fasta'), 'w') for i in num_clusters}
try:
with open(contig_fasta, mode='r') as fr:
for record in SeqIO.parse(fr, 'fasta'):
identifier = str(record.id)
if (identifier in bin_assignments):
assigned_bin = bin_assignments[identifier]
SeqIO.write(record, bin_fasta_files[assigned_bin], 'fasta')
finally:
for file in bin_fasta_files.values():
file.close() | def dump_bins(df_bins: pd.DataFrame, contig_fasta: Path, operating_dir: Path):
'\n Dump bins to a set of fasta files, each containing contigs of that bin.\n\n :param df_bins: Binning result dataset with BIN and CONTIG_NAME columns.\n :param contig_fasta: Contig file used for feature generation.\n :param operating_dir: Directory to dump bins.\n '
num_clusters: List[int] = df_bins['BIN'].unique()
bin_assignments: Dict[(str, int)] = df_bins.set_index('CONTIG_NAME').T.to_dict('records')[0]
bin_fasta_files = {i: open((operating_dir / f'bin_{i}.fasta'), 'w') for i in num_clusters}
try:
with open(contig_fasta, mode='r') as fr:
for record in SeqIO.parse(fr, 'fasta'):
identifier = str(record.id)
if (identifier in bin_assignments):
assigned_bin = bin_assignments[identifier]
SeqIO.write(record, bin_fasta_files[assigned_bin], 'fasta')
finally:
for file in bin_fasta_files.values():
file.close()<|docstring|>Dump bins to a set of fasta files, each containing contigs of that bin.
:param df_bins: Binning result dataset with BIN and CONTIG_NAME columns.
:param contig_fasta: Contig file used for feature generation.
:param operating_dir: Directory to dump bins.<|endoftext|> |
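A usage sketch for dump_bins; the file names and bin assignments are illustrative, and operating_dir is assumed to exist already:

from pathlib import Path
import pandas as pd

df_bins = pd.DataFrame({'CONTIG_NAME': ['contig_1', 'contig_2', 'contig_3'],
                        'BIN': [0, 1, 0]})
# Writes bins/bin_0.fasta (contig_1, contig_3) and bins/bin_1.fasta (contig_2).
dump_bins(df_bins, contig_fasta=Path('contigs.fasta'), operating_dir=Path('bins'))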
0e78e20e8e52c64ff802301c5fa749e2fe8e8ecffd26e20caba9bf1ecece7807 | def check_1d(inp):
"\n Check input to be a vector. Converts lists to np.ndarray.\n\n Parameters\n ----------\n inp : obj\n Input vector\n\n Returns\n -------\n numpy.ndarray or None\n Input vector or None\n\n Examples\n --------\n >>> check_1d([0, 1, 2, 3])\n [0, 1, 2, 3]\n\n >>> check_1d('test')\n None\n\n "
if isinstance(inp, list):
return check_1d(np.array(inp))
if isinstance(inp, np.ndarray):
if (inp.ndim == 1):
return inp | Check input to be a vector. Converts lists to np.ndarray.
Parameters
----------
inp : obj
Input vector
Returns
-------
numpy.ndarray or None
Input vector or None
Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
>>> check_1d('test')
None | netlsd/util.py | check_1d | xgfs/NetLSD | 49 | python | def check_1d(inp):
"\n Check input to be a vector. Converts lists to np.ndarray.\n\n Parameters\n ----------\n inp : obj\n Input vector\n\n Returns\n -------\n numpy.ndarray or None\n Input vector or None\n\n Examples\n --------\n >>> check_1d([0, 1, 2, 3])\n [0, 1, 2, 3]\n\n >>> check_1d('test')\n None\n\n "
if isinstance(inp, list):
return check_1d(np.array(inp))
if isinstance(inp, np.ndarray):
if (inp.ndim == 1):
return inp | def check_1d(inp):
"\n Check input to be a vector. Converts lists to np.ndarray.\n\n Parameters\n ----------\n inp : obj\n Input vector\n\n Returns\n -------\n numpy.ndarray or None\n Input vector or None\n\n Examples\n --------\n >>> check_1d([0, 1, 2, 3])\n [0, 1, 2, 3]\n\n >>> check_1d('test')\n None\n\n "
if isinstance(inp, list):
return check_1d(np.array(inp))
if isinstance(inp, np.ndarray):
if (inp.ndim == 1):
return inp<|docstring|>Check input to be a vector. Converts lists to np.ndarray.
Parameters
----------
inp : obj
Input vector
Returns
-------
numpy.ndarray or None
Input vector or None
Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
>>> check_1d('test')
None<|endoftext|> |
13bc6472ed5c6705457a16f5403026680ebff23e2ce578955527fe7db4cde7d9 | def check_2d(inp):
"\n Check input to be a matrix. Converts lists of lists to np.ndarray.\n\n Also allows the input to be a scipy sparse matrix.\n \n Parameters\n ----------\n inp : obj\n Input matrix\n\n Returns\n -------\n numpy.ndarray, scipy.sparse or None\n Input matrix or None\n\n Examples\n --------\n >>> check_2d([[0, 1], [2, 3]])\n [[0, 1], [2, 3]]\n\n >>> check_2d('test')\n None\n\n "
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if (inp.ndim == 2):
return inp
if sps.issparse(inp):
if (inp.ndim == 2):
return inp | Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None | netlsd/util.py | check_2d | xgfs/NetLSD | 49 | python | def check_2d(inp):
"\n Check input to be a matrix. Converts lists of lists to np.ndarray.\n\n Also allows the input to be a scipy sparse matrix.\n \n Parameters\n ----------\n inp : obj\n Input matrix\n\n Returns\n -------\n numpy.ndarray, scipy.sparse or None\n Input matrix or None\n\n Examples\n --------\n >>> check_2d([[0, 1], [2, 3]])\n [[0, 1], [2, 3]]\n\n >>> check_2d('test')\n None\n\n "
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if (inp.ndim == 2):
return inp
if sps.issparse(inp):
if (inp.ndim == 2):
return inp | def check_2d(inp):
"\n Check input to be a matrix. Converts lists of lists to np.ndarray.\n\n Also allows the input to be a scipy sparse matrix.\n \n Parameters\n ----------\n inp : obj\n Input matrix\n\n Returns\n -------\n numpy.ndarray, scipy.sparse or None\n Input matrix or None\n\n Examples\n --------\n >>> check_2d([[0, 1], [2, 3]])\n [[0, 1], [2, 3]]\n\n >>> check_2d('test')\n None\n\n "
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if (inp.ndim == 2):
return inp
if sps.issparse(inp):
if (inp.ndim == 2):
return inp<|docstring|>Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None<|endoftext|> |
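A quick sketch of both validators above in action (NumPy and SciPy assumed):

import numpy as np
import scipy.sparse as sps

vec = check_1d([0, 1, 2, 3])         # list promoted to array([0, 1, 2, 3])
mat = check_2d(np.eye(3))            # 2-D arrays pass through unchanged
spm = check_2d(sps.identity(3))      # sparse matrices are accepted as-is
assert check_1d(np.zeros((2, 2))) is None   # wrong rank falls through to None
assert check_2d('test') is None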
4074d598713bd23fb627fb5c8811c7f2dc083f17cabf01c5acd85e7dd9ac2ed3 | def graph_to_laplacian(G, normalized=True):
"\n Converts a graph from popular Python packages to Laplacian representation.\n\n Currently support NetworkX, graph_tool and igraph.\n \n Parameters\n ----------\n G : obj\n Input graph\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n scipy.sparse\n Laplacian matrix of the input graph\n\n Examples\n --------\n >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n >>> graph_to_laplacian('test')\n None\n\n "
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass | Converts a graph from popular Python packages to Laplacian representation.
Currently supports NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), normalized=False).todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None | netlsd/util.py | graph_to_laplacian | xgfs/NetLSD | 49 | python | def graph_to_laplacian(G, normalized=True):
"\n Converts a graph from popular Python packages to Laplacian representation.\n\n Currently support NetworkX, graph_tool and igraph.\n \n Parameters\n ----------\n G : obj\n Input graph\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n scipy.sparse\n Laplacian matrix of the input graph\n\n Examples\n --------\n >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n >>> graph_to_laplacian('test')\n None\n\n "
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass | def graph_to_laplacian(G, normalized=True):
"\n Converts a graph from popular Python packages to Laplacian representation.\n\n Currently support NetworkX, graph_tool and igraph.\n \n Parameters\n ----------\n G : obj\n Input graph\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n scipy.sparse\n Laplacian matrix of the input graph\n\n Examples\n --------\n >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n >>> graph_to_laplacian('test')\n None\n\n "
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass<|docstring|>Converts a graph from popular Python packages to Laplacian representation.
Currently supports NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), normalized=False).todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None<|endoftext|> |
afbff1da5738e10b001e264cb1f358e86cbc89b95965a085903be011f3dcb6a2 | def mat_to_laplacian(mat, normalized):
'\n Converts a sparse or dense adjacency matrix to Laplacian.\n \n Parameters\n ----------\n mat : obj\n Input adjacency matrix. If it is a Laplacian matrix already, return it.\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n obj\n Laplacian of the input adjacency matrix\n\n Examples\n --------\n >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n '
if sps.issparse(mat):
if np.all((mat.diagonal() >= 0)):
if np.all(((mat - sps.diags(mat.diagonal())).data <= 0)):
return mat
elif np.all((np.diag(mat) >= 0)):
if np.all(((mat - np.diag(mat)) <= 0)):
return mat
deg = np.squeeze(np.asarray(mat.sum(axis=1)))
if sps.issparse(mat):
L = (sps.diags(deg) - mat)
else:
L = (np.diag(deg) - mat)
if (not normalized):
return L
with np.errstate(divide='ignore'):
sqrt_deg = (1.0 / np.sqrt(deg))
sqrt_deg[(sqrt_deg == np.inf)] = 0
if sps.issparse(mat):
sqrt_deg_mat = sps.diags(sqrt_deg)
else:
sqrt_deg_mat = np.diag(sqrt_deg)
return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat) | Converts a sparse or dense adjacency matrix to Laplacian.
Parameters
----------
mat : obj
Input adjacency matrix. If it is a Laplacian matrix already, return it.
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
obj
Laplacian of the input adjacency matrix
Examples
--------
>>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] | netlsd/util.py | mat_to_laplacian | xgfs/NetLSD | 49 | python | def mat_to_laplacian(mat, normalized):
'\n Converts a sparse or dense adjacency matrix to Laplacian.\n \n Parameters\n ----------\n mat : obj\n Input adjacency matrix. If it is a Laplacian matrix already, return it.\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n obj\n Laplacian of the input adjacency matrix\n\n Examples\n --------\n >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n '
if sps.issparse(mat):
if np.all((mat.diagonal() >= 0)):
if np.all(((mat - sps.diags(mat.diagonal())).data <= 0)):
return mat
elif np.all((np.diag(mat) >= 0)):
if np.all(((mat - np.diag(mat)) <= 0)):
return mat
deg = np.squeeze(np.asarray(mat.sum(axis=1)))
if sps.issparse(mat):
L = (sps.diags(deg) - mat)
else:
L = (np.diag(deg) - mat)
if (not normalized):
return L
with np.errstate(divide='ignore'):
sqrt_deg = (1.0 / np.sqrt(deg))
sqrt_deg[(sqrt_deg == np.inf)] = 0
if sps.issparse(mat):
sqrt_deg_mat = sps.diags(sqrt_deg)
else:
sqrt_deg_mat = np.diag(sqrt_deg)
return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat) | def mat_to_laplacian(mat, normalized):
'\n Converts a sparse or dense adjacency matrix to Laplacian.\n \n Parameters\n ----------\n mat : obj\n Input adjacency matrix. If it is a Laplacian matrix already, return it.\n normalized : bool\n Whether to use normalized Laplacian.\n Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.\n\n Returns\n -------\n obj\n Laplacian of the input adjacency matrix\n\n Examples\n --------\n >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)\n [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]\n\n '
if sps.issparse(mat):
if np.all((mat.diagonal() >= 0)):
if np.all(((mat - sps.diags(mat.diagonal())).data <= 0)):
return mat
elif np.all((np.diag(mat) >= 0)):
if np.all(((mat - np.diag(mat)) <= 0)):
return mat
deg = np.squeeze(np.asarray(mat.sum(axis=1)))
if sps.issparse(mat):
L = (sps.diags(deg) - mat)
else:
L = (np.diag(deg) - mat)
if (not normalized):
return L
with np.errstate(divide='ignore'):
sqrt_deg = (1.0 / np.sqrt(deg))
sqrt_deg[(sqrt_deg == np.inf)] = 0
if sps.issparse(mat):
sqrt_deg_mat = sps.diags(sqrt_deg)
else:
sqrt_deg_mat = np.diag(sqrt_deg)
return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat)<|docstring|>Converts a sparse or dense adjacency matrix to Laplacian.
Parameters
----------
mat : obj
Input adjacency matrix. If it is a Laplacian matrix already, return it.
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
obj
Laplacian of the input adjacency matrix
Examples
--------
>>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]<|endoftext|> |
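A worked check of the normalization step above on the triangle graph: every degree in K3 is 2, so the normalized Laplacian is simply I - A/2 (NumPy assumed):

import numpy as np

A = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
deg = A.sum(axis=1)                        # [2, 2, 2]
L = np.diag(deg) - A                       # [[2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
d_inv_sqrt = np.diag(1.0 / np.sqrt(deg))
L_norm = d_inv_sqrt.dot(L).dot(d_inv_sqrt)
assert np.allclose(L_norm, np.eye(3) - A / 2)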
b1a26a50a6d039862ff3b4a223a605abe99413a83e88bc5ebb2a0f4a03624046 | def updown_linear_approx(eigvals_lower, eigvals_upper, nv):
'\n Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.\n \n Parameters\n ----------\n eigvals_lower : numpy.ndarray\n Lower part of the spectrum, sorted\n eigvals_upper : numpy.ndarray\n Upper part of the spectrum, sorted\n nv : int\n Total number of nodes (eigenvalues) in the graph.\n\n Returns\n -------\n numpy.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)\n array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n '
nal = len(eigvals_lower)
nau = len(eigvals_upper)
if (nv < (nal + nau)):
raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
ret = np.zeros(nv)
ret[:nal] = eigvals_lower
ret[(- nau):] = eigvals_upper
ret[(nal - 1):((- nau) + 1)] = np.linspace(eigvals_lower[(- 1)], eigvals_upper[0], (((nv - nal) - nau) + 2))
return ret | Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.
Parameters
----------
eigvals_lower : numpy.ndarray
Lower part of the spectrum, sorted
eigvals_upper : numpy.ndarray
Upper part of the spectrum, sorted
nv : int
Total number of nodes (eigenvalues) in the graph.
Returns
-------
numpy.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
array([1, 2, 3, 4, 5, 6, 7, 8, 9]) | netlsd/util.py | updown_linear_approx | xgfs/NetLSD | 49 | python | def updown_linear_approx(eigvals_lower, eigvals_upper, nv):
'\n Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.\n \n Parameters\n ----------\n eigvals_lower : numpy.ndarray\n Lower part of the spectrum, sorted\n eigvals_upper : numpy.ndarray\n Upper part of the spectrum, sorted\n nv : int\n Total number of nodes (eigenvalues) in the graph.\n\n Returns\n -------\n numpy.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)\n array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n '
nal = len(eigvals_lower)
nau = len(eigvals_upper)
if (nv < (nal + nau)):
raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
ret = np.zeros(nv)
ret[:nal] = eigvals_lower
ret[(- nau):] = eigvals_upper
ret[(nal - 1):((- nau) + 1)] = np.linspace(eigvals_lower[(- 1)], eigvals_upper[0], (((nv - nal) - nau) + 2))
return ret | def updown_linear_approx(eigvals_lower, eigvals_upper, nv):
'\n Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.\n \n Parameters\n ----------\n eigvals_lower : numpy.ndarray\n Lower part of the spectrum, sorted\n eigvals_upper : numpy.ndarray\n Upper part of the spectrum, sorted\n nv : int\n Total number of nodes (eigenvalues) in the graph.\n\n Returns\n -------\n numpy.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)\n array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n '
nal = len(eigvals_lower)
nau = len(eigvals_upper)
if (nv < (nal + nau)):
raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
ret = np.zeros(nv)
ret[:nal] = eigvals_lower
ret[(- nau):] = eigvals_upper
ret[(nal - 1):((- nau) + 1)] = np.linspace(eigvals_lower[(- 1)], eigvals_upper[0], (((nv - nal) - nau) + 2))
return ret<|docstring|>Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.
Parameters
----------
eigvals_lower : numpy.ndarray
Lower part of the spectrum, sorted
eigvals_upper : numpy.ndarray
Upper part of the spectrum, sorted
nv : int
Total number of nodes (eigenvalues) in the graph.
Returns
-------
numpy.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
array([1, 2, 3, 4, 5, 6, 7, 8, 9])<|endoftext|> |
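The approximation above pins the computed extremes and fills the middle with a straight line; reproducing the docstring example by hand (NumPy assumed):

import numpy as np

lo, up, nv = np.array([1.0, 2.0, 3.0]), np.array([7.0, 8.0, 9.0]), 9
ret = np.zeros(nv)
ret[:3] = lo
ret[-3:] = up
# The linspace spans lo[-1]..up[0] over nv - 3 - 3 + 2 = 5 points,
# rewriting the two shared boundary entries with the same values.
ret[2:-2] = np.linspace(lo[-1], up[0], 5)
assert np.allclose(ret, np.arange(1, 10))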
770b7ad7429d651f12d002729120e94a580856b9525f131d9f7782b100ed1a80 | def eigenvalues_auto(mat, n_eivals='auto'):
"\n Automatically computes the spectrum of a given Laplacian matrix.\n \n Parameters\n ----------\n mat : numpy.ndarray or scipy.sparse\n Laplacian matrix\n n_eivals : string or int or tuple\n Number of eigenvalues to compute / use for approximation.\n If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.\n If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.\n If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.\n\n Returns\n -------\n np.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')\n array([0, 3, 3])\n\n "
do_full = True
n_lower = 150
n_upper = 150
nv = mat.shape[0]
if (n_eivals == 'auto'):
if (mat.shape[0] > 1024):
do_full = False
if (n_eivals == 'full'):
do_full = True
if isinstance(n_eivals, int):
n_lower = n_upper = n_eivals
do_full = False
if isinstance(n_eivals, tuple):
(n_lower, n_upper) = n_eivals
do_full = False
if (do_full and sps.issparse(mat)):
mat = mat.todense()
if sps.issparse(mat):
if (n_lower == n_upper):
tr_eivals = spsl.eigsh(mat, (2 * n_lower), which='BE', return_eigenvectors=False)
return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv)
else:
lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::(- 1)]
up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False)
return updown_linear_approx(lo_eivals, up_eivals, nv)
elif do_full:
return spl.eigvalsh(mat)
else:
lo_eivals = spl.eigvalsh(mat, eigvals=(0, (n_lower - 1)))
up_eivals = spl.eigvalsh(mat, eigvals=(((nv - n_upper) - 1), (nv - 1)))
return updown_linear_approx(lo_eivals, up_eivals, nv) | Automatically computes the spectrum of a given Laplacian matrix.
Parameters
----------
mat : numpy.ndarray or scipy.sparse
Laplacian matrix
n_eivals : string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto'. 'auto' lets the program decide based on the matrix size: the full spectrum is computed for matrices with at most 1024 rows, larger ones are approximated from 150 eigenvalues on each side. 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
Returns
-------
np.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')
array([0, 3, 3]) | netlsd/util.py | eigenvalues_auto | xgfs/NetLSD | 49 | python | def eigenvalues_auto(mat, n_eivals='auto'):
"\n Automatically computes the spectrum of a given Laplacian matrix.\n \n Parameters\n ----------\n mat : numpy.ndarray or scipy.sparse\n Laplacian matrix\n n_eivals : string or int or tuple\n Number of eigenvalues to compute / use for approximation.\n If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.\n If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.\n If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.\n\n Returns\n -------\n np.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')\n array([0, 3, 3])\n\n "
do_full = True
n_lower = 150
n_upper = 150
nv = mat.shape[0]
if (n_eivals == 'auto'):
if (mat.shape[0] > 1024):
do_full = False
if (n_eivals == 'full'):
do_full = True
if isinstance(n_eivals, int):
n_lower = n_upper = n_eivals
do_full = False
if isinstance(n_eivals, tuple):
(n_lower, n_upper) = n_eivals
do_full = False
if (do_full and sps.issparse(mat)):
mat = mat.todense()
if sps.issparse(mat):
if (n_lower == n_upper):
tr_eivals = spsl.eigsh(mat, (2 * n_lower), which='BE', return_eigenvectors=False)
return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv)
else:
lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::(- 1)]
up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False)
return updown_linear_approx(lo_eivals, up_eivals, nv)
elif do_full:
return spl.eigvalsh(mat)
else:
lo_eivals = spl.eigvalsh(mat, eigvals=(0, (n_lower - 1)))
up_eivals = spl.eigvalsh(mat, eigvals=(((nv - n_upper) - 1), (nv - 1)))
return updown_linear_approx(lo_eivals, up_eivals, nv) | def eigenvalues_auto(mat, n_eivals='auto'):
"\n Automatically computes the spectrum of a given Laplacian matrix.\n \n Parameters\n ----------\n mat : numpy.ndarray or scipy.sparse\n Laplacian matrix\n n_eivals : string or int or tuple\n Number of eigenvalues to compute / use for approximation.\n If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.\n If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.\n If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.\n\n Returns\n -------\n np.ndarray\n Vector of approximated eigenvalues\n\n Examples\n --------\n >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')\n array([0, 3, 3])\n\n "
do_full = True
n_lower = 150
n_upper = 150
nv = mat.shape[0]
if (n_eivals == 'auto'):
if (mat.shape[0] > 1024):
do_full = False
if (n_eivals == 'full'):
do_full = True
if isinstance(n_eivals, int):
n_lower = n_upper = n_eivals
do_full = False
if isinstance(n_eivals, tuple):
(n_lower, n_upper) = n_eivals
do_full = False
if (do_full and sps.issparse(mat)):
mat = mat.todense()
if sps.issparse(mat):
if (n_lower == n_upper):
tr_eivals = spsl.eigsh(mat, (2 * n_lower), which='BE', return_eigenvectors=False)
return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv)
else:
lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::(- 1)]
up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False)
return updown_linear_approx(lo_eivals, up_eivals, nv)
elif do_full:
return spl.eigvalsh(mat)
else:
lo_eivals = spl.eigvalsh(mat, eigvals=(0, (n_lower - 1)))
up_eivals = spl.eigvalsh(mat, eigvals=(((nv - n_upper) - 1), (nv - 1)))
return updown_linear_approx(lo_eivals, up_eivals, nv)<|docstring|>Automatically computes the spectrum of a given Laplacian matrix.
Parameters
----------
mat : numpy.ndarray or scipy.sparse
Laplacian matrix
n_eivals : string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto'. 'auto' lets the program decide based on the matrix size: the full spectrum is computed for matrices with at most 1024 rows, larger ones are approximated from 150 eigenvalues on each side. 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
Returns
-------
np.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')
array([0, 3, 3])<|endoftext|> |
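For small matrices the 'auto' branch above reduces to a dense full solve; a sketch for the path graph on three nodes, whose unnormalized Laplacian spectrum is 0, 1, 3:

import numpy as np

L = np.array([[1, -1, 0], [-1, 2, -1], [0, -1, 1]])   # path graph P3
assert np.allclose(np.linalg.eigvalsh(L), [0.0, 1.0, 3.0])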
d58a85b4582622e5dd93d445e5def6c12a3e3f08fd094b58cab8f59ef14cead9 | def setup():
'\n Setup.\n :return: None.\n '
pass | Setup.
:return: None. | tests/pptc/test_triangulator.py | setup | schreck61/py-bbn | 0 | python | def setup():
'\n Setup.\n :return: None.\n '
pass | def setup():
'\n Setup.\n :return: None.\n '
pass<|docstring|>Setup.
:return: None.<|endoftext|> |
79222015af3d05cc58d65a83a5d85b017912be9c1680fef9d497761d0b77eadf | def teardown():
'\n Teardown.\n :return: None.\n '
pass | Teardown.
:return: None. | tests/pptc/test_triangulator.py | teardown | schreck61/py-bbn | 0 | python | def teardown():
'\n Teardown.\n :return: None.\n '
pass | def teardown():
'\n Teardown.\n :return: None.\n '
pass<|docstring|>Teardown.
:return: None.<|endoftext|> |
6f946e2c68971a93371a3cce8e0368426b5eb467e1877dfdccdf651c6aed7a91 | @with_setup(setup, teardown)
def test_triangulator():
'\n Tests triangulation.\n :return: None.\n '
bbn = BbnUtil.get_huang_graph()
PotentialInitializer.init(bbn)
ug = Moralizer.moralize(bbn)
cliques = Triangulator.triangulate(ug) | Tests triangulation.
:return: None. | tests/pptc/test_triangulator.py | test_triangulator | schreck61/py-bbn | 0 | python | @with_setup(setup, teardown)
def test_triangulator():
'\n Tests triangulation.\n :return: None.\n '
bbn = BbnUtil.get_huang_graph()
PotentialInitializer.init(bbn)
ug = Moralizer.moralize(bbn)
cliques = Triangulator.triangulate(ug) | @with_setup(setup, teardown)
def test_triangulator():
'\n Tests triangulation.\n :return: None.\n '
bbn = BbnUtil.get_huang_graph()
PotentialInitializer.init(bbn)
ug = Moralizer.moralize(bbn)
cliques = Triangulator.triangulate(ug)<|docstring|>Tests triangulation.
:return: None.<|endoftext|> |
ed45e4014dbca4402501b242762eebdae69862668547c586f2c9c7687f438fb0 | def maxProfit1(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
(first_buy, first_sell, second_buy, second_sell) = ((- sys.maxsize), 0, (- sys.maxsize), 0)
for price in prices:
first_buy = max(first_buy, (- price))
first_sell = max(first_sell, (price + first_buy))
second_buy = max(second_buy, (first_sell - price))
second_sell = max(second_sell, (price + second_buy))
return second_sell | :type prices: List[int]
:rtype: int | toTheMoon/leetcode_123_BestTimetoBuyandSellStockIII.py | maxProfit1 | jercas/offer66-leetcode-newcode | 0 | python | def maxProfit1(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
(first_buy, first_sell, second_buy, second_sell) = ((- sys.maxsize), 0, (- sys.maxsize), 0)
for price in prices:
first_buy = max(first_buy, (- price))
first_sell = max(first_sell, (price + first_buy))
second_buy = max(second_buy, (first_sell - price))
second_sell = max(second_sell, (price + second_buy))
return second_sell | def maxProfit1(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
(first_buy, first_sell, second_buy, second_sell) = ((- sys.maxsize), 0, (- sys.maxsize), 0)
for price in prices:
first_buy = max(first_buy, (- price))
first_sell = max(first_sell, (price + first_buy))
second_buy = max(second_buy, (first_sell - price))
second_sell = max(second_sell, (price + second_buy))
return second_sell<|docstring|>:type prices: List[int]
:rtype: int<|endoftext|> |
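The four rolling variables above are the best cash balance after each of the four actions; restated as a free function and traced on the classic LeetCode input:

import sys

def max_profit_two(prices):
    b1, s1, b2, s2 = -sys.maxsize, 0, -sys.maxsize, 0
    for p in prices:
        b1 = max(b1, -p)        # best balance after the first buy
        s1 = max(s1, p + b1)    # ... after the first sell
        b2 = max(b2, s1 - p)    # ... after the second buy
        s2 = max(s2, p + b2)    # ... after the second sell
    return s2

# Buy at 0, sell at 3, buy at 1, sell at 4: total profit 6.
assert max_profit_two([3, 3, 5, 0, 0, 3, 1, 4]) == 6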
113e0322f55ccf30243b2643137c9ec03a5ace7f8e7589d7ea1aef26e18ea038 | def maxProfit2(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
if (len(prices) <= 1):
return 0
(left, right) = (np.zeros(len(prices)), np.zeros(len(prices)))
(min_price, max_price) = (prices[0], prices[(- 1)])
for i in range(1, len(prices)):
left[i] = max(left[(i - 1)], (prices[i] - min_price))
min_price = min(min_price, prices[i])
for j in range((len(prices) - 2), (- 1), (- 1)):
right[j] = max(right[(j + 1)], (max_price - prices[j]))
max_price = max(max_price, prices[j])
res = [(left[k] + right[k]) for k in range(len(prices))]
return max(res) | :type prices: List[int]
:rtype: int | toTheMoon/leetcode_123_BestTimetoBuyandSellStockIII.py | maxProfit2 | jercas/offer66-leetcode-newcode | 0 | python | def maxProfit2(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
if (len(prices) <= 1):
return 0
(left, right) = (np.zeros(len(prices)), np.zeros(len(prices)))
(min_price, max_price) = (prices[0], prices[(- 1)])
for i in range(1, len(prices)):
left[i] = max(left[(i - 1)], (prices[i] - min_price))
min_price = min(min_price, prices[i])
for j in range((len(prices) - 2), (- 1), (- 1)):
right[j] = max(right[(j + 1)], (max_price - prices[j]))
max_price = max(max_price, prices[j])
res = [(left[k] + right[k]) for k in range(len(prices))]
return max(res) | def maxProfit2(self, prices):
'\n :type prices: List[int]\n :rtype: int\n '
if (len(prices) <= 1):
return 0
(left, right) = (np.zeros(len(prices)), np.zeros(len(prices)))
(min_price, max_price) = (prices[0], prices[(- 1)])
for i in range(1, len(prices)):
left[i] = max(left[(i - 1)], (prices[i] - min_price))
min_price = min(min_price, prices[i])
for j in range((len(prices) - 2), (- 1), (- 1)):
right[j] = max(right[(j + 1)], (max_price - prices[j]))
max_price = max(max_price, prices[j])
res = [(left[k] + right[k]) for k in range(len(prices))]
return max(res)<|docstring|>:type prices: List[int]
:rtype: int<|endoftext|> |
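maxProfit2 reaches the same optimum by splitting the timeline: left[k] is the best single-transaction profit over prices[0..k], right[k] over prices[k..], and the answer is the best split point. On the same input as before:

import numpy as np

prices = [3, 3, 5, 0, 0, 3, 1, 4]
n = len(prices)
left, right = np.zeros(n), np.zeros(n)
lo, hi = prices[0], prices[-1]
for i in range(1, n):
    left[i] = max(left[i - 1], prices[i] - lo)
    lo = min(lo, prices[i])
for j in range(n - 2, -1, -1):
    right[j] = max(right[j + 1], hi - prices[j])
    hi = max(hi, prices[j])
assert max(left + right) == 6   # e.g. split after day 3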
74ff21391d708bf52671e424e44e26a6782c4cc02fe53b22b7049f44f28f8d0e | def __init__(self, empty_size=334, **kwargs):
' Initialize the SparseCache\n 334 is the file size of a 256x256 transparent PNG\n '
self.empty_size = empty_size
return Disk.__init__(self, **kwargs) | Initialize the SparseCache
334 is the file size of a 256x256 transparent PNG | stamen/__init__.py | __init__ | stamen/tilestache-goodies | 2 | python | def __init__(self, empty_size=334, **kwargs):
' Initialize the SparseCache\n 334 is the file size of a 256x256 transparent PNG\n '
self.empty_size = empty_size
return Disk.__init__(self, **kwargs) | def __init__(self, empty_size=334, **kwargs):
' Initialize the SparseCache\n 334 is the file size of a 256x256 transparent PNG\n '
self.empty_size = empty_size
return Disk.__init__(self, **kwargs)<|docstring|>Initialize the SparseCache
334 is the file size of a 256x256 transparent PNG<|endoftext|> |
5a1f58ede29dccdfabf21300bf85ab2842ed876e0ad14a3ea151f04edbd57f66 | def read(self, layer, coord, format):
' Read a cached tile.\n '
fullpath = self._fullpath(layer, coord, format)
if (not exists(fullpath)):
return None
if (os.stat(fullpath).st_size == self.empty_size):
raise TheTileLeftANote(status_code=404, emit_content_type=False)
return Disk.read(self, layer, coord, format) | Read a cached tile. | stamen/__init__.py | read | stamen/tilestache-goodies | 2 | python | def read(self, layer, coord, format):
' \n '
fullpath = self._fullpath(layer, coord, format)
if (not exists(fullpath)):
return None
if (os.stat(fullpath).st_size == self.empty_size):
raise TheTileLeftANote(status_code=404, emit_content_type=False)
return Disk.read(self, layer, coord, format) | def read(self, layer, coord, format):
' \n '
fullpath = self._fullpath(layer, coord, format)
if (not exists(fullpath)):
return None
if (os.stat(fullpath).st_size == self.empty_size):
raise TheTileLeftANote(status_code=404, emit_content_type=False)
return Disk.read(self, layer, coord, format)<|docstring|>Read a cached tile.<|endoftext|> |
ccfa2cc670887b4873352fef724dacf6af38965b0990f0106e57362ec9696cce | def send_request(self, method=None, params={}, request=None, loop=None):
'Send request object and parse response.'
if (loop is None):
loop = self._loop
if (not request):
assert (method is not None), 'No method specified!'
request = self._create_request(method, params)
self.last_request = request
response = self._send_and_recv_data(request, loop)
for i in range(10000):
if (response is None):
self.logger.error('No data received.')
return
if self._check_response(json.loads(request), response):
(error_flag, error_msg) = self._check_error(response)
if (i > 0):
self.logger.info('%d non-response messages skipped.', i)
return self._parse_response(error_flag, error_msg, response)
else:
while self._loop.is_running():
pass
response = loop.run_until_complete(self._recv_data())
self.logger.error('ID mismatch! Could not find response message.')
return | Send request object and parse response. | cortex2/lib/WebsocketClient/websocket_client.py | send_request | lowenhere/emotiv-cortex2-python-client | 10 | python | def send_request(self, method=None, params={}, request=None, loop=None):
if (loop is None):
loop = self._loop
if (not request):
assert (method is not None), 'No method specified!'
request = self._create_request(method, params)
self.last_request = request
response = self._send_and_recv_data(request, loop)
for i in range(10000):
if (response is None):
self.logger.error('No data received.')
return
if self._check_response(json.loads(request), response):
(error_flag, error_msg) = self._check_error(response)
if (i > 0):
self.logger.info('%d non-response messages skipped.', i)
return self._parse_response(error_flag, error_msg, response)
else:
while self._loop.is_running():
pass
response = loop.run_until_complete(self._recv_data())
self.logger.error('ID mismatch! Could not find response message.')
return | def send_request(self, method=None, params={}, request=None, loop=None):
if (loop is None):
loop = self._loop
if (not request):
assert (method is not None), 'No method specified!'
request = self._create_request(method, params)
self.last_request = request
response = self._send_and_recv_data(request, loop)
for i in range(10000):
if (response is None):
self.logger.error('No data received.')
return
if self._check_response(json.loads(request), response):
(error_flag, error_msg) = self._check_error(response)
if (i > 0):
self.logger.info('%d non-response messages skipped.', i)
return self._parse_response(error_flag, error_msg, response)
else:
while self._loop.is_running():
pass
response = loop.run_until_complete(self._recv_data())
self.logger.error('ID mismatch! Could not find response message.')
return<|docstring|>Send request object and parse response.<|endoftext|> |
679dbdb6b4325519819ac2acc4ac61558a8074186ab76a2e0cd371b56a79100c | async def _send_data(self, data):
'Send data to websocket.'
try:
(await asyncio.wait_for(self._websocket.send(data), timeout=self.timeout))
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Sending took too long!')
except:
self.logger.error('Websocket connection broke! Reconnecting...')
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1)) | Send data to websocket. | cortex2/lib/WebsocketClient/websocket_client.py | _send_data | lowenhere/emotiv-cortex2-python-client | 10 | python | async def _send_data(self, data):
try:
(await asyncio.wait_for(self._websocket.send(data), timeout=self.timeout))
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Sending took too long!')
except:
self.logger.error('Websocket connection broke! Reconnecting...')
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1)) | async def _send_data(self, data):
try:
(await asyncio.wait_for(self._websocket.send(data), timeout=self.timeout))
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Sending took too long!')
except:
self.logger.error('Websocket connection broke! Reconnecting...')
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1))<|docstring|>Send data to websocket.<|endoftext|> |
020599afe0ed4b1f539ca8d85efe32e30205433b10ddf46a35479322e8dc7f6a | async def _recv_data(self):
'Receive data from websocket.'
try:
recv = (await asyncio.wait_for(self._websocket.recv(), timeout=self.timeout))
if (type(recv) is str):
return json.loads(recv)
else:
return
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Receiving took too long!')
return
except Exception as e:
self.logger.error('Websocket connection broke! Reconnecting...')
self.logger.info(str(e))
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1)) | Receive data from websocket. | cortex2/lib/WebsocketClient/websocket_client.py | _recv_data | lowenhere/emotiv-cortex2-python-client | 10 | python | async def _recv_data(self):
try:
recv = (await asyncio.wait_for(self._websocket.recv(), timeout=self.timeout))
if (type(recv) is str):
return json.loads(recv)
else:
return
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Receiving took too long!')
return
except Exception as e:
self.logger.error('Websocket connection broke! Reconnecting...')
self.logger.info(str(e))
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1)) | async def _recv_data(self):
try:
recv = (await asyncio.wait_for(self._websocket.recv(), timeout=self.timeout))
if (type(recv) is str):
return json.loads(recv)
else:
return
except asyncio.TimeoutError:
self.logger.warning('Asyncio Timeout: Receiving took too long!')
return
except Exception as e:
self.logger.error('Websocket connection broke! Reconnecting...')
self.logger.info(str(e))
websockets.connect(self._url, ssl=self._ssl_context)
(await asyncio.sleep(1))<|docstring|>Receive data from websocket.<|endoftext|> |
b81dfb94b0a4fe2c8234a983366045cfecd1e333d3af4e0137ea81c531489eb8 | def _send_and_recv_data(self, data, loop=None):
'Send and receive data to and from websocket.'
if (loop is None):
loop = self._loop
while self._loop.is_running():
pass
loop.run_until_complete(self._send_data(data))
return loop.run_until_complete(self._recv_data()) | Send and receive data to and from websocket. | cortex2/lib/WebsocketClient/websocket_client.py | _send_and_recv_data | lowenhere/emotiv-cortex2-python-client | 10 | python | def _send_and_recv_data(self, data, loop=None):
if (loop is None):
loop = self._loop
while self._loop.is_running():
pass
loop.run_until_complete(self._send_data(data))
return loop.run_until_complete(self._recv_data()) | def _send_and_recv_data(self, data, loop=None):
if (loop is None):
loop = self._loop
while self._loop.is_running():
pass
loop.run_until_complete(self._send_data(data))
return loop.run_until_complete(self._recv_data())<|docstring|>Send and receive data to and from websocket.<|endoftext|> |
3e20c68a5dcb9b082cccaf708a83c385bd26e632cf300e35707bcf8bbcb2912c | def _generate_id(self, size=6, chars=(string.ascii_letters + string.digits)):
'Generate random ID.'
return ''.join((secrets.choice(chars) for _ in range(size))) | Generate random ID. | cortex2/lib/WebsocketClient/websocket_client.py | _generate_id | lowenhere/emotiv-cortex2-python-client | 10 | python | def _generate_id(self, size=6, chars=(string.ascii_letters + string.digits)):
return ''.join((secrets.choice(chars) for _ in range(size))) | def _generate_id(self, size=6, chars=(string.ascii_letters + string.digits)):
return ''.join((secrets.choice(chars) for _ in range(size)))<|docstring|>Generate random ID.<|endoftext|>
ec6d9dd76532f846a649a90178957952481d3aae514275f413bea306eb6ff91f | def _check_response(self, send_dict, response):
'Check if response key matches.'
if (not self.check_response):
return True
try:
if (send_dict[self.response_key] == response[self.response_key]):
return True
else:
return False
except:
return False | Check if response key matches. | cortex2/lib/WebsocketClient/websocket_client.py | _check_response | lowenhere/emotiv-cortex2-python-client | 10 | python | def _check_response(self, send_dict, response):
if (not self.check_response):
return True
try:
if (send_dict[self.response_key] == response[self.response_key]):
return True
else:
return False
except:
return False | def _check_response(self, send_dict, response):
if (not self.check_response):
return True
try:
if (send_dict[self.response_key] == response[self.response_key]):
return True
else:
return False
except:
return False<|docstring|>Check if response key matches.<|endoftext|> |
96d690f9fcb4adc50cbe83e5f8d44b97e583d309e367138d7144b57f7619041c | def _parse_response(self, error_flag, error_msg, response):
'Return error message, result, or full result.'
if error_flag:
return error_msg
elif ('result' in response):
return response['result']
else:
return response | Return error message, result, or full result. | cortex2/lib/WebsocketClient/websocket_client.py | _parse_response | lowenhere/emotiv-cortex2-python-client | 10 | python | def _parse_response(self, error_flag, error_msg, response):
if error_flag:
return error_msg
elif ('result' in response):
return response['result']
else:
return response | def _parse_response(self, error_flag, error_msg, response):
if error_flag:
return error_msg
elif ('result' in response):
return response['result']
else:
return response<|docstring|>Return error message, result, or full result.<|endoftext|> |
9a75fa1c49443940f81850b18b326c5f104d8acad1bec8640138962d593b066a | def _create_request(self, method, params={}):
'Create request object.'
if self.check_response:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params, 'id': self._generate_id()})
else:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params}) | Create request object. | cortex2/lib/WebsocketClient/websocket_client.py | _create_request | lowenhere/emotiv-cortex2-python-client | 10 | python | def _create_request(self, method, params={}):
if self.check_response:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params, 'id': self._generate_id()})
else:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params}) | def _create_request(self, method, params={}):
if self.check_response:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params, 'id': self._generate_id()})
else:
return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params})<|docstring|>Create request object.<|endoftext|> |
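Together, _create_request and _check_response implement plain JSON-RPC 2.0 with a random id echo check; the same round trip as a standalone sketch (the method name here is hypothetical):

import json
import secrets
import string

def create_request(method, params):
    rid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(6))
    return json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid})

request = create_request('getInfo', {'clientId': 'abc'})
response = json.loads(request)           # stand-in for the server's echo
assert json.loads(request)['id'] == response['id']   # accept only matching ids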
80e7be21938f2253a0fd472a7cd1fad94acb6d1a2894b7e58d41f44a830b808c | def __init__(self, g):
'\n Attributes initialised in __init__()\n base_url a string extracted from hydra.template\n parameters set of strings extracted from hydra.template\n variables set of strings extracted from hydra.variables\n defaults dictionary of required parameters and default values\n '
self.path_params = None
self._parse_template(g)
self._parse_parameters(g) | Attributes initialised in __init__()
base_url a string extracted from hydra.template
parameters set of strings extracted from hydra.template
variables set of strings extracted from hydra.variables
defaults dictionary of required parameters and default values | tools/check_webservice/webservice.py | __init__ | sgrellet/EPOS-DCAT-AP | 0 | python | def __init__(self, g):
'\n Attributes initialised in __init__()\n base_url a string extracted from hydra.template\n parameters set of strings extracted from hydra.template\n variables set of strings extracted from hydra.variables\n defaults dictionary of required parameters and default values\n '
self.path_params = None
self._parse_template(g)
self._parse_parameters(g) | def __init__(self, g):
'\n Attributes initialised in __init__()\n base_url a string extracted from hydra.template\n parameters set of strings extracted from hydra.template\n variables set of strings extracted from hydra.variables\n defaults dictionary of required parameters and default values\n '
self.path_params = None
self._parse_template(g)
self._parse_parameters(g)<|docstring|>Attributes initialised in __init__()
base_url a string extracted from hydra.template
parameters set of strings extracted from hydra.template
variables set of strings extracted from hydra.variables
defaults dictionary of required parameters and default values<|endoftext|> |
852cb52507db97377c78cf97a01cdb74d2fff4dd3a6bb8e8f3a0dd3e5d23f572 | def fourier_coefficient(self, paramA, paramB):
'\n This method creates a dictionary where the key is a reciprocal lattice vector and the value is the corresponding Fourier coefficient.\n :return: Dictionary\n '
index0 = (self.vectors_count - 1)
tab = ((paramA - paramB) * self.coefficient1d())
tab[index0] += paramB
assert (tab[index0].imag == 0.0)
return tab | This method creates a dictionary where the key is a reciprocal lattice vector and the value is the corresponding Fourier coefficient.
:return: Dictionary | src/eig_problem/LoadFFT.py | fourier_coefficient | szymag/ZFN | 2 | python | def fourier_coefficient(self, paramA, paramB):
'\n This method creates a dictionary where the key is a reciprocal lattice vector and the value is the corresponding Fourier coefficient.\n :return: Dictionary\n '
index0 = (self.vectors_count - 1)
tab = ((paramA - paramB) * self.coefficient1d())
tab[index0] += paramB
assert (tab[index0].imag == 0.0)
return tab | def fourier_coefficient(self, paramA, paramB):
'\n This method creates a dictionary where the key is a reciprocal lattice vector and the value is the corresponding Fourier coefficient.\n :return: Dictionary\n '
index0 = (self.vectors_count - 1)
tab = ((paramA - paramB) * self.coefficient1d())
tab[index0] += paramB
assert (tab[index0].imag == 0.0)
return tab<|docstring|>This method creates a dictionary where the key is a reciprocal lattice vector and the value is the corresponding Fourier coefficient.
:return: Dictionary<|endoftext|>
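The arithmetic in fourier_coefficient is the usual two-material expansion F(G) = (paramA - paramB) * c(G) + paramB * delta(G, 0), with the G = 0 coefficient stored at index vectors_count - 1; a self-contained sketch with hypothetical numbers:

import numpy as np

vectors_count = 2                              # coefficients for -G, 0, +G
c = np.array([0.1, 0.5, 0.1], dtype=complex)   # hypothetical shape coefficients
paramA, paramB = 111.0, 140.0                  # hypothetical material parameters
tab = (paramA - paramB) * c
tab[vectors_count - 1] += paramB               # shift only the mean (G = 0) term
assert tab[vectors_count - 1].imag == 0.0      # the assert mirrored from the source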
5becb7dc8238364d20a7770cc6c400a26f5fee915020a363440f32d2e2a3cfdc | def aitoff_galactic(RA, DEC, marker_size=3.5):
'\n Input coordinates must be in units of degrees.\n '
n = len(RA)
c_icrs = SkyCoord(ra=(RA * u.degree), dec=(DEC * u.degree), frame='icrs')
c_galactic = c_icrs.galactic
l = c_galactic.l.rad
for i in range(n):
if (l[i] > math.pi):
l[i] = (- ((2 * math.pi) - l[i]))
b = c_galactic.b.rad
fig = plt.figure('spec_view', figsize=(16, 10))
ax = fig.add_subplot(111, projection='aitoff')
ax.set_title('Galactic projection')
ax.grid(True)
ax.plot(l, b, 'ro', markersize=marker_size) | Input coordinates must be in units of degrees. | tools/projection.py | aitoff_galactic | yaoyuhan/starpy | 0 | python | def aitoff_galactic(RA, DEC, marker_size=3.5):
'\n \n '
n = len(RA)
c_icrs = SkyCoord(ra=(RA * u.degree), dec=(DEC * u.degree), frame='icrs')
c_galactic = c_icrs.galactic
l = c_galactic.l.rad
for i in range(n):
if (l[i] > math.pi):
l[i] = (- ((2 * math.pi) - l[i]))
b = c_galactic.b.rad
fig = plt.figure('spec_view', figsize=(16, 10))
ax = fig.add_subplot(111, projection='aitoff')
ax.set_title('Galactic projection')
ax.grid(True)
ax.plot(l, b, 'ro', markersize=marker_size) | def aitoff_galactic(RA, DEC, marker_size=3.5):
'\n \n '
n = len(RA)
c_icrs = SkyCoord(ra=(RA * u.degree), dec=(DEC * u.degree), frame='icrs')
c_galactic = c_icrs.galactic
l = c_galactic.l.rad
for i in range(n):
if (l[i] > math.pi):
l[i] = (- ((2 * math.pi) - l[i]))
b = c_galactic.b.rad
fig = plt.figure('spec_view', figsize=(16, 10))
ax = fig.add_subplot(111, projection='aitoff')
ax.set_title('Galactic projection')
ax.grid(True)
ax.plot(l, b, 'ro', markersize=marker_size)<|docstring|>Input coordinates must be in units of degrees.<|endoftext|>
87c2ddd2193d54b957ba8be8eec9ec95a650dca033e81e86f6c90584292178d1 | def diag(self):
'Diagonalise the operator\n\n Return eigenvalues and corresponding eigenvectors for this operator.\n\n Returns:\n (ndarray, ndarray) -- Eigenvalues and eigenvector matrix, as\n returned by numpy.linalg.eigh\n '
try:
dd = self._diagdata
if np.all((dd['matrix'] == self._matrix)):
return dd['eigh']
except AttributeError:
pass
eigh = np.linalg.eigh(self._matrix)
self._diagdata = {'matrix': self._matrix.copy(), 'eigh': eigh}
return eigh | Diagonalise the operator
Return eigenvalues and corresponding eigenvectors for this operator.
Returns:
(ndarray, ndarray) -- Eigenvalues and eigenvector matrix, as
returned by numpy.linalg.eigh | muspinsim/spinop.py | diag | muon-spectroscopy-computational-project/muspinsim | 2 | python | def diag(self):
'Diagonalise the operator\n\n Return eigenvalues and corresponding eigenvectors for this operator.\n\n Returns:\n (ndarray, ndarray) -- Eigenvalues and eigenvector matrix, as\n returned by numpy.linalg.eigh\n '
try:
dd = self._diagdata
if np.all((dd['matrix'] == self._matrix)):
return dd['eigh']
except AttributeError:
pass
eigh = np.linalg.eigh(self._matrix)
self._diagdata = {'matrix': self._matrix.copy(), 'eigh': eigh}
return eigh | def diag(self):
'Diagonalise the operator\n\n Return eigenvalues and corresponding eigenvectors for this operator.\n\n Returns:\n (ndarray, ndarray) -- Eigenvalues and eigenvector matrix, as\n returned by numpy.linalg.eigh\n '
try:
dd = self._diagdata
if np.all((dd['matrix'] == self._matrix)):
return dd['eigh']
except AttributeError:
pass
eigh = np.linalg.eigh(self._matrix)
self._diagdata = {'matrix': self._matrix.copy(), 'eigh': eigh}
return eigh<|docstring|>Diagonalise the operator
Return eigenvalues and corresponding eigenvectors for this operator.
Returns:
(ndarray, ndarray) -- Eigenvalues and eigenvector matrix, as
returned by numpy.linalg.eigh<|endoftext|> |
bf3694830d2cf35f93466ebc9d83eb382656c2adf6675b144377bee1295bf227 | def __init__(self, matrix, dim=None, hermtol=1e-06):
"Create a Operator object\n\n Create an object representing a spin operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square 2D array)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n hermtol {float} -- Tolerance used to check for hermitianity of the\n matrix (default: {1e-6})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
matrix = (np.array(matrix) + 0j)
if (not (matrix.shape[0] == matrix.shape[1])):
raise ValueError('Matrix passed to Operator must be square')
if (dim is None):
dim = (matrix.shape[0],)
elif (np.prod(dim) != matrix.shape[0]):
raise ValueError('Dimensions are not compatible with matrix')
self._dim = tuple(dim)
self._matrix = matrix
self._htol = hermtol
super(Operator, self).__init__() | Create an Operator object
Create an object representing a spin operator. These can
be manipulated by e.g. multiplying them by a scalar or among themselves
(equivalent to a dot product), or adding and subtracting them.
Arguments:
matrix {ndarray} -- Matrix describing the operator (must be a
square 2D array)
Keyword Arguments:
dim {(int,...)} -- Tuple of the dimensions of the operator. For example,
(2,2) corresponds to two 1/2 spins and a 4x4 matrix.
If not specified, it's taken from the size of
the matrix (default: {None})
hermtol {float} -- Tolerance used to check for hermiticity of the
matrix (default: {1e-6})
Raises:
ValueError -- Any of the passed values are invalid | muspinsim/spinop.py | __init__ | muon-spectroscopy-computational-project/muspinsim | 2 | python | def __init__(self, matrix, dim=None, hermtol=1e-06):
"Create a Operator object\n\n Create an object representing a spin operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square 2D array)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n hermtol {float} -- Tolerance used to check for hermitianity of the\n matrix (default: {1e-6})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
matrix = (np.array(matrix) + 0j)
if (not (matrix.shape[0] == matrix.shape[1])):
raise ValueError('Matrix passed to Operator must be square')
if (dim is None):
dim = (matrix.shape[0],)
elif (np.prod(dim) != matrix.shape[0]):
raise ValueError('Dimensions are not compatible with matrix')
self._dim = tuple(dim)
self._matrix = matrix
self._htol = hermtol
super(Operator, self).__init__() | def __init__(self, matrix, dim=None, hermtol=1e-06):
"Create a Operator object\n\n Create an object representing a spin operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square 2D array)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n hermtol {float} -- Tolerance used to check for hermitianity of the\n matrix (default: {1e-6})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
matrix = (np.array(matrix) + 0j)
if (not (matrix.shape[0] == matrix.shape[1])):
raise ValueError('Matrix passed to Operator must be square')
if (dim is None):
dim = (matrix.shape[0],)
elif (np.prod(dim) != matrix.shape[0]):
raise ValueError('Dimensions are not compatible with matrix')
self._dim = tuple(dim)
self._matrix = matrix
self._htol = hermtol
super(Operator, self).__init__()<|docstring|>Create an Operator object
Create an object representing a spin operator. These can
be manipulated by e.g. multiplying them by a scalar or among themselves
(equivalent to a dot product), or adding and subtracting them.
Arguments:
matrix {ndarray} -- Matrix describing the operator (must be a
square 2D array)
Keyword Arguments:
dim {(int,...)} -- Tuple of the dimensions of the operator. For example,
(2,2) corresponds to two 1/2 spins and a 4x4 matrix.
If not specified, it's taken from the size of
the matrix (default: {None})
hermtol {float} -- Tolerance used to check for hermiticity of the
matrix (default: {1e-6})
Raises:
ValueError -- Any of the passed values are invalid<|endoftext|> |
f09105b8559fa210e3ee488b2094c1f1c580b749c9d534b2810c8ce432e9ca0b | def dagger(self):
'Return the transpose conjugate of this Operator\n\n Return the transpose conjugate of this Operator\n\n Returns:\n Operator -- Transpose conjugate of this operator\n '
MyClass = self.__class__
ans = MyClass.__new__(MyClass)
ans._dim = tuple(self._dim)
ans._matrix = self.matrix.conj().T
return ans | Return the transpose conjugate of this Operator
Return the transpose conjugate of this Operator
Returns:
Operator -- Transpose conjugate of this operator | muspinsim/spinop.py | dagger | muon-spectroscopy-computational-project/muspinsim | 2 | python | def dagger(self):
'Return the transpose conjugate of this Operator\n\n Return the transpose conjugate of this Operator\n\n Returns:\n Operator -- Transpose conjugate of this operator\n '
MyClass = self.__class__
ans = MyClass.__new__(MyClass)
ans._dim = tuple(self._dim)
ans._matrix = self.matrix.conj().T
return ans | def dagger(self):
'Return the transpose conjugate of this Operator\n\n Return the transpose conjugate of this Operator\n\n Returns:\n Operator -- Transpose conjugate of this operator\n '
MyClass = self.__class__
ans = MyClass.__new__(MyClass)
ans._dim = tuple(self._dim)
ans._matrix = self.matrix.conj().T
return ans<|docstring|>Return the transpose conjugate of this Operator
Return the transpose conjugate of this Operator
Returns:
Operator -- Transpose conjugate of this operator<|endoftext|> |
efc28496fa98fcbc92083a024a7905c3971af8ad5e49f72c77f93ffa8fd695e8 | def kron(self, x):
'Tensor product between this and another Operator\n\n Performs a tensor product between this and another Operator,\n raising the overall rank of the tensor they represent.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n Operator -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Kronecker product with another Operator')
ans = self.__class__.__new__(self.__class__)
ans._dim = (self._dim + x._dim)
ans._matrix = np.kron(self._matrix, x._matrix)
return ans | Tensor product between this and another Operator
Performs a tensor product between this and another Operator,
raising the overall rank of the tensor they represent.
Arguments:
x {Operator} -- Other operator
Returns:
Operator -- Result
Raises:
ValueError -- Thrown if x is not the right type of object | muspinsim/spinop.py | kron | muon-spectroscopy-computational-project/muspinsim | 2 | python | def kron(self, x):
'Tensor product between this and another Operator\n\n Performs a tensor product between this and another Operator,\n raising the overall rank of the tensor they represent.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n Operator -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Kronecker product with another Operator')
ans = self.__class__.__new__(self.__class__)
ans._dim = (self._dim + x._dim)
ans._matrix = np.kron(self._matrix, x._matrix)
return ans | def kron(self, x):
'Tensor product between this and another Operator\n\n Performs a tensor product between this and another Operator,\n raising the overall rank of the tensor they represent.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n Operator -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Kronecker product with another Operator')
ans = self.__class__.__new__(self.__class__)
ans._dim = (self._dim + x._dim)
ans._matrix = np.kron(self._matrix, x._matrix)
return ans<|docstring|>Tensor product between this and another Operator
Performs a tensor product between this and another Operator,
raising the overall rank of the tensor they represent.
Arguments:
x {Operator} -- Other operator
Returns:
Operator -- Result
Raises:
ValueError -- Thrown if x is not the right type of object<|endoftext|> |
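kron composes operators by concatenating their dimension tuples and Kronecker-multiplying their matrices. A short numpy illustration of how the sizes combine (example matrices are mine, not from the source):

import numpy as np

A = np.diag([1.0, -1.0])   # acts on a 2-dimensional space, dims (2,)
B = np.eye(3)              # acts on a 3-dimensional space, dims (3,)
C = np.kron(A, B)          # composite operator on the joint (2, 3) space

print(C.shape)             # (6, 6) == (2*3, 2*3), matching dim = (2,) + (3,)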
9957656b8882d5c02f884b3cf3523fe0c615866582e30e85de10b34242d40655 | def hilbert_schmidt(self, x):
'Hilbert-Schmidt product between this and another Operator\n\n\n Performs a Hilbert-Schmidt product between this and another Operator,\n that acts as an inner product.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n number -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Hilbert-Schmidt product with another Operator')
if (not (x.dimension == self.dimension)):
raise ValueError('Operators must have the same dimension to perform Hilbert-Schmidt product')
A = self.matrix
B = x.matrix
return np.trace(np.dot(A.conj().T, B)) | Hilbert-Schmidt product between this and another Operator
Performs a Hilbert-Schmidt product between this and another Operator,
that acts as an inner product.
Arguments:
x {Operator} -- Other operator
Returns:
number -- Result
Raises:
ValueError -- Thrown if x is not the right type of object | muspinsim/spinop.py | hilbert_schmidt | muon-spectroscopy-computational-project/muspinsim | 2 | python | def hilbert_schmidt(self, x):
'Hilbert-Schmidt product between this and another Operator\n\n\n Performs a Hilbert-Schmidt product between this and another Operator,\n that acts as an inner product.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n number -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Hilbert-Schmidt product with another Operator')
if (not (x.dimension == self.dimension)):
raise ValueError('Operators must have the same dimension to perform Hilbert-Schmidt product')
A = self.matrix
B = x.matrix
return np.trace(np.dot(A.conj().T, B)) | def hilbert_schmidt(self, x):
'Hilbert-Schmidt product between this and another Operator\n\n\n Performs a Hilbert-Schmidt product between this and another Operator,\n that acts as an inner product.\n\n Arguments:\n x {Operator} -- Other operator\n\n Returns:\n number -- Result\n\n Raises:\n ValueError -- Thrown if x is not the right type of object\n '
if (not isinstance(x, Operator)):
raise ValueError('Can only perform Hilbert-Schmidt product with another Operator')
if (not (x.dimension == self.dimension)):
raise ValueError('Operators must have the same dimension to perform Hilbert-Schmidt product')
A = self.matrix
B = x.matrix
return np.trace(np.dot(A.conj().T, B))<|docstring|>Hilbert-Schmidt product between this and another Operator
Performs a Hilbert-Schmidt product between this and another Operator,
that acts as an inner product.
Arguments:
x {Operator} -- Other operator
Returns:
number -- Result
Raises:
ValueError -- Thrown if x is not the right type of object<|endoftext|> |
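The product computed above is Tr(A†B), which equals the elementwise sum over conj(A_ij) * B_ij. A quick numpy check with arbitrary matrices (values are illustrative):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
B = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))

hs = np.trace(A.conj().T @ B)                 # Tr(A^dagger B), as in the method
assert np.isclose(hs, np.sum(A.conj() * B))   # elementwise form of the same sum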
975aaaa05133e88793e45f00c399cd1e26ea2b85317c13fdfd72b4510c48786d | def basis_change(self, basis):
'Return a version of this Operator with different basis\n\n Transform this Operator to use a different basis. The basis\n must be a matrix of orthogonal vectors. Passing as basis\n the eigenvectors of the operator will diagonalise it.\n\n Arguments:\n basis {ndarray} -- Basis to transform the operator to.\n\n Returns:\n Operator -- Basis transformed version of this operator\n '
ans = self.clone()
ans._matrix = np.linalg.multi_dot([basis.T.conj(), ans._matrix, basis])
return ans | Return a version of this Operator with different basis
Transform this Operator to use a different basis. The basis
must be a matrix of orthogonal vectors. Passing as basis
the eigenvectors of the operator will diagonalise it.
Arguments:
basis {ndarray} -- Basis to transform the operator to.
Returns:
Operator -- Basis transformed version of this operator | muspinsim/spinop.py | basis_change | muon-spectroscopy-computational-project/muspinsim | 2 | python | def basis_change(self, basis):
'Return a version of this Operator with different basis\n\n Transform this Operator to use a different basis. The basis\n must be a matrix of orthogonal vectors. Passing as basis\n the eigenvectors of the operator will diagonalise it.\n\n Arguments:\n basis {ndarray} -- Basis to transform the operator to.\n\n Returns:\n Operator -- Basis transformed version of this operator\n '
ans = self.clone()
ans._matrix = np.linalg.multi_dot([basis.T.conj(), ans._matrix, basis])
return ans | def basis_change(self, basis):
'Return a version of this Operator with different basis\n\n Transform this Operator to use a different basis. The basis\n must be a matrix of orthogonal vectors. Passing as basis\n the eigenvectors of the operator will diagonalise it.\n\n Arguments:\n basis {ndarray} -- Basis to transform the operator to.\n\n Returns:\n Operator -- Basis transformed version of this operator\n '
ans = self.clone()
ans._matrix = np.linalg.multi_dot([basis.T.conj(), ans._matrix, basis])
return ans<|docstring|>Return a version of this Operator with different basis
Transform this Operator to use a different basis. The basis
must be a matrix of orthogonal vectors. Passing as basis
the eigenvectors of the operator will diagonalise it.
Arguments:
basis {ndarray} -- Basis to transform the operator to.
Returns:
Operator -- Basis transformed version of this operator<|endoftext|> |
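Passing the operator's own eigenvectors as basis diagonalises it, since U†MU is diagonal when the columns of U are eigenvectors of M. A plain-numpy sketch of the same multi_dot product:

import numpy as np

H = np.array([[0.0, 1.0], [1.0, 0.0]])                # Hermitian example
evals, evecs = np.linalg.eigh(H)
D = np.linalg.multi_dot([evecs.conj().T, H, evecs])   # basis_change's product
assert np.allclose(D, np.diag(evals))                 # diagonal in the eigenbasis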
06b5b74cb393425d206b3f4aad259d52ff2f2f91a541d266582e217da0f207bd | @classmethod
def from_axes(self, Is=0.5, axes='x'):
"Construct a SpinOperator from spins and axes\n\n Construct a SpinOperator from a list of spin values and directions. For\n example, Is=[0.5, 0.5] axes=['x', 'z'] will create a SxIz operator between\n two spin 1/2 particles.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n axes {[str]} -- List of axes, can pass a single character if it's\n only one value. Each value can be x, y, z, +, -,\n or 0 (for the identity operator) (default: {'x'})\n\n Returns:\n SpinOperator -- Operator built according to specifications\n\n Raises:\n ValueError -- Any of the values passed is invalid\n "
if isinstance(Is, Number):
Is = [Is]
if ((len(Is) != len(axes)) or (len(Is) == 0)):
raise ValueError('Arrays of moments and axes must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, axis) in zip(Is, axes):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (axis in 'xyz+-0')):
raise ValueError('{0} is not a valid spin axis'.format(axis))
mvals = _mvals(I)
o = {'x': _Sx, 'y': _Sy, 'z': _Sz, '+': _Sp, '-': _Sm, '0': _S0}[axis](mvals)
matrices.append(o)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim) | Construct a SpinOperator from spins and axes
Construct a SpinOperator from a list of spin values and directions. For
example, Is=[0.5, 0.5] axes=['x', 'z'] will create a SxIz operator between
two spin 1/2 particles.
Keyword Arguments:
Is {[number]} -- List of spins (must be half-integers). Can pass a
number if it's only one value (default: {0.5})
axes {[str]} -- List of axes, can pass a single character if it's
only one value. Each value can be x, y, z, +, -,
or 0 (for the identity operator) (default: {'x'})
Returns:
SpinOperator -- Operator built according to specifications
Raises:
ValueError -- Any of the values passed is invalid | muspinsim/spinop.py | from_axes | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def from_axes(self, Is=0.5, axes='x'):
"Construct a SpinOperator from spins and axes\n\n Construct a SpinOperator from a list of spin values and directions. For\n example, Is=[0.5, 0.5] axes=['x', 'z'] will create a SxIz operator between\n two spin 1/2 particles.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n axes {[str]} -- List of axes, can pass a single character if it's\n only one value. Each value can be x, y, z, +, -,\n or 0 (for the identity operator) (default: {'x'})\n\n Returns:\n SpinOperator -- Operator built according to specifications\n\n Raises:\n ValueError -- Any of the values passed is invalid\n "
if isinstance(Is, Number):
Is = [Is]
if ((len(Is) != len(axes)) or (len(Is) == 0)):
raise ValueError('Arrays of moments and axes must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, axis) in zip(Is, axes):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (axis in 'xyz+-0')):
raise ValueError('{0} is not a valid spin axis'.format(axis))
mvals = _mvals(I)
o = {'x': _Sx, 'y': _Sy, 'z': _Sz, '+': _Sp, '-': _Sm, '0': _S0}[axis](mvals)
matrices.append(o)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim) | @classmethod
def from_axes(self, Is=0.5, axes='x'):
"Construct a SpinOperator from spins and axes\n\n Construct a SpinOperator from a list of spin values and directions. For\n example, Is=[0.5, 0.5] axes=['x', 'z'] will create a SxIz operator between\n two spin 1/2 particles.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n axes {[str]} -- List of axes, can pass a single character if it's\n only one value. Each value can be x, y, z, +, -,\n or 0 (for the identity operator) (default: {'x'})\n\n Returns:\n SpinOperator -- Operator built according to specifications\n\n Raises:\n ValueError -- Any of the values passed is invalid\n "
if isinstance(Is, Number):
Is = [Is]
if ((len(Is) != len(axes)) or (len(Is) == 0)):
raise ValueError('Arrays of moments and axes must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, axis) in zip(Is, axes):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (axis in 'xyz+-0')):
raise ValueError('{0} is not a valid spin axis'.format(axis))
mvals = _mvals(I)
o = {'x': _Sx, 'y': _Sy, 'z': _Sz, '+': _Sp, '-': _Sm, '0': _S0}[axis](mvals)
matrices.append(o)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim)<|docstring|>Construct a SpinOperator from spins and axes
Construct a SpinOperator from a list of spin values and directions. For
example, Is=[0.5, 0.5] axes=['x', 'z'] will create a SxIz operator between
two spin 1/2 particles.
Keyword Arguments:
Is {[number]} -- List of spins (must be half-integers). Can pass a
number if it's only one value (default: {0.5})
axes {[str]} -- List of axes, can pass a single character if it's
only one value. Each value can be x, y, z, +, -,
or 0 (for the identity operator) (default: {'x'})
Returns:
SpinOperator -- Operator built according to specifications
Raises:
ValueError -- Any of the values passed is invalid<|endoftext|> |
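A hedged usage sketch of from_axes (it assumes the muspinsim package is installed, with the import path taken from the muspinsim/spinop.py entry above). It checks that the two-spin constructor agrees with an explicit kron of single-spin operators, which holds by construction:

import numpy as np
from muspinsim.spinop import SpinOperator   # assumes muspinsim is installed

SxIz = SpinOperator.from_axes([0.5, 0.5], 'xz')   # S_x I_z for two spin-1/2 particles
Sx = SpinOperator.from_axes(0.5, 'x')
Iz = SpinOperator.from_axes(0.5, 'z')

assert np.allclose(SxIz.matrix, Sx.kron(Iz).matrix)   # same Kronecker construction
print(SxIz.dimension)                                 # (2, 2)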
8db31ad294b86538af96a0e204fe4e73804aca0f64c0923415abaf20be011188 | def __init__(self, matrix, dim=None):
"Create a DensityOperator object\n\n Create an object representing a density operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square hermitian 2D array and have non-zero\n trace; will be normalised to have trace 1)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
super(DensityOperator, self).__init__(matrix, dim)
tr = np.trace(self._matrix)
if (tr == 0):
raise ValueError('Can not define a DensityOperator with zero trace')
else:
self.normalize()
if (not self.is_hermitian):
raise ValueError('DensityOperator must be hermitian!') | Create a DensityOperator object
Create an object representing a density operator. These can
be manipulated by e.g. multiplying them by a scalar or among themselves
(equivalent to a dot product), or adding and subtracting them.
Arguments:
matrix {ndarray} -- Matrix describing the operator (must be a
square hermitian 2D array and have non-zero
trace; will be normalised to have trace 1)
Keyword Arguments:
dim {(int,...)} -- Tuple of the dimensions of the operator. For example,
(2,2) corresponds to two 1/2 spins and a 4x4 matrix.
If not specified, it's taken from the size of
the matrix (default: {None})
Raises:
ValueError -- Any of the passed values are invalid | muspinsim/spinop.py | __init__ | muon-spectroscopy-computational-project/muspinsim | 2 | python | def __init__(self, matrix, dim=None):
"Create a DensityOperator object\n\n Create an object representing a density operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square hermitian 2D array and have non-zero\n trace; will be normalised to have trace 1)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
super(DensityOperator, self).__init__(matrix, dim)
tr = np.trace(self._matrix)
if (tr == 0):
raise ValueError('Can not define a DensityOperator with zero trace')
else:
self.normalize()
if (not self.is_hermitian):
raise ValueError('DensityOperator must be hermitian!') | def __init__(self, matrix, dim=None):
"Create a DensityOperator object\n\n Create an object representing a density operator. These can\n be manipulated by e.g. multiplying them by a scalar or among themselves\n (equivalent to a dot product), or adding and subtracting them.\n\n Arguments:\n matrix {ndarray} -- Matrix describing the operator (must be a\n square hermitian 2D array and have non-zero\n trace; will be normalised to have trace 1)\n\n Keyword Arguments:\n dim {(int,...)} -- Tuple of the dimensions of the operator. For example,\n (2,2) corresponds to two 1/2 spins and a 4x4 matrix.\n If not specified, it's taken from the size of\n the matrix (default: {None})\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
super(DensityOperator, self).__init__(matrix, dim)
tr = np.trace(self._matrix)
if (tr == 0):
raise ValueError('Can not define a DensityOperator with zero trace')
else:
self.normalize()
if (not self.is_hermitian):
raise ValueError('DensityOperator must be hermitian!')<|docstring|>Create a DensityOperator object
Create an object representing a density operator. These can
be manipulated by e.g. multiplying them by a scalar or among themselves
(equivalent to a dot product), or adding and subtracting them.
Arguments:
matrix {ndarray} -- Matrix describing the operator (must be a
square hermitian 2D array and have non-zero
trace; will be normalised to have trace 1)
Keyword Arguments:
dim {(int,...)} -- Tuple of the dimensions of the operator. For example,
(2,2) corresponds to two 1/2 spins and a 4x4 matrix.
If not specified, it's taken from the size of
the matrix (default: {None})
Raises:
ValueError -- Any of the passed values are invalid<|endoftext|> |
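A small numpy sketch of the kind of matrix the constructor above accepts: a pure-state projector is Hermitian and already has unit trace, so normalize() leaves it unchanged (the example state is mine):

import numpy as np

psi = np.array([1.0, 1.0j]) / np.sqrt(2)   # a spin-1/2 state along +y
rho = np.outer(psi, psi.conj())            # pure-state projector |psi><psi|

assert np.isclose(np.trace(rho), 1.0)      # unit trace: normalize() is a no-op here
assert np.allclose(rho, rho.conj().T)      # Hermitian, as the constructor requires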
db43ae03e27c0b71057197e4d2318c003ac7627b9f230b5dcb0f59600c2cf1f8 | @classmethod
def from_vectors(self, Is=0.5, vectors=[0, 0, 1], gammas=0):
"Construct a density matrix state from real space vectors\n\n Construct a density matrix state by specifying a number of spins and\n real space directions. The state is initialised as the tensor product\n of independent spin states each pointing in the specified direction.\n A parameter gamma can be used to include decoherence effects and thus\n dampen or zero out all off-diagonal elements.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n vectors {[ndarray]} -- List of vectors. Can pass a single 3D vector\n if it's only one value (default: {[0, 0, 1]})\n gammas {[number]} -- List of gamma factors. Can pass a single number\n if it's only one value. All off-diagonal\n elements for each corresponding density matrix\n will be multiplied by 1-gamma. (default: {0})\n\n Returns:\n DensityOperator -- The composite density operator\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
if isinstance(Is, Number):
Is = [Is]
if (len(np.array(vectors).shape) == 1):
vectors = [vectors]
if isinstance(gammas, Number):
gammas = [gammas]
if ((len(Is) != len(vectors)) or (len(Is) != len(gammas)) or (len(Is) == 0)):
raise ValueError('Arrays of moments, axes and gammas must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, vec, gamma) in zip(Is, vectors, gammas):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (len(vec) == 3)):
raise ValueError('{0} is not a valid 3D vector'.format(vec))
if ((gamma < 0) or (gamma > 1)):
raise ValueError('{0} is not a valid gamma value'.format(gamma))
mvals = _mvals(I)
S = [_Sx(mvals), _Sy(mvals), _Sz(mvals)]
o = sum([(S[i] * vec[i]) for i in range(3)])
(evals, evecs) = np.linalg.eigh(o)
psi = evecs[:, np.argmax(evals)]
m = (psi[:, None] * psi[None, :].conj())
m *= (((1 - gamma) * np.ones(m.shape)) + (gamma * np.eye(m.shape[0])))
matrices.append(m)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim) | Construct a density matrix state from real space vectors
Construct a density matrix state by specifying a number of spins and
real space directions. The state is initialised as the tensor product
of independent spin states each pointing in the specified direction.
A parameter gamma can be used to include decoherence effects and thus
dampen or zero out all off-diagonal elements.
Keyword Arguments:
Is {[number]} -- List of spins (must be half-integers). Can pass a
number if it's only one value (default: {0.5})
vectors {[ndarray]} -- List of vectors. Can pass a single 3D vector
if it's only one value (default: {[0, 0, 1]})
gammas {[number]} -- List of gamma factors. Can pass a single number
if it's only one value. All off-diagonal
elements for each corresponding density matrix
will be multiplied by 1-gamma. (default: {0})
Returns:
DensityOperator -- The composite density operator
Raises:
ValueError -- Any of the passed values are invalid | muspinsim/spinop.py | from_vectors | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def from_vectors(self, Is=0.5, vectors=[0, 0, 1], gammas=0):
"Construct a density matrix state from real space vectors\n\n Construct a density matrix state by specifying a number of spins and\n real space directions. The state is initialised as the tensor product\n of independent spin states each pointing in the specified direction.\n A parameter gamma can be used to include decoherence effects and thus\n dampen or zero out all off-diagonal elements.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n vectors {[ndarray]} -- List of vectors. Can pass a single 3D vector\n if it's only one value (default: {[0, 0, 1]})\n gammas {[number]} -- List of gamma factors. Can pass a single number\n if it's only one value. All off-diagonal\n elements for each corresponding density matrix\n will be multiplied by 1-gamma. (default: {0})\n\n Returns:\n DensityOperator -- The composite density operator\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
if isinstance(Is, Number):
Is = [Is]
if (len(np.array(vectors).shape) == 1):
vectors = [vectors]
if isinstance(gammas, Number):
gammas = [gammas]
if ((len(Is) != len(vectors)) or (len(Is) != len(gammas)) or (len(Is) == 0)):
raise ValueError('Arrays of moments, axes and gammas must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, vec, gamma) in zip(Is, vectors, gammas):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (len(vec) == 3)):
raise ValueError('{0} is not a valid 3D vector'.format(vec))
if ((gamma < 0) or (gamma > 1)):
raise ValueError('{0} is not a valid gamma value'.format(gamma))
mvals = _mvals(I)
S = [_Sx(mvals), _Sy(mvals), _Sz(mvals)]
o = sum([(S[i] * vec[i]) for i in range(3)])
(evals, evecs) = np.linalg.eigh(o)
psi = evecs[:, np.argmax(evals)]
m = (psi[:, None] * psi[None, :].conj())
m *= (((1 - gamma) * np.ones(m.shape)) + (gamma * np.eye(m.shape[0])))
matrices.append(m)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim) | @classmethod
def from_vectors(self, Is=0.5, vectors=[0, 0, 1], gammas=0):
"Construct a density matrix state from real space vectors\n\n Construct a density matrix state by specifying a number of spins and\n real space directions. The state is initialised as the tensor product\n of independent spin states each pointing in the specified direction.\n A parameter gamma can be used to include decoherence effects and thus\n dampen or zero out all off-diagonal elements.\n\n Keyword Arguments:\n Is {[number]} -- List of spins (must be half-integers). Can pass a\n number if it's only one value (default: {0.5})\n vectors {[ndarray]} -- List of vectors. Can pass a single 3D vector\n if it's only one value (default: {[0, 0, 1]})\n gammas {[number]} -- List of gamma factors. Can pass a single number\n if it's only one value. All off-diagonal\n elements for each corresponding density matrix\n will be multiplied by 1-gamma. (default: {0})\n\n Returns:\n DensityOperator -- The composite density operator\n\n Raises:\n ValueError -- Any of the passed values are invalid\n "
if isinstance(Is, Number):
Is = [Is]
if (len(np.array(vectors).shape) == 1):
vectors = [vectors]
if isinstance(gammas, Number):
gammas = [gammas]
if ((len(Is) != len(vectors)) or (len(Is) != len(gammas)) or (len(Is) == 0)):
raise ValueError('Arrays of moments, axes and gammas must have same length > 0')
dim = tuple((int(((2 * I) + 1)) for I in Is))
matrices = []
for (I, vec, gamma) in zip(Is, vectors, gammas):
if ((I % 0.5) or (I < 0.5)):
raise ValueError('{0} is not a valid spin value'.format(I))
if (not (len(vec) == 3)):
raise ValueError('{0} is not a valid 3D vector'.format(vec))
if ((gamma < 0) or (gamma > 1)):
raise ValueError('{0} is not a valid gamma value'.format(gamma))
mvals = _mvals(I)
S = [_Sx(mvals), _Sy(mvals), _Sz(mvals)]
o = sum([(S[i] * vec[i]) for i in range(3)])
(evals, evecs) = np.linalg.eigh(o)
psi = evecs[:, np.argmax(evals)]
m = (psi[:, None] * psi[None, :].conj())
m *= (((1 - gamma) * np.ones(m.shape)) + (gamma * np.eye(m.shape[0])))
matrices.append(m)
M = matrices[0]
for m in matrices[1:]:
M = np.kron(M, m)
return self(M, dim=dim)<|docstring|>Construct a density matrix state from real space vectors
Construct a density matrix state by specifying a number of spins and
real space directions. The state is initialised as the tensor product
of independent spin states each pointing in the specified direction.
A parameter gamma can be used to include decoherence effects and thus
dampen or zero out all off-diagonal elements.
Keyword Arguments:
Is {[number]} -- List of spins (must be half-integers). Can pass a
number if it's only one value (default: {0.5})
vectors {[ndarray]} -- List of vectors. Can pass a single 3D vector
if it's only one value (default: {[0, 0, 1]})
gammas {[number]} -- List of gamma factors. Can pass a single number
if it's only one value. All off-diagonal
elements for each corresponding density matrix
will be multiplied by 1-gamma. (default: {0})
Returns:
DensityOperator -- The composite density operator
Raises:
ValueError -- Any of the passed values are invalid<|endoftext|> |
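For a single spin-1/2 along +x with gamma = 0, the loop body above reduces to projecting onto the top eigenvector of Sx. A plain-numpy recomputation of that step, plus the decoherence mask at gamma = 1:

import numpy as np

Sx = np.array([[0.0, 0.5], [0.5, 0.0]], dtype=complex)
evals, evecs = np.linalg.eigh(Sx)
psi = evecs[:, np.argmax(evals)]           # eigenvector of the largest eigenvalue
rho = psi[:, None] * psi[None, :].conj()   # the pure-state density matrix

assert np.allclose(rho, np.full((2, 2), 0.5))   # spin pointing along +x

gamma = 1.0                                # full decoherence
rho_d = rho * ((1 - gamma) * np.ones((2, 2)) + gamma * np.eye(2))
assert np.allclose(rho_d, np.eye(2) / 2)   # off-diagonal elements removed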
80b0c1bf304bbe975aca5772b0488c72fdd958e6a0bab6e465ce2db9938ef976 | def normalize(self):
'Normalize this DensityOperator to have trace equal to one.'
self._matrix /= self.trace | Normalize this DensityOperator to have trace equal to one. | muspinsim/spinop.py | normalize | muon-spectroscopy-computational-project/muspinsim | 2 | python | def normalize(self):
self._matrix /= self.trace | def normalize(self):
self._matrix /= self.trace<|docstring|>Normalize this DensityOperator to have trace equal to one.<|endoftext|> |
cbe1a05ac9994d75199f0061bcaf8fbca134739f1fd816cd4c38d353536c8160 | def partial_trace(self, tracedim=[]):
'Perform a partial trace operation\n\n Perform a partial trace over the specified dimensions and return the\n resulting DensityOperator.\n\n Keyword Arguments:\n tracedim {[int]} -- Indices of dimensions to perform the partial\n trace over (default: {[]})\n\n Returns:\n DensityOperator -- Operator with partial trace\n '
dim = list(self._dim)
tdim = list(sorted(tracedim))
m = self._matrix.reshape((dim + dim))
while (len(tdim) > 0):
td = tdim.pop((- 1))
m = np.trace(m, axis1=td, axis2=(td + len(dim)))
dim.pop(td)
return DensityOperator(m, dim) | Perform a partial trace operation
Perform a partial trace over the specified dimensions and return the
resulting DensityOperator.
Keyword Arguments:
tracedim {[int]} -- Indices of dimensions to perform the partial
trace over (default: {[]})
Returns:
DensityOperator -- Operator with partial trace | muspinsim/spinop.py | partial_trace | muon-spectroscopy-computational-project/muspinsim | 2 | python | def partial_trace(self, tracedim=[]):
'Perform a partial trace operation\n\n Perform a partial trace over the specified dimensions and return the\n resulting DensityOperator.\n\n Keyword Arguments:\n tracedim {[int]} -- Indices of dimensions to perform the partial\n trace over (default: {[]})\n\n Returns:\n DensityOperator -- Operator with partial trace\n '
dim = list(self._dim)
tdim = list(sorted(tracedim))
m = self._matrix.reshape((dim + dim))
while (len(tdim) > 0):
td = tdim.pop((- 1))
m = np.trace(m, axis1=td, axis2=(td + len(dim)))
dim.pop(td)
return DensityOperator(m, dim) | def partial_trace(self, tracedim=[]):
'Perform a partial trace operation\n\n Perform a partial trace over the specified dimensions and return the\n resulting DensityOperator.\n\n Keyword Arguments:\n tracedim {[int]} -- Indices of dimensions to perform the partial\n trace over (default: {[]})\n\n Returns:\n DensityOperator -- Operator with partial trace\n '
dim = list(self._dim)
tdim = list(sorted(tracedim))
m = self._matrix.reshape((dim + dim))
while (len(tdim) > 0):
td = tdim.pop((- 1))
m = np.trace(m, axis1=td, axis2=(td + len(dim)))
dim.pop(td)
return DensityOperator(m, dim)<|docstring|>Perform a partial trace operation
Perform a partial trace over the specified dimensions and return the
resulting DensityOperator.
Keyword Arguments:
tracedim {[int]} -- Indices of dimensions to perform the partial
trace over (default: {[]})
Returns:
DensityOperator -- Operator with partial trace<|endoftext|> |
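The reshape-then-trace trick used by partial_trace, in plain numpy: tracing one spin out of a Bell-state density matrix leaves the maximally mixed state (example state is mine):

import numpy as np

bell = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)   # (|00> + |11>)/sqrt(2)
rho = np.outer(bell, bell.conj())                    # 4x4 matrix, dim = (2, 2)

m = rho.reshape(2, 2, 2, 2)                 # axes: (ket1, ket2, bra1, bra2)
reduced = np.trace(m, axis1=1, axis2=3)     # trace out the second spin, as in the method
assert np.allclose(reduced, np.eye(2) / 2)  # maximally mixed single spin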
1e3eed146419593d1fb8ffffc91ebec97c8f3edcb16b3d93d347aec147168f09 | def expectation(self, operator):
"Compute expectation value of one operator\n\n Compute expectation value of an operator over the state defined by\n this DensityOperator.\n\n Arguments:\n operator {SpinOperator} -- Operator to compute the expectation\n value of\n\n Returns:\n number -- Expectation value\n\n Raises:\n TypeError -- The argument isn't a SpinOperator\n ValueError -- The operator isn't compatible with this one\n "
if (not isinstance(operator, SpinOperator)):
raise TypeError('Argument must be a SpinOperator')
if (not (operator.dimension == self.dimension)):
raise ValueError('SpinOperator and DensityOperator do not have compatible dimensions')
return np.sum((operator.matrix * self.matrix.T)) | Compute expectation value of one operator
Compute expectation value of an operator over the state defined by
this DensityOperator.
Arguments:
operator {SpinOperator} -- Operator to compute the expectation
value of
Returns:
number -- Expectation value
Raises:
TypeError -- The argument isn't a SpinOperator
ValueError -- The operator isn't compatible with this one | muspinsim/spinop.py | expectation | muon-spectroscopy-computational-project/muspinsim | 2 | python | def expectation(self, operator):
"Compute expectation value of one operator\n\n Compute expectation value of an operator over the state defined by\n this DensityOperator.\n\n Arguments:\n operator {SpinOperator} -- Operator to compute the expectation\n value of\n\n Returns:\n number -- Expectation value\n\n Raises:\n TypeError -- The argument isn't a SpinOperator\n ValueError -- The operator isn't compatible with this one\n "
if (not isinstance(operator, SpinOperator)):
raise TypeError('Argument must be a SpinOperator')
if (not (operator.dimension == self.dimension)):
raise ValueError('SpinOperator and DensityOperator do not have compatible dimensions')
return np.sum((operator.matrix * self.matrix.T)) | def expectation(self, operator):
"Compute expectation value of one operator\n\n Compute expectation value of an operator over the state defined by\n this DensityOperator.\n\n Arguments:\n operator {SpinOperator} -- Operator to compute the expectation\n value of\n\n Returns:\n number -- Expectation value\n\n Raises:\n TypeError -- The argument isn't a SpinOperator\n ValueError -- The operator isn't compatible with this one\n "
if (not isinstance(operator, SpinOperator)):
raise TypeError('Argument must be a SpinOperator')
if (not (operator.dimension == self.dimension)):
raise ValueError('SpinOperator and DensityOperator do not have compatible dimensions')
return np.sum((operator.matrix * self.matrix.T))<|docstring|>Compute expectation value of one operator
Compute expectation value of an operator over the state defined by
this DensityOperator.
Arguments:
operator {SpinOperator} -- Operator to compute the expectation
value of
Returns:
number -- Expectation value
Raises:
TypeError -- The argument isn't a SpinOperator
ValueError -- The operator isn't compatible with this one<|endoftext|> |
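The np.sum(operator.matrix * self.matrix.T) in the method above is an elementwise shortcut for Tr(O rho), since sum_ij O_ij * rho_ji equals the trace of the product. A quick numpy check with arbitrary matrices:

import numpy as np

rng = np.random.default_rng(1)
O = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
rho = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))

assert np.isclose(np.sum(O * rho.T), np.trace(O @ rho))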
1a98bd73927181d5c77b31898208f3d99e5f208009cf54b354ecbcf12d8347cb | @classmethod
def left_multiplier(self, operator):
'Create a SuperOperator that performs a left multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, np.eye(m.shape[0]))
return self(M, (d + d)) | Create a SuperOperator that performs a left multiplication
Create a superoperator L from an operator O such that
L*rho = O*rho
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L | muspinsim/spinop.py | left_multiplier | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def left_multiplier(self, operator):
'Create a SuperOperator that performs a left multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, np.eye(m.shape[0]))
return self(M, (d + d)) | @classmethod
def left_multiplier(self, operator):
'Create a SuperOperator that performs a left multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, np.eye(m.shape[0]))
return self(M, (d + d))<|docstring|>Create a SuperOperator that performs a left multiplication
Create a superoperator L from an operator O such that
L*rho = O*rho
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L<|endoftext|> |
9766b65676e246b15a2db8d130e1ce0440d43aabbbecfdbee321b05f61c770e9 | @classmethod
def right_multiplier(self, operator):
'Create a SuperOperator that performs a right multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(np.eye(m.shape[0]), m.T)
return self(M, (d + d)) | Create a SuperOperator that performs a right multiplication
Create a superoperator L from an operator O such that
L*rho = rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L | muspinsim/spinop.py | right_multiplier | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def right_multiplier(self, operator):
'Create a SuperOperator that performs a right multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(np.eye(m.shape[0]), m.T)
return self(M, (d + d)) | @classmethod
def right_multiplier(self, operator):
'Create a SuperOperator that performs a right multiplication\n\n Create a superoperator L from an operator O such that\n\n L*rho = rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(np.eye(m.shape[0]), m.T)
return self(M, (d + d))<|docstring|>Create a SuperOperator that performs a right multiplication
Create a superoperator L from an operator O such that
L*rho = rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L<|endoftext|> |
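Both multiplier constructions rest on the row-major vectorisation identity vec(AXB) = (A kron B^T) vec(X), which is what numpy's flatten realises. A short check of the left and right forms (example matrices are mine):

import numpy as np

rng = np.random.default_rng(2)
O = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))
rho = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))
Id = np.eye(2)

left = np.kron(O, Id)     # left_multiplier's matrix
right = np.kron(Id, O.T)  # right_multiplier's matrix

assert np.allclose(left @ rho.flatten(), (O @ rho).flatten())
assert np.allclose(right @ rho.flatten(), (rho @ O).flatten())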
c77013f6dcde49cba074095f9e87954c7ee13a28db1d3765e85ee3fa67a2ac9c | @classmethod
def commutator(self, operator):
'Create a SuperOperator that performs a commutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho-rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) - self.right_multiplier(operator)) | Create a SuperOperator that performs a commutation
Create a superoperator L from an operator O such that
L*rho = O*rho-rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L | muspinsim/spinop.py | commutator | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def commutator(self, operator):
'Create a SuperOperator that performs a commutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho-rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) - self.right_multiplier(operator)) | @classmethod
def commutator(self, operator):
'Create a SuperOperator that performs a commutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho-rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) - self.right_multiplier(operator))<|docstring|>Create a SuperOperator that performs a commutation
Create a superoperator L from an operator O such that
L*rho = O*rho-rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L<|endoftext|> |
cc8ade2e8a08870241754b9ee64735e33c56840653c227e339c86cb41ba158c9 | @classmethod
def anticommutator(self, operator):
'Create a SuperOperator that performs an anticommutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho+rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) + self.right_multiplier(operator)) | Create a SuperOperator that performs an anticommutation
Create a superoperator L from an operator O such that
L*rho = O*rho+rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L | muspinsim/spinop.py | anticommutator | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def anticommutator(self, operator):
'Create a SuperOperator that performs an anticommutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho+rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) + self.right_multiplier(operator)) | @classmethod
def anticommutator(self, operator):
'Create a SuperOperator that performs an anticommutation\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho+rho*O\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
return (self.left_multiplier(operator) + self.right_multiplier(operator))<|docstring|>Create a SuperOperator that performs an anticommutation
Create a superoperator L from an operator O such that
L*rho = O*rho+rho*O
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L<|endoftext|> |
8e7f604706912a98bd7be236af62fe19980c93a8ebe548590e678855619cec54 | @classmethod
def bracket(self, operator):
'Create a SuperOperator that performs a basis change\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho*O^\n\n where O^ is the conjugate transpose of O.\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, m.conj())
return self(M, (d + d)) | Create a SuperOperator that performs a basis change
Create a superoperator L from an operator O such that
L*rho = O*rho*O^
where O^ is the conjugate transpose of O.
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L | muspinsim/spinop.py | bracket | muon-spectroscopy-computational-project/muspinsim | 2 | python | @classmethod
def bracket(self, operator):
'Create a SuperOperator that performs a basis change\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho*O^\n\n where O^ is the conjugate transpose of O.\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, m.conj())
return self(M, (d + d)) | @classmethod
def bracket(self, operator):
'Create a SuperOperator that performs a basis change\n\n Create a superoperator L from an operator O such that\n\n L*rho = O*rho*O^\n\n where O^ is the conjugate transpose of O.\n\n Arguments:\n operator {Operator} -- Operator O\n\n Returns:\n SuperOperator -- SuperOperator L\n '
m = operator.matrix
d = operator.dimension
M = np.kron(m, m.conj())
return self(M, (d + d))<|docstring|>Create a SuperOperator that performs a basis change
Create a superoperator L from an operator O such that
L*rho = O*rho*O^
where O^ is the conjugate transpose of O.
Arguments:
operator {Operator} -- Operator O
Returns:
SuperOperator -- SuperOperator L<|endoftext|> |
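Setting B = O^dagger in the same vectorisation identity gives vec(O rho O^dagger) = (O kron conj(O)) vec(rho), which is exactly the np.kron(m, m.conj()) used above. A numpy check:

import numpy as np

rng = np.random.default_rng(3)
O = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))
rho = rng.standard_normal((2, 2)) + 1j * rng.standard_normal((2, 2))

superop = np.kron(O, O.conj())   # bracket's superoperator matrix
assert np.allclose(superop @ rho.flatten(), (O @ rho @ O.conj().T).flatten())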
e6ed41e44eff132b9fd58964ffbaa0d3a64c3d52a47308408f8e430c28b5e2be | def atm_print():
'Prints both English and Metric standard atmosphere tables.\n '
metric_filename = 'stdatmos_si.txt'
with open(metric_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (m) (m) (K) (N/m**2) (kg/m**3) (m/s) \n')
output_handle.write('-----------------------------------------------------------------------\n')
for i in range(51):
h = (i * 2000.0)
(z, t, p, d) = statsi(h)
a = np.sqrt(((1.4 * 287.0528) * t))
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string)
english_filename = 'stdatmos_ee.txt'
with open(english_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (ft) (ft) (R) (lbf/ft^2) (slugs/ft^3) (ft/s) \n')
output_handle.write('------------------------------------------------------------------------\n')
for i in range(51):
h = (i * 5000.0)
(z, t, p, d) = statee(h)
a = (np.sqrt((((1.4 * 287.0528) * t) / 1.8)) / 0.3048)
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string) | Prints both English and Metric standard atmosphere tables. | pylot/std_atmos.py | atm_print | luzpaz/Pylot | 24 | python | def atm_print():
'\n '
metric_filename = 'stdatmos_si.txt'
with open(metric_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (m) (m) (K) (N/m**2) (kg/m**3) (m/s) \n')
output_handle.write('-----------------------------------------------------------------------\n')
for i in range(51):
h = (i * 2000.0)
(z, t, p, d) = statsi(h)
a = np.sqrt(((1.4 * 287.0528) * t))
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string)
english_filename = 'stdatmos_ee.txt'
with open(english_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (ft) (ft) (R) (lbf/ft^2) (slugs/ft^3) (ft/s) \n')
output_handle.write('------------------------------------------------------------------------\n')
for i in range(51):
h = (i * 5000.0)
(z, t, p, d) = statee(h)
a = (np.sqrt((((1.4 * 287.0528) * t) / 1.8)) / 0.3048)
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string) | def atm_print():
'\n '
metric_filename = 'stdatmos_si.txt'
with open(metric_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (m) (m) (K) (N/m**2) (kg/m**3) (m/s) \n')
output_handle.write('-----------------------------------------------------------------------\n')
for i in range(51):
h = (i * 2000.0)
(z, t, p, d) = statsi(h)
a = np.sqrt(((1.4 * 287.0528) * t))
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string)
english_filename = 'stdatmos_ee.txt'
with open(english_filename, 'w') as output_handle:
output_handle.write('Geometric Geopotential Speed of\n')
output_handle.write('Altitude Altitude Temperature Pressure Density Sound \n')
output_handle.write(' (ft) (ft) (R) (lbf/ft^2) (slugs/ft^3) (ft/s) \n')
output_handle.write('------------------------------------------------------------------------\n')
for i in range(51):
h = (i * 5000.0)
(z, t, p, d) = statee(h)
a = (np.sqrt((((1.4 * 287.0528) * t) / 1.8)) / 0.3048)
write_string = '{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\n'.format(h, z, t, p, d, a)
output_handle.write(write_string)<|docstring|>Prints both English and Metric standard atmosphere tables.<|endoftext|> |
570b697187ff00e76e1b9333cfcff208cc9d8eabcbad0634bd98b1f51ced6fe2 | def statsi(h):
'Calculates standard atmosphere data in SI units.\n\n Parameters\n ----------\n h : float\n geometric altitude in meters\n\n Returns\n -------\n z : float\n Geopotential altitude in meters.\n\n t : float\n Temperature in K.\n\n p : float\n Pressure in Pa.\n\n d : float\n Density in kg/m^3.\n '
zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e+20])
Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 252.65, 180.65, 180.65])
g = 9.80665
R = 287.0528
Re = 6346766.0
Psa = 101325.0
z = ((Re * h) / (Re + h))
for i in range(8):
Lt = ((- (Tsa[(i + 1)] - Tsa[i])) / (zsa[(i + 1)] - zsa[i]))
if (Lt == 0.0):
if (z <= zsa[(i + 1)]):
t = Tsa[i]
p = (Psa * np.exp(((((- g) * (z - zsa[i])) / R) / Tsa[i])))
d = ((p / R) / t)
break
else:
Psa *= np.exp(((((- g) * (zsa[(i + 1)] - zsa[i])) / R) / Tsa[i]))
else:
ex = ((g / R) / Lt)
if (z <= zsa[(i + 1)]):
t = (Tsa[i] - (Lt * (z - zsa[i])))
p = (Psa * ((t / Tsa[i]) ** ex))
d = ((p / R) / t)
break
else:
Psa *= ((Tsa[(i + 1)] / Tsa[i]) ** ex)
else:
t = Tsa[(- 1)]
p = 0.0
d = 0.0
return (z, t, p, d) | Calculates standard atmosphere data in SI units.
Parameters
----------
h : float
geometric altitude in meters
Returns
-------
z : float
Geopotential altitude in meters.
t : float
Temperature in K.
p : float
Pressure in Pa.
d : float
Density in kg/m^3. | pylot/std_atmos.py | statsi | luzpaz/Pylot | 24 | python | def statsi(h):
'Calculates standard atmosphere data in SI units.\n\n Parameters\n ----------\n h : float\n geometric altitude in meters\n\n Returns\n -------\n z : float\n Geopotential altitude in meters.\n\n t : float\n Temperature in K.\n\n p : float\n Pressure in Pa.\n\n d : float\n Density in kg/m^3.\n '
zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e+20])
Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 252.65, 180.65, 180.65])
g = 9.80665
R = 287.0528
Re = 6346766.0
Psa = 101325.0
z = ((Re * h) / (Re + h))
for i in range(8):
Lt = ((- (Tsa[(i + 1)] - Tsa[i])) / (zsa[(i + 1)] - zsa[i]))
if (Lt == 0.0):
if (z <= zsa[(i + 1)]):
t = Tsa[i]
p = (Psa * np.exp(((((- g) * (z - zsa[i])) / R) / Tsa[i])))
d = ((p / R) / t)
break
else:
Psa *= np.exp(((((- g) * (zsa[(i + 1)] - zsa[i])) / R) / Tsa[i]))
else:
ex = ((g / R) / Lt)
if (z <= zsa[(i + 1)]):
t = (Tsa[i] - (Lt * (z - zsa[i])))
p = (Psa * ((t / Tsa[i]) ** ex))
d = ((p / R) / t)
break
else:
Psa *= ((Tsa[(i + 1)] / Tsa[i]) ** ex)
else:
t = Tsa[(- 1)]
p = 0.0
d = 0.0
return (z, t, p, d) | def statsi(h):
'Calculates standard atmosphere data in SI units.\n\n Parameters\n ----------\n h : float\n geometric altitude in meters\n\n Returns\n -------\n z : float\n Geopotential altitude in meters.\n\n t : float\n Temperature in K.\n\n p : float\n Pressure in Pa.\n\n d : float\n Density in kg/m^3.\n '
zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e+20])
Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 252.65, 180.65, 180.65])
g = 9.80665
R = 287.0528
Re = 6346766.0
Psa = 101325.0
z = ((Re * h) / (Re + h))
for i in range(8):
Lt = ((- (Tsa[(i + 1)] - Tsa[i])) / (zsa[(i + 1)] - zsa[i]))
if (Lt == 0.0):
if (z <= zsa[(i + 1)]):
t = Tsa[i]
p = (Psa * np.exp(((((- g) * (z - zsa[i])) / R) / Tsa[i])))
d = ((p / R) / t)
break
else:
Psa *= np.exp(((((- g) * (zsa[(i + 1)] - zsa[i])) / R) / Tsa[i]))
else:
ex = ((g / R) / Lt)
if (z <= zsa[(i + 1)]):
t = (Tsa[i] - (Lt * (z - zsa[i])))
p = (Psa * ((t / Tsa[i]) ** ex))
d = ((p / R) / t)
break
else:
Psa *= ((Tsa[(i + 1)] / Tsa[i]) ** ex)
else:
t = Tsa[(- 1)]
p = 0.0
d = 0.0
return (z, t, p, d)<|docstring|>Calculates standard atmosphere data in SI units.
Parameters
----------
h : float
geometric altitude in meters
Returns
-------
z : float
Geopotential altitude in meters.
t : float
Temperature in K.
p : float
Pressure in Pa.
d : float
Density in kg/m^3.<|endoftext|> |
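Inside the troposphere, the gradient-layer branch of statsi reduces to the textbook relations T = T0 - L*z and p = p0 * (T/T0)^(g/(R*L)). A standalone recomputation at 5 km geopotential altitude, using only the constants shown above:

import numpy as np

g, R = 9.80665, 287.0528                  # constants used by statsi
T0, p0 = 288.15, 101325.0                 # sea-level temperature and pressure
lapse = 0.0065                            # tropospheric lapse rate, K/m

z = 5000.0                                # geopotential altitude in m
T = T0 - lapse * z
p = p0 * (T / T0) ** (g / (R * lapse))
d = p / (R * T)
a = np.sqrt(1.4 * R * T)                  # speed of sound, as in atm_print

print(T, p, d, a)                         # ~255.65 K, ~5.40e4 Pa, ~0.736 kg/m^3, ~320 m/s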
bbc2656dc1d0078b58892487f3d38455ff34781ff98adbf13af814a27e252774 | def statee(h):
'Calculates standard atmosphere data in English units.\n\n Parameters\n ----------\n h : float\n Geometric altitude in feet\n\n Returns\n -------\n z : float\n Geopotential altitude in feet.\n\n t : float\n Temperature in R.\n\n p : float\n Pressure in lbf/ft^2.\n\n d : float\n Density in slugs/ft^3.\n '
hsi = (h * 0.3048)
(zsi, tsi, psi, dsi) = statsi(hsi)
z = (zsi / 0.3048)
t = (tsi * 1.8)
p = (psi * 0.02088543)
d = (dsi * 0.00194032)
return (z, t, p, d) | Calculates standard atmosphere data in English units.
Parameters
----------
h : float
Geometric altitude in feet
Returns
-------
z : float
Geopotential altitude in feet.
t : float
Temperature in R.
p : float
Pressure in lbf/ft^2.
d : float
Density in slugs/ft^3. | pylot/std_atmos.py | statee | luzpaz/Pylot | 24 | python | def statee(h):
'Calculates standard atmosphere data in English units.\n\n Parameters\n ----------\n h : float\n Geometric altitude in feet\n\n Returns\n -------\n z : float\n Geopotential altitude in feet.\n\n t : float\n Temperature in R.\n\n p : float\n Pressure in lbf/ft^2.\n\n d : float\n Density in slugs/ft^3.\n '
hsi = (h * 0.3048)
(zsi, tsi, psi, dsi) = statsi(hsi)
z = (zsi / 0.3048)
t = (tsi * 1.8)
p = (psi * 0.02088543)
d = (dsi * 0.00194032)
return (z, t, p, d) | def statee(h):
'Calculates standard atmosphere data in English units.\n\n Parameters\n ----------\n h : float\n Geometric altitude in feet\n\n Returns\n -------\n z : float\n Geopotential altitude in feet.\n\n t : float\n Temperature in R.\n\n p : float\n Pressure in lbf/ft^2.\n\n d : float\n Density in slugs/ft^3.\n '
hsi = (h * 0.3048)
(zsi, tsi, psi, dsi) = statsi(hsi)
z = (zsi / 0.3048)
t = (tsi * 1.8)
p = (psi * 0.02088543)
d = (dsi * 0.00194032)
return (z, t, p, d)<|docstring|>Calculates standard atmosphere data in English units.
Parameters
----------
h : float
Geometric altitude in feet
Returns
-------
z : float
Geopotential altitude in feet.
t : float
Temperature in R.
p : float
Pressure in lbf/ft^2.
d : float
Density in slugs/ft^3.<|endoftext|> |
e3bd0b6d56b8f01bf9d18c3c7db7f9aefebce8faa701187ed3d57b168471aebe | @supported_devices
def test_unitary_gate(self, device):
'Test simulation with unitary gate circuit instructions.'
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | Test simulation with unitary gate circuit instructions. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_unitary_gate | kevinsung/qiskit-aer | 313 | python | @supported_devices
def test_unitary_gate(self, device):
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | @supported_devices
def test_unitary_gate(self, device):
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets)<|docstring|>Test simulation with unitary gate circuit instructions.<|endoftext|> |
0fc9c4d1b5599dc0df3cafad8a89cc967764f64af1168eee2aa461ad51de05f2 | @supported_devices
def test_unitary_gate_circuit_run(self, device):
'Test simulation with unitary gate circuit instructions.'
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | Test simulation with unitary gate circuit instructions. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_unitary_gate_circuit_run | kevinsung/qiskit-aer | 313 | python | @supported_devices
def test_unitary_gate_circuit_run(self, device):
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | @supported_devices
def test_unitary_gate_circuit_run(self, device):
backend = self.backend(device=device)
circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_unitary_gate.unitary_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets)<|docstring|>Test simulation with unitary gate circuit instructions.<|endoftext|> |
7cfd7071898ce3540deb151ba33507f4cd693422d596a25251a8e0623fa428d4 | @supported_devices
def test_diagonal_gate(self, device):
'Test simulation with diagonal gate circuit instructions.'
backend = self.backend(device=device)
circuits = ref_diagonal_gate.diagonal_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_diagonal_gate.diagonal_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | Test simulation with diagonal gate circuit instructions. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_diagonal_gate | kevinsung/qiskit-aer | 313 | python | @supported_devices
def test_diagonal_gate(self, device):
backend = self.backend(device=device)
circuits = ref_diagonal_gate.diagonal_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_diagonal_gate.diagonal_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets) | @supported_devices
def test_diagonal_gate(self, device):
backend = self.backend(device=device)
circuits = ref_diagonal_gate.diagonal_gate_circuits_deterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_diagonal_gate.diagonal_gate_unitary_deterministic()
self.assertSuccess(result)
self.compare_unitary(result, circuits, targets)<|docstring|>Test simulation with diagonal gate circuit instructions.<|endoftext|> |
ff66b973e07a3fad30ff6ca63797aae10b0e581a131055658f7f324ac2138d7c | @supported_devices
def test_qobj_global_phase(self, device):
'Test qobj global phase.'
backend = self.backend(device=device)
circuits = ref_1q_clifford.h_gate_circuits_nondeterministic(final_measure=False)
circuits = transpile(circuits, backend, optimization_level=1)
result = backend.run(circuits, shots=1).result()
targets = ref_1q_clifford.h_gate_unitary_nondeterministic()
    for iter, circuit in enumerate(circuits):
        global_phase = (-1) ** iter * (pi / 4)
        circuit.global_phase += global_phase
        targets[iter] = exp(1j * global_phase) * targets[iter]
    circuits = transpile(circuits, backend, optimization_level=1)
    result = backend.run(circuits, shots=1).result()
    self.assertSuccess(result)
    self.compare_unitary(result, circuits, targets, ignore_phase=False) | Test qobj global phase. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_qobj_global_phase | kevinsung/qiskit-aer | 313 | python | @supported_devices
def test_qobj_global_phase(self, device):
    backend = self.backend(device=device)
    circuits = ref_1q_clifford.h_gate_circuits_nondeterministic(final_measure=False)
    circuits = transpile(circuits, backend, optimization_level=1)
    result = backend.run(circuits, shots=1).result()
    targets = ref_1q_clifford.h_gate_unitary_nondeterministic()
    for iter, circuit in enumerate(circuits):
        global_phase = (-1) ** iter * (pi / 4)
        circuit.global_phase += global_phase
        targets[iter] = exp(1j * global_phase) * targets[iter]
    circuits = transpile(circuits, backend, optimization_level=1)
    result = backend.run(circuits, shots=1).result()
    self.assertSuccess(result)
    self.compare_unitary(result, circuits, targets, ignore_phase=False) | @supported_devices
def test_qobj_global_phase(self, device):
    backend = self.backend(device=device)
    circuits = ref_1q_clifford.h_gate_circuits_nondeterministic(final_measure=False)
    circuits = transpile(circuits, backend, optimization_level=1)
    result = backend.run(circuits, shots=1).result()
    targets = ref_1q_clifford.h_gate_unitary_nondeterministic()
    for iter, circuit in enumerate(circuits):
        global_phase = (-1) ** iter * (pi / 4)
        circuit.global_phase += global_phase
        targets[iter] = exp(1j * global_phase) * targets[iter]
    circuits = transpile(circuits, backend, optimization_level=1)
    result = backend.run(circuits, shots=1).result()
    self.assertSuccess(result)
    self.compare_unitary(result, circuits, targets, ignore_phase=False)<|docstring|>Test qobj global phase.<|endoftext|>
6925afaa21d2a5fb831788300ae5bbacd8744afaf3e7b2db33c32df86293dc32 | @supported_devices
def test_legacy_method(self, device):
    'Test legacy device method options.'
    backend = self.backend()
    legacy_method = f'unitary_{device.lower()}'
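    # Legacy method names of the form 'unitary_<device>' should emit a
    # DeprecationWarning and be translated into the equivalent device option.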
    with self.assertWarns(DeprecationWarning):
        backend.set_options(method=legacy_method)
    self.assertEqual(backend.options.device, device) | Test legacy device method options. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_legacy_method | kevinsung/qiskit-aer | 313 | python | @supported_devices
def test_legacy_method(self, device):
    backend = self.backend()
    legacy_method = f'unitary_{device.lower()}'
    with self.assertWarns(DeprecationWarning):
        backend.set_options(method=legacy_method)
    self.assertEqual(backend.options.device, device) | @supported_devices
def test_legacy_method(self, device):
    backend = self.backend()
    legacy_method = f'unitary_{device.lower()}'
    with self.assertWarns(DeprecationWarning):
        backend.set_options(method=legacy_method)
    self.assertEqual(backend.options.device, device)<|docstring|>Test legacy device method options.<|endoftext|>
a7e45a31f9502e85a3cfbf8882dc2f88a9b580c981559dd092e0121dc07fb33c | def test_unsupported_methods(self):
    'Test unsupported AerSimulator method raises AerError.'
    backend = self.backend()
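    # 'automatic' method selection is not valid for this wrapper backend, so
    # set_options must raise AerError (inside the expected deprecation warning).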
    with self.assertWarns(DeprecationWarning):
        self.assertRaises(AerError, backend.set_options, method='automatic') | Test unsupported AerSimulator method raises AerError. | test/terra/backends/aer_simulator/test_wrapper_unitary_simulator.py | test_unsupported_methods | kevinsung/qiskit-aer | 313 | python | def test_unsupported_methods(self):
    backend = self.backend()
    with self.assertWarns(DeprecationWarning):
        self.assertRaises(AerError, backend.set_options, method='automatic') | def test_unsupported_methods(self):
    backend = self.backend()
    with self.assertWarns(DeprecationWarning):
        self.assertRaises(AerError, backend.set_options, method='automatic')<|docstring|>Test unsupported AerSimulator method raises AerError.<|endoftext|>