code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
import networkx as nx
"""Test completion states"""
TEST_PASS = 1
TEST_FAIL = 0
TEST_NOT_RUN = -1
class TestDefinition:
"""
Defines a single validation test to be run, ideally run as part of a suite.
"""
def __init__(self, title, test_function, dependencies=None, required_context=None):
"""
Construct a validation test.
Args:
title (basestring): The name of the test. This must be unique across tests
that are a part of a suite.
test_function (callable): A function which must take one argument (a dict) that
contains context data required for running the test. It must return at least one
value, which will be cast as a boolean to determine pass/fail. If multiple values
are returned, the first value will be used to determine the test result, and the
rest will be stored and can be retrieved with `get_result_metadata()`.
dependencies (`iterable` of `basestring`, `TestDefinition` or `tuple`, optional): an iterable
containing test titles (as strings), test definitions (as `TestDefinition`), or
two-member tuples, where the first element is a title or test definition and the
second element is the required outcome of that test (TEST_PASS or TEST_FAIL).
For titles and definitions, the default required outcome is TEST_PASS. Default: None.
required_context (`iterable` of `basestring`, optional): an iterable containing names of keys expected
by the test in the context data. Default: None.
"""
self.status = TEST_NOT_RUN
self.dependencies = dict()
# Assign required results for dependencies
for dependency in dependencies or []:
if isinstance(dependency, (str, TestDefinition)):
self.dependencies[dependency] = TEST_PASS
elif isinstance(dependency, tuple):
if len(dependency) != 2:
raise ValueError("Dependency must be a 2-length tuple, str, or TestDefinition")
test, required_result = dependency
if required_result not in (TEST_PASS, TEST_FAIL):
raise ValueError("Dependency result must be TEST_PASS or TEST_FAIL")
self.dependencies[test] = required_result
self.title = title or ""
self.required_context = set(required_context) if required_context else set()
if not callable(test_function):
raise ValueError("test_function not a callable object")
self._func = test_function
self._result_metadata = None
def add_dependency(self, dependency, required_result=TEST_PASS):
"""
Add single test dependency and the required result.
Args:
dependency (`basestring` or `TestDefinition`): Test object or test
name that this test depends on
required_result (int, optional): Whether the dependent test should pass (1, TEST_PASS)
or fail (0, TEST_FAIL) to meet the dependency requirement. Default: TEST_PASS
"""
self.dependencies[dependency] = required_result
def remove_dependency(self, dependency):
"""
Remove single test dependency, if it exists as a dependency.
Args:
dependency (`basestring` or `TestDefinition`): Test object or test
name to remove.
"""
if dependency in self.dependencies:
self.dependencies.pop(dependency)
def add_required_context_value(self, context):
"""
Add single context variable required for the test.
Args:
context (basestring): Name of variable to include in
context data passed to function.
"""
self.required_context.add(context)
def remove_required_context_value(self, context):
"""
Remove single required context variable, if it exists.
Args:
context (basestring): Name of context variable to remove
"""
if context in self.required_context:
self.required_context.remove(context)
def run(self, context=None):
"""
Run the test function with the provided context variables.
Args:
context (dict, optional): Context values required for the test.
Returns:
(bool) result of the test
Raises:
(ValueError) if provided context does not match expected context
"""
# Check provided context to ensure it has all required values
if self.required_context:
if not context:
raise ValueError("No context provided when context values required")
if any(x not in context for x in self.required_context):
missing_context = self.required_context - self.required_context.intersection(set(context.keys()))
raise ValueError(f"Missing context values for test: {missing_context}")
# Run the test with required context
result = self._func({x: context[x] for x in self.required_context})
# Assess result
if isinstance(result, tuple):
self.status = int(bool(result[0]))
self._result_metadata = result[1:]
else:
self.status = int(bool(result))
return self.status
def get_result_metadata(self):
"""
Retrieves the metadata returned by the completed test.
Returns:
(tuple) stored metadata returned by test. None if no metadata returned.
"""
return self._result_metadata
def __eq__(self, other):
"""
Equality with strings and other `TestDefinition` objects, by comparing title.
Args:
other (`basestring` or `TestDefinition`): value to compare
Returns:
(bool) True if matching titles, False otherwise
Raises:
(NotImplemented) if type of `other` is not one of types listed
"""
if isinstance(other, str):
return self.title == other
elif isinstance(other, TestDefinition):
return self.title == other.title
raise NotImplemented
def __hash__(self):
return hash(self.title)
def __str__(self):
return self.title
def __repr__(self):
return self.title
class _FailedTest(TestDefinition):
"""
Internal class to block advancement of graph traversal when a test depends on
the failure of another test, and that test passes. Fails automatically when run
to block advancement.
"""
def __init__(self, test_to_monitor):
"""
Create a blocker node for required test failures.
Args:
test_to_monitor (`basestring` or `TestDefinition`): test to monitor
(value is rather inconsequential to operation)
"""
super().__init__(
f"Blocker for tests depending on failure of: {test_to_monitor}",
lambda x: TEST_FAIL,
dependencies=[test_to_monitor]
)
class TestSuite:
"""
A collection of tests to run as a group. Manages test dependencies.
"""
def __init__(self):
"""
Construct the suite.
"""
self._tests = set()
self._context = dict()
self._test_graph = None
self._results = None
self._has_run = False
def context_satisfies_requirements(self):
"""
Checks if provided context satisfies the requirements of all tests
in the suite.
Returns:
(bool) True if all context requirements are satisfied, and False otherwise
"""
return all(context_value in self._context
for test in self._tests
for context_value in test.required_context)
def all_dependent_test_in_suite(self):
"""
Checks if every test in the suite has its dependent tests in the suite.
Returns:
(bool) True if all dependency tests are in the suite, and False otherwise
"""
return all(dependency in self._tests
for test in self._tests
for dependency in test.dependencies.keys())
def add_test(self, test, replace=True):
"""
Add a test to the suite.
Args:
test (TestDefinition): Test object to add to the suite
replace (bool, optional): if True, replace the test if it exists. False will
raise a ValueError if a test with the same name is in the suite.
Raises:
(ValueError) if test with same title exists in the suite and `replace=False`
"""
if not replace and test in self._tests:
raise ValueError(f"Suite already contains test '{test.title}'")
self._tests.add(test)
def remove_test(self, test):
"""
Remove test from suite if it exists.
Args:
test (`basestring` or `TestDefinition`): title or test object to
remove from suite
"""
if test in self._tests:
self._tests.remove(test)
def get_context(self):
"""
Get the context values for the test suite that will be passed to each
test.
Returns:
(dict) context values, keyed by context variable names. Empty dict if none.
"""
return self._context or dict()
def set_context(self, context):
"""
Set the context values for the test suite that will be passed to each
test.
Args:
context (dict): context values, keyed by context variable names
"""
self._context = context
def run(self):
"""
Run the test suite with the given context.
Raises:
(ValueError) if context or dependencies are not satisfied
"""
if not self.context_satisfies_requirements():
raise ValueError("Context is missing required values for tests")
if not self.all_dependent_test_in_suite():
raise ValueError("Dependency test missing")
self._build_graph()
self._run_suite()
self._has_run = True
def _build_graph(self):
"""
Builds directional graph of tests for traversal.
Nodes are tests and edges are dependencies.
"""
self._test_graph = nx.DiGraph()
# Load all tests as nodes
for test in self._tests:
node_name = "PASS_" + test.title
self._test_graph.add_node(node_name, test_object=test)
# Create edges based on dependencies
for test in self._tests:
for dependency, required_result in test.dependencies.items():
child_node_name = "PASS_" + test.title
if required_result == TEST_PASS:
parent_node_name = "PASS_" + str(dependency)
elif required_result == TEST_FAIL:
# If a test requires that another test fail,
# create an interim node to block traversal if the
# dependent test passes. This node will be removed if
# the dependent test fails to allow child tests to run.
parent_node_name = "FAIL_" + str(dependency)
self._test_graph.add_node(
parent_node_name,
test_object=_FailedTest(dependency)
)
# Draw edge from dependent test to blocking node
self._test_graph.add_edge("PASS_" + str(dependency), parent_node_name)
else:
raise ValueError("Invalid required test result!")
# Draw edge from dependent test (or blocking node) to current test
self._test_graph.add_edge(parent_node_name, child_node_name)
def _run_suite(self):
"""
Run tests and record results.
"""
for test in self._get_next_test():
# print(f"Running test {test.title}")
test.run(self._context)
# print(f"Test: {test.title}, Result: {test.status}")
self._results = {test: test.status for test in self._tests}
def _get_next_test(self):
"""
Traverses graph recursively and generates tests to run. As tests pass,
the nodes are removed from the graph.
Yields:
(TestDefinition) test to run
"""
def __yield_tests_rec(graph):
queue_for_removal = set()
# Traverse all nodes by all inbound edges in the current view of the graph
for test_name, n_unmet_dependencies in graph.in_degree:
test_object = graph.nodes[test_name]['test_object']
if n_unmet_dependencies == 0 and test_object.status == TEST_NOT_RUN:
# If there are no unmet dependencies (presence of no inbound edges)
# and the test hasn't been run, then send it up to be run.
yield test_object
if test_object.status == TEST_PASS:
# If test passes, queue node for removal on next pass
queue_for_removal.add(test_name)
elif test_object.status == TEST_FAIL:
# If test fails, queue the related blocker node to be removed on next pass (if exists)
queue_for_removal.add("FAIL_" + str(test_object))
else:
raise ValueError("Invalid test result")
# Exit criterion: if no tests ran, process is finished
if not queue_for_removal:
return
# Delete queued nodes from graph
graph.remove_nodes_from(queue_for_removal)
# Clear queue to save memory
queue_for_removal.clear()
# Recurse on new view of graph
# TODO: consider not using recursion as it could get hit recursion depth limit if many
# rounds of testing have to be done
yield from __yield_tests_rec(graph)
# Begin recursion
yield from __yield_tests_rec(self._test_graph)
def has_run(self):
"""
Returns if suite has been run or not.
Returns:
(bool) True if suite has run, False otherwise
"""
return self._has_run
def get_results(self):
"""
Outputs the results of the tests.
Returns:
(dict) test results, keyed by test title. 0 = TEST_FAIL, 1 = TEST_PASS, -1 = TEST_NOT_RUN
"""
if self.has_run():
return {k.title: v for k, v in self._results.items()}
raise ValueError("Test suite not run!")
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/validation/graph_suite.py
| 0.856752 | 0.64079 |
graph_suite.py
|
pypi
|
import re
from datetime import datetime
from saml_reader.validation.graph_suite import TestDefinition, TestSuite, TEST_FAIL
from saml_reader.validation.input_validation import MongoFederationConfig, UserInputValidator
class MongoSamlValidator:
"""
Interprets SAML and certificate data and compares it against expected patterns specific
to MongoDB Cloud and entered comparison values
"""
def __init__(self, saml, cert=None, comparison_values=None):
"""
Reads in SAML, certificate data, and any comparison values for important
parameters.
Args:
saml (BaseSamlParser): SAML response
cert (Certificate, optional): Certificate information
comparison_values (MongoFederationConfig, optional): user-entered values to check
against SAML data
"""
self._saml = saml
self._cert = cert
self._comparison_values = comparison_values or MongoFederationConfig()
self._validated = False
self._errors = []
def has_certificate(self):
"""
Determine if certificate information is available
Returns:
(bool) True if there is certificate data, False otherwise
"""
return self._cert is not None
def validate_configuration(self):
"""
Run validation tests on the loaded SAML data and comparison values.
Returns:
None
"""
self._errors = []
test_suite = MongoTestSuite(
self._saml,
comparison_values=self._comparison_values,
certificate=self._cert
)
test_suite.run()
self._build_report(test_suite)
self._validated = True
def _build_report(self, test_suite):
"""
Compiles error messages based on validation test results.
Args:
test_suite (MongoTestSuite): test suite run on SAML data
Returns:
None
"""
# We want to build the report in the order of how the tests are
# shown in the list of tests in MongoTestSuite so that results are
# printed together for related tests.
test_results = test_suite.get_results()
# Get list of failed tests in order
failed_tests = [
test for test in test_suite.get_list_of_mongo_tests()
if test_results.get(test) == TEST_FAIL
]
# Get the report messages for the failed tests
messages = ValidationReport(
self._saml, self._cert, self._comparison_values
).get_messages_by_name(
failed_tests
)
# Write report messages in order, filtering out any tests with no messages
self._errors = [
messages[test] for test in failed_tests if messages[test]
]
def validated(self):
"""
Has validation been run yet.
Returns:
(bool) True if validation has been run, False otherwise
"""
return self._validated
def get_identity_provider(self):
"""
Get identity provider from certificate. May not show exact identity provider.
Returns:
(`basestring` or `None`) Identity provider, if certificate provided, otherwise None
"""
# TODO: It would be cool to have some expected patterns based on common IdPs so we
# could identify the IdP from SAML data and/or see if data matches
# what we would expect knowing what the IdP is from the certificate
if self._cert:
return self._cert.get_organization_name() or self._cert.get_common_name()
return None
def get_certificate(self):
"""
Get SAML signing certificate.
Returns:
(`Certificate` or `None`) Certificate object, if valid one found in SAML, otherwise None
"""
return self._cert
def get_issuer(self):
"""
Get Issuer URI
Returns:
(`basestring` or `None`) Issuer URI, if found in the SAML response, otherwise None
"""
return self._saml.get_issuer_uri()
def get_audience_url(self):
"""
Get Audience URL
Returns:
(`basestring` or `None`) Audience URL, if found in the SAML response, otherwise None
"""
return self._saml.get_audience_url()
def get_assertion_consumer_service_url(self):
"""
Get Assertion Consumer Service (ACS) URL
Returns:
(`basestring` or `None`) ACS URL, if found in the SAML response, otherwise None
"""
return self._saml.get_assertion_consumer_service_url()
def get_encryption_algorithm(self):
"""
Get encryption algorithm
Returns:
(`basestring`) Encryption algorithm
"""
return self._saml.get_encryption_algorithm()
def get_name_id(self):
"""
Get Name ID
Returns:
(`basestring` or `None`) Name ID, if found in the SAML response, otherwise None
"""
return self._saml.get_subject_name_id()
def get_name_id_format(self):
"""
Get Name ID format
Returns:
(`basestring` or `None`) Name ID format, if found in the SAML response, otherwise None
"""
return self._saml.get_subject_name_id_format()
def get_claim_attributes(self):
"""
Get claim attribute names and values
Returns:
(dict) Claim attribute values, keyed by claim name.
Empty dict if no attributes found.
"""
return self._saml.get_attributes() or dict()
def get_missing_claim_attributes(self):
"""
Get required claims for MongoDB that are missing, if any
Returns:
(`set` of `basestring`) names of missing required attributes
"""
attribs = self.get_claim_attributes()
missing_required_attributes = MongoTestSuite.REQUIRED_CLAIMS - set(attribs.keys())
return missing_required_attributes
def get_error_messages(self):
"""
Get errors generated during validation
Returns:
(`list` of `str`) Error messages generated
"""
return self._errors
def get_duplicate_attribute_names(self):
"""
Get attribute names that were duplicated in the claims.
Returns:
(`set` of `str`) Duplicated claim names
"""
return self._saml.get_duplicate_attribute_names()
class MongoTestSuite(TestSuite):
"""
Test suite for SAML responses for comparison against known patterns and comparison values.
Attributes:
VALID_NAME_ID_FORMATS (`set` of `basestring`): acceptable formats for Name ID for MongoDB Cloud
REQUIRED_CLAIMS (`set` of `basestring`): claim attribute names that are required in SAML response
"""
VALID_NAME_ID_FORMATS = {
'urn:oasis:names:tc:SAML:1.0:nameid-format:unspecified',
'urn:oasis:names:tc:SAML:1.0:nameid-format:emailAddress',
'urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress',
'urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified'
}
REQUIRED_CLAIMS = {
'firstName',
'lastName'
}
def __init__(self, saml, certificate=None, comparison_values=None):
"""
Create test suite with supplied SAML and comparison data.
Args:
saml (BaseSamlParser): parsed SAML data
certificate (Certificate, optional): SAML signing certificate, if one was
provided in the SAML data. Default: None (no certificate available)
comparison_values (MongoFederationConfig, optional): comparison values to
compare with data in SAML response. Default: None (no comparison
tests will be performed)
"""
super().__init__()
self.set_context({
'saml': saml,
'comparison_values': comparison_values or MongoFederationConfig(),
'certificate': certificate
})
self._tests = self._get_tests()
@staticmethod
def _get_tests():
"""
Builds test objects for testing SAML data for patterns specific to MongoDB Cloud
and against supplied comparison values.
Any future tests should be added to this function in an order which makes logical sense
with the tests around it.
Returns:
(`list` of `TestDefinition`) test objects
"""
tests = [
# Name ID and Name ID format tests
TestDefinition("exists_name_id", MongoTestSuite.verify_name_id_exists,
required_context=['saml']),
TestDefinition("regex_name_id", MongoTestSuite.verify_name_id_pattern,
dependencies=['exists_name_id'],
required_context=['saml']),
TestDefinition("exists_name_id_format", MongoTestSuite.verify_name_id_format_exists,
dependencies=['exists_name_id'],
required_context=['saml']),
TestDefinition("regex_name_id_format", MongoTestSuite.verify_name_id_format,
dependencies=['exists_name_id_format'],
required_context=['saml']),
# Claim attribute tests
TestDefinition("exists_all_required_attributes", MongoTestSuite.verify_all_required_attributes_exist,
required_context=['saml']),
TestDefinition("exists_first_name", MongoTestSuite.verify_first_name_exists,
required_context=['saml']),
TestDefinition("regex_first_name", MongoTestSuite.verify_first_name_pattern,
dependencies=['exists_first_name'],
required_context=['saml']),
TestDefinition("exists_last_name", MongoTestSuite.verify_last_name_exists,
required_context=['saml']),
TestDefinition("regex_last_name", MongoTestSuite.verify_last_name_pattern,
dependencies=['exists_last_name'],
required_context=['saml']),
TestDefinition("exists_member_of", MongoTestSuite.verify_member_of_exists,
required_context=['saml']),
TestDefinition("member_of_not_empty", MongoTestSuite.verify_member_of_not_empty,
dependencies=['exists_member_of'],
required_context=['saml']),
TestDefinition("regex_member_of", MongoTestSuite.verify_member_of_pattern,
dependencies=['member_of_not_empty'],
required_context=['saml']),
TestDefinition("not_duplicate_member_of", MongoTestSuite.verify_member_of_is_not_duplicated,
dependencies=['exists_member_of'],
required_context=['saml']),
# Claim attribute comparison tests
TestDefinition("exists_comparison_first_name", MongoTestSuite.verify_first_name_comparison_exists,
dependencies=['regex_first_name'],
required_context=['comparison_values']),
TestDefinition("compare_first_name", MongoTestSuite.verify_first_name,
dependencies=['exists_comparison_first_name'],
required_context=['saml', 'comparison_values']),
TestDefinition("exists_comparison_last_name", MongoTestSuite.verify_last_name_comparison_exists,
dependencies=['regex_last_name'],
required_context=['comparison_values']),
TestDefinition("compare_last_name", MongoTestSuite.verify_last_name,
dependencies=['exists_comparison_last_name'],
required_context=['saml', 'comparison_values']),
TestDefinition("member_of_is_expected", MongoTestSuite.verify_member_of_is_expected,
dependencies=[('exists_member_of', TEST_FAIL)],
required_context=['comparison_values']),
TestDefinition("exists_comparison_member_of", MongoTestSuite.verify_member_of_comparison_exists,
dependencies=['regex_member_of'],
required_context=['comparison_values']),
TestDefinition("compare_member_of", MongoTestSuite.verify_member_of,
dependencies=['exists_comparison_member_of'],
required_context=['saml', 'comparison_values']),
# Email and Name ID comparison tests
TestDefinition("exists_comparison_email", MongoTestSuite.verify_email_comparison_exists,
required_context=['comparison_values']),
TestDefinition("compare_email_name_id", MongoTestSuite.verify_name_id,
dependencies=['regex_name_id', 'exists_comparison_email'],
required_context=['saml', 'comparison_values']),
# Federated domain tests
TestDefinition("exists_comparison_domain", MongoTestSuite.verify_domain_comparison_exists,
required_context=['comparison_values']),
TestDefinition("compare_domain_comparison_email", MongoTestSuite.verify_domain_in_comparison_email,
dependencies=['exists_comparison_email', 'exists_comparison_domain'],
required_context=['comparison_values']),
TestDefinition("compare_domain_name_id", MongoTestSuite.verify_domain_in_name_id,
dependencies=['regex_name_id', 'exists_comparison_domain'],
required_context=['saml', 'comparison_values']),
# Issuer URI tests
TestDefinition("exists_issuer", MongoTestSuite.verify_issuer_exists,
required_context=['saml']),
TestDefinition("regex_issuer", MongoTestSuite.verify_issuer_pattern,
dependencies=['exists_issuer'],
required_context=['saml']),
TestDefinition("exists_comparison_issuer", MongoTestSuite.verify_issuer_comparison_exists,
dependencies=['regex_issuer'],
required_context=['comparison_values']),
TestDefinition("match_issuer", MongoTestSuite.verify_issuer,
dependencies=['exists_comparison_issuer'],
required_context=['saml', 'comparison_values']),
# Audience URL tests
TestDefinition("exists_audience", MongoTestSuite.verify_audience_url_exists,
required_context=['saml']),
TestDefinition("regex_audience", MongoTestSuite.verify_audience_url_pattern,
dependencies=['exists_audience'],
required_context=['saml']),
TestDefinition("exists_comparison_audience", MongoTestSuite.verify_audience_comparison_exists,
dependencies=['regex_audience'],
required_context=['comparison_values']),
TestDefinition("match_audience", MongoTestSuite.verify_audience_url,
dependencies=['exists_comparison_audience'],
required_context=['saml', 'comparison_values']),
# ACS URL tests
TestDefinition("exists_acs", MongoTestSuite.verify_assertion_consumer_service_url_exists,
required_context=['saml']),
TestDefinition("regex_acs", MongoTestSuite.verify_assertion_consumer_service_url_pattern,
dependencies=['exists_acs'],
required_context=['saml']),
TestDefinition("exists_comparison_acs",
MongoTestSuite.verify_assertion_consumer_service_url_comparison_exists,
dependencies=['regex_acs'],
required_context=['comparison_values']),
TestDefinition("match_acs", MongoTestSuite.verify_assertion_consumer_service_url,
dependencies=['exists_comparison_acs'],
required_context=['saml', 'comparison_values']),
# Encryption algorithm tests
TestDefinition("exists_encryption", MongoTestSuite.verify_encryption_algorithm_exists,
required_context=['saml']),
TestDefinition("regex_encryption", MongoTestSuite.verify_encryption_algorithm_pattern,
dependencies=['exists_encryption'],
required_context=['saml']),
TestDefinition("exists_comparison_encryption",
MongoTestSuite.verify_encryption_algorithm_comparison_exists,
dependencies=['regex_encryption'],
required_context=['comparison_values']),
TestDefinition("match_encryption", MongoTestSuite.verify_encryption_algorithm,
dependencies=['exists_comparison_encryption'],
required_context=['saml', 'comparison_values']),
# Certificate tests
TestDefinition("exists_certificate", MongoTestSuite.verify_certificate_exists,
required_context=['certificate']),
TestDefinition("certificate_not_expired", MongoTestSuite.verify_certificate_not_expired,
dependencies=['exists_certificate'],
required_context=['certificate']),
TestDefinition("exists_comparison_cert_date", MongoTestSuite.verify_certificate_expiry_comparison_exists,
dependencies=['certificate_not_expired'],
required_context=['comparison_values']),
TestDefinition("match_certificate_expiry", MongoTestSuite.verify_certificate_expiry,
dependencies=['exists_comparison_cert_date'],
required_context=['certificate', 'comparison_values']),
]
return tests
def get_list_of_mongo_tests(self):
"""
Get name of tests in order listed. Useful for compiling reports.
Returns:
(`list` of `basestring`) test titles in order
"""
return [test.title for test in self._tests]
@staticmethod
def _matches_regex(regex, value):
"""
Checks if a string matches a given regular expression
Args:
regex (basestring): regex string
value (basestring): string to check against regex
Returns:
(bool) True if `value` matches pattern `regex`, False otherwise
"""
matcher = re.compile(regex)
if matcher.fullmatch(value):
return True
return False
# Issuer URI tests
@staticmethod
def verify_issuer_exists(context):
"""
Checks if Issuer URI was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_issuer_uri() is not None
@staticmethod
def verify_issuer_comparison_exists(context):
"""
Checks if there is a comparison value for the Issuer URI.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_parsed_value('issuer') is not None
@staticmethod
def verify_issuer(context):
"""
Checks Issuer URI against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_issuer_uri() == context.get('comparison_values').get_parsed_value('issuer')
@staticmethod
def verify_issuer_pattern(context):
"""
Checks if Issuer URI matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(UserInputValidator().get_validation_regex('issuer'),
context.get('saml').get_issuer_uri())
# Audience URL tests
@staticmethod
def verify_audience_url_exists(context):
"""
Checks if Audience URL was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_audience_url() is not None
@staticmethod
def verify_audience_comparison_exists(context):
"""
Checks if there is a comparison value for the Audience URL.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_parsed_value('audience') is not None
@staticmethod
def verify_audience_url(context):
"""
Checks Audience URL against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_audience_url() == \
context.get('comparison_values').get_parsed_value('audience')
@staticmethod
def verify_audience_url_pattern(context):
"""
Checks if Audience URL matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(UserInputValidator().get_validation_regex('audience'),
context.get('saml').get_audience_url())
# Assertion Consumer Service URL tests
@staticmethod
def verify_assertion_consumer_service_url_exists(context):
"""
Checks if Assertion Consumer Service URL was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_assertion_consumer_service_url() is not None
@staticmethod
def verify_assertion_consumer_service_url_comparison_exists(context):
"""
Checks if there is a comparison value for the Assertion Consumer Service URL.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_parsed_value('acs') is not None
@staticmethod
def verify_assertion_consumer_service_url(context):
"""
Checks Assertion Consumer Service URL against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_assertion_consumer_service_url() == \
context.get('comparison_values').get_parsed_value('acs')
@staticmethod
def verify_assertion_consumer_service_url_pattern(context):
"""
Checks if Assertion Consumer Service URL matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(UserInputValidator().get_validation_regex('acs'),
context.get('saml').get_assertion_consumer_service_url())
# Encryption algorithm tests
@staticmethod
def verify_encryption_algorithm_exists(context):
"""
Checks if encryption algorithm was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_encryption_algorithm() is not None
@staticmethod
def verify_encryption_algorithm_comparison_exists(context):
"""
Checks if there is a comparison value for the Assertion Consumer Service URL.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_parsed_value('encryption') is not None
@staticmethod
def verify_encryption_algorithm(context):
"""
Checks encryption algorithm against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
# expected encryption algorithm format expected to be "SHA1" or "SHA256"
return context.get('saml').get_encryption_algorithm() == \
context.get('comparison_values').get_parsed_value('encryption')
@staticmethod
def verify_encryption_algorithm_pattern(context):
"""
Checks if encryption algorithm matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(UserInputValidator().get_validation_regex('encryption'),
context.get('saml').get_encryption_algorithm())
# Name ID and format tests
@staticmethod
def verify_name_id(context):
"""
Checks Name ID against expected value (case-insensitive)
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_subject_name_id().lower() == \
context.get('comparison_values').get_parsed_value('email').lower()
@staticmethod
def verify_name_id_exists(context):
"""
Checks if Name ID exists in the SAML response
Args:
context (dict): test context dictionary
Returns:
(bool) True if present, False otherwise
"""
return context.get('saml').get_subject_name_id() is not None
@staticmethod
def verify_name_id_pattern(context):
"""
Checks if Name ID matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(UserInputValidator().get_validation_regex('email'),
context.get('saml').get_subject_name_id())
@staticmethod
def verify_name_id_format_exists(context):
"""
Checks if Name ID Format was found in the SAML response.
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_subject_name_id_format() is not None
@staticmethod
def verify_name_id_format(context):
"""
Checks if Name ID format is one of the valid valuesI
Returns:
(bool) True if a valid value, False otherwise
"""
return context.get('saml').get_subject_name_id_format() in MongoTestSuite.VALID_NAME_ID_FORMATS
# Claim attribute tests
@staticmethod
def verify_all_required_attributes_exist(context):
"""
Check if SAML response has all required attributes.
Returns:
(bool) true if all required attributes are in SAML response, false otherwise
"""
saml_attributes = context.get('saml').get_attributes() or dict()
return all(attribute_name in saml_attributes
for attribute_name in MongoTestSuite.REQUIRED_CLAIMS)
@staticmethod
def verify_first_name_exists(context):
"""
Check if SAML response has 'firstName' claims attribute
Returns:
(bool) true if attribute is in SAML response, false otherwise
"""
return 'firstName' in (context.get('saml').get_attributes() or dict())
@staticmethod
def verify_first_name_pattern(context):
"""
Check if 'firstName' claims attribute matches regex pattern
Returns:
(bool) true if matches, false otherwise
"""
return MongoTestSuite._matches_regex(
UserInputValidator().get_validation_regex('firstName'),
context.get('saml').get_attributes().get('firstName')
)
@staticmethod
def verify_first_name_comparison_exists(context):
"""
Check if 'firstName' claims attribute has a comparison value entered
Returns:
(bool) true if comparison value exists, false otherwise
"""
return context.get('comparison_values').get_parsed_value('firstName') is not None
@staticmethod
def verify_first_name(context):
"""
Check if 'firstName' claims attribute matches comparison value entered (case-insensitive)
Returns:
(bool) true if matches, false otherwise
"""
return context.get('comparison_values').get_parsed_value('firstName').lower() == \
context.get('saml').get_attributes().get('firstName').lower()
@staticmethod
def verify_last_name_exists(context):
"""
Check if SAML response has 'lastName' claims attribute
Returns:
(bool) true if attribute is in SAML response, false otherwise
"""
return 'lastName' in (context.get('saml').get_attributes() or dict())
@staticmethod
def verify_last_name_pattern(context):
"""
Check if 'lastName' claims attribute matches regex pattern
Returns:
(bool) true if matches, false otherwise
"""
return MongoTestSuite._matches_regex(
UserInputValidator().get_validation_regex('lastName'),
context.get('saml').get_attributes().get('lastName')
)
@staticmethod
def verify_last_name_comparison_exists(context):
"""
Check if 'lastName' claims attribute has a comparison value entered
Returns:
(bool) true if comparison value exists, false otherwise
"""
return context.get('comparison_values').get_parsed_value('lastName') is not None
@staticmethod
def verify_last_name(context):
"""
Check if 'lastName' claims attribute matches comparison value entered (case-insensitive)
Returns:
(bool) true if matches, false otherwise
"""
return context.get('comparison_values').get_parsed_value('lastName').lower() == \
context.get('saml').get_attributes().get('lastName').lower()
@staticmethod
def verify_email_comparison_exists(context):
"""
Check if Name ID (email/username) has a comparison value entered
Returns:
(bool) true if comparison value exists, false otherwise
"""
return context.get('comparison_values').get_parsed_value('email') is not None
@staticmethod
def verify_member_of_exists(context):
"""
Check if SAML response has 'memberOf' claims attribute
Returns:
(bool) true if attribute is in SAML response, false otherwise
"""
return 'memberOf' in (context.get('saml').get_attributes() or dict())
@staticmethod
def verify_member_of_not_empty(context):
"""
Check if 'memberOf' claims attribute is not empty.
Returns:
(bool) true if attribute is not empty, false otherwise
"""
return len(context.get('saml').get_attributes().get('memberOf', [])) != 0
@staticmethod
def verify_member_of_pattern(context):
"""
Check if all values in 'memberOf' claims attribute matches regex pattern
Returns:
(bool) true if matches, false otherwise
"""
return all(
MongoTestSuite._matches_regex(
UserInputValidator().get_validation_regex('memberOf'), value
) for value in context.get('saml').get_attributes().get('memberOf', [])
)
@staticmethod
def verify_member_of_is_expected(context):
"""
Check if 'memberOf' claims attribute is in SAML response if customer
expects to do role mapping.
Returns:
(bool) true if attribute exists and customer expects it, false otherwise
"""
return not context.get('comparison_values').get_parsed_value('role_mapping_expected', False)
@staticmethod
def verify_member_of_is_not_duplicated(context):
"""
Check if 'memberOf' claims attribute is in SAML response if customer
expects to do role mapping.
Returns:
(bool) true if attribute exists and customer expects it, false otherwise
"""
return 'memberOf' not in context.get('saml').get_duplicate_attribute_names()
@staticmethod
def verify_member_of_comparison_exists(context):
"""
Check if 'memberOf' claims attribute has a comparison value entered
Returns:
(bool) true if comparison value exists, false otherwise
"""
return context.get('comparison_values').get_parsed_value('memberOf') is not None
@staticmethod
def verify_member_of(context):
"""
Check if 'memberOf' claims attribute contains all comparison values entered
Returns:
(bool) true if matches, false otherwise
"""
member_of_groups = context.get('saml').get_attributes().get('memberOf', [])
return all(
group in member_of_groups
for group in context.get('comparison_values').get_parsed_value('memberOf', [])
)
# Name ID, email, and domain tests
@staticmethod
def verify_domain_comparison_exists(context):
"""
Checks if a domain was specified for comparison
Returns:
(bool) True if a comparison exists, False otherwise
"""
return context.get('comparison_values').get_parsed_value('domains') is not None
@staticmethod
def verify_domain_in_name_id(context):
"""
Checks if Name ID contains one of the federated domains specified
Returns:
(bool) True if Name ID ends with one of the domains, False otherwise
"""
return any(context.get('saml').get_subject_name_id().lower().endswith('@' + domain)
for domain in context.get('comparison_values').get_parsed_value('domains'))
@staticmethod
def verify_domain_in_comparison_email(context):
"""
Checks if the email value entered for comparison contains one of the
federated domains specified
Returns:
(bool) True if email contains ends with one of the domains, False otherwise
"""
return any(context.get('comparison_values').get_parsed_value('email').lower().endswith('@' + domain)
for domain in context.get('comparison_values').get_parsed_value('domains'))
@staticmethod
def verify_certificate_exists(context):
"""
Checks if the SAML signing certificate is included in the SAML response and that it generated
a valid Certificate object.
Returns:
(bool) True if a valid Certificate object exists, False otherwise
"""
return context.get('certificate') is not None
@staticmethod
def verify_certificate_not_expired(context):
"""
Checks if the SAML signing certificate has an expiration date in the future (not including today)
Returns:
(bool) True if certificate expires in the future, False otherwise
"""
return context.get('certificate').get_expiration_date() > datetime.now().date()
@staticmethod
def verify_certificate_expiry_comparison_exists(context):
"""
Checks if the user specified a date to compare against the SAML response certificate expiration
Returns:
(bool) True if specified, False otherwise
"""
return context.get('comparison_values').get_parsed_value('cert_expiration') is not None
@staticmethod
def verify_certificate_expiry(context):
"""
Checks if the specified expiration date matches the SAML response certificate expiration
Returns:
(bool) True if they are the same, False otherwise
"""
return context.get('comparison_values').get_parsed_value('cert_expiration') == \
context.get('certificate').get_expiration_date()
class ValidationReport:
"""
Generate validation report text for tests that fail.
All future tests that need reporting should be added in this class.
"""
def __init__(self, saml, certificate, comparison_values):
"""
Build reporter object.
Args:
saml (BaseSamlParser): parsed SAML data
certificate (`Certificate` or `None`): SAML signing certificate
comparison_values (MongoFederationConfig): comparison data
"""
# TODO: We should probably overhaul this class so that it doesn't require
# valid data, meaning that it should skip building messages that depend
# on having data that isn't available (like the certificate).
self._saml = saml
self._certificate = certificate
self._comparison_values = comparison_values
self._messages = None
self._compile_messages()
def get_all_messages(self):
"""
Shows all possible messages that can be reported.
Returns:
(dict) messages keyed by test name
"""
return self._messages
def get_messages_by_name(self, tests):
"""
Get report messages for the tests named.
Args:
tests (`list` of `basestring` or `TestDefinition`): tests on which
reporting is to be done
Returns:
(dict) messages for the tests listed, keyed by value in list
"""
return {test: self._messages.get(test, "") for test in tests}
# These classes are for generating templated text for claim attributes
@staticmethod
def _get_claim_attribute_exist(attribute_name):
return f"The required '{attribute_name}' claim attribute is missing " + \
"or is misspelled (case matters!)"
def _get_claim_attribute_regex(self, attribute_name):
return f"The required '{attribute_name}' claim attribute does not " + \
"appear to be formatted correctly.\nValue: " + \
f"{self._saml.get_attributes().get(attribute_name)}"
def _get_claim_attribute_mismatch(self, attribute_name):
return f"The required '{attribute_name}' claim attribute does not match " + \
"the value entered for comparison." + \
f"\nSAML value: {self._saml.get_attributes().get(attribute_name)}" + \
f"\nSpecified comparison value: {self._comparison_values.get_parsed_value(attribute_name)}" + \
"\n\nGenerally, this means that the identity provider configuration needs\n" + \
"to be reconfigured to match the expected values"
@staticmethod
def _print_a_list(template_string, list_contents):
"""
Outputs a list of items based on a template. For example:
if `template_string` is `"\n- {}"` and `list_contents` contains
`['a', 'b', 'c']`, the outputted string will be `\n- a\n- b\n- c`.
Args:
template_string (basestring): template to repeat. Must have exactly one `{}` to
be replaced
list_contents (iterable): values to replace in template string
Returns:
(basestring) string that represents item list
"""
full_string = template_string * len(list_contents)
return full_string.format(*list_contents)
def _compile_messages(self):
"""
Generates messages for failed tests based on provided SAML, certificate, and comparison data.
Any future tests that require a report be generated should they fail should have
an entry added to the `messages` dict with the test name as the key.
Any templated text can be added as class functions.
Returns:
None
"""
messages = {
# Name ID tests
'exists_name_id':
f"The Name ID is missing from the SAML Subject.\n"
f"Please be sure the customer's identity provider is\n"
f"emitting this attribute (it is not emitted by default for Microsoft ADFS)",
'regex_name_id':
f"The Name ID does not appear to be an email address.\n"
f"Name ID: {self._saml.get_subject_name_id()}",
'compare_email_name_id':
"The Name ID does not match the provided email value:\n" +
f"Name ID value: {self._saml.get_subject_name_id()}\n" +
f"Specified email value: {self._comparison_values.get_parsed_value('email')}" +
"\n\nThe Name ID is the value that Atlas expects as the user's\n" +
"username, and if this differs from the a current user's username,\n" +
"then the identity provider is likely not sending the correct value,\n" +
"possibly because it is configured to send the wrong user attribute.\n" +
"For example, it may be sending the UPN (user principal name) instead\n" +
"of the email address. This is more common with Azure AD users, because\n" +
"the documentation indicates that customers should use 'user.userprincipalname'\n" +
"as the source attribute for the Name ID, even though there is a caveat that\n" +
"states the customer should use the source attribute that contains the user's\n" +
"username/email address. This could be contained in 'user.mail' instead.",
# Name ID Format tests
'exists_name_id_format':
"The Name ID format could not be parsed from the SAML response.",
'regex_name_id_format':
f"The Name ID format is not an acceptable format.\n" +
f"SAML value: {self._saml.get_subject_name_id_format()}\n" +
f"Acceptable formats:" +
self._print_a_list("\n - {}", MongoTestSuite.VALID_NAME_ID_FORMATS),
# Claim attribute tests
'exists_all_required_attributes': "One or more of the required claim attributes are "
"missing from the SAML response.\nThis should not cause a problem "
"for users who log in using federation but already have a MongoDB "
"Cloud account,\nbut will cause errors for any new users that attempt "
"to authenticate.",
'exists_first_name': self._get_claim_attribute_exist('firstName'),
'regex_first_name': self._get_claim_attribute_regex('firstName'),
'compare_first_name': self._get_claim_attribute_mismatch("firstName"),
'exists_last_name': self._get_claim_attribute_exist('lastName'),
'regex_last_name': self._get_claim_attribute_regex('lastName'),
'compare_last_name': self._get_claim_attribute_mismatch("lastName"),
# Role mapping tests
'member_of_is_expected':
"The customer expects to use role mapping, but the 'memberOf' attribute\n" +
"is missing from the SAML response. The identity provider needs to be configured\n" +
"to send the group names. It is possible that the user is a member of no groups and\n" +
"so the identity provider may have omitted the attribute altogether.",
'regex_member_of': self._get_claim_attribute_regex('memberOf'),
'compare_member_of':
f"The optional 'memberOf' claim attribute is missing one or more values entered for comparison." + \
f"\nSAML value:" + self._print_a_list("\n - {}", self._saml.get_attributes().get('memberOf', [])) + \
f"\nSpecified comparison value:" + self._print_a_list(
"\n - {}", self._comparison_values.get_parsed_value('memberOf', [])) + \
"\n\nGenerally, this means that the user's account in the customer Active Directory\n" + \
"needs to be added to the correct group.",
'not_duplicate_member_of':
"The 'memberOf' claim attribute is duplicated in the SAML response instead of being\n"
"sent as an attribute with multiple values inside a single claim. For example:\n\n"
"DUPLICATED:\n"
"<Attribute Name=\"memberOf\">\n"
" <AttributeValue>Value A</AttributeValue>\n"
"</Attribute>\n"
"<Attribute Name=\"memberOf\">\n"
" <AttributeValue>Value B</AttributeValue>\n"
"</Attribute>\n\n"
"MULTI-VALUED:\n"
"<Attribute Name=\"memberOf\">\n"
" <AttributeValue>Value A</AttributeValue>\n"
" <AttributeValue>Value B</AttributeValue>\n"
"</Attribute>\n\n"
"This is common with customers who use KeyCloak as their identity provider.\n"
"Advise the customer to convert duplicated attributes into multi-valued attributes.\n"
"For KeyCloak, this can be done by setting 'Single Role Attribute: ON'.",
# Federated domain tests
'compare_domain_comparison_email':
"The specified comparison email value does not contain\n" +
"one of the federated domains specified:\n" +
f"Specified email value: {self._comparison_values.get_parsed_value('email')}\n" +
f"Specified valid domains:" +
self._print_a_list("\n - {}", self._comparison_values.get_parsed_value('domains', [])) +
"\n\nIf the email specified is the user's MongoDB username, then the Atlas\n" +
"identity provider configuration likely has the incorrect domain(s) verified.",
'compare_domain_name_id':
"The Name ID does not contain one of the federated domains specified:\n" +
f"Name ID value: {self._saml.get_subject_name_id()}\n" +
f"Specified valid domains:" +
self._print_a_list("\n - {}", self._comparison_values.get_parsed_value('domains', [])) +
"\n\nIf the Name ID does not contain a verified domain name, it may be because\n" +
"the source Active Directory field does not contain the user's email address.\n" +
"The source field may contain an internal username or other value instead.",
# Issuer URI tests
'exists_issuer':
"The Issuer URI could not be parsed from the SAML response." +
"\nCannot run any verification tests for this parameter.",
'regex_issuer':
f"The Issuer URI does not match the anticipated pattern.\n" +
f"Issuer URI: {self._saml.get_issuer_uri()}",
'match_issuer':
"The Issuer URI in the SAML response does not match the specified comparison value:\n" +
f"SAML value: {self._saml.get_issuer_uri()}\n" +
f"Specified comparison value: {self._comparison_values.get_parsed_value('issuer')}" +
"\n\nGenerally, this means that the Atlas configuration needs " +
"to be set to match the SAML value",
# Audience URL tests
'exists_audience':
"The Audience URL could not be parsed from the SAML response." +
"\nCannot run any verification tests for this parameter.",
'regex_audience':
f"The Audience URL does not match the anticipated pattern.\n" +
f"Audience URL: {self._saml.get_audience_url()}",
'match_audience':
"The Audience URL in the SAML response does not match the specified comparison value:\n" +
f"SAML value: {self._saml.get_audience_url()}\n" +
f"Specified comparison value: {self._comparison_values.get_parsed_value('audience')}" +
"\n\nGenerally, this means that the Atlas configuration needs " +
"to be set to match the SAML value",
# ACS URL tests
'exists_acs':
"The Assertion Consumer Service URL could not be parsed from the SAML response." +
"\nCannot run any verification tests for this parameter.",
'regex_acs':
f"The Assertion Consumer Service URL does not match the anticipated pattern.\n" +
f"ACS URL: {self._saml.get_assertion_consumer_service_url()}",
'match_acs':
"The Assertion Consumer Service URL in the SAML response does not match the " +
"specified comparison value:\n" +
f"SAML value: {self._saml.get_assertion_consumer_service_url()}\n" +
f"Specified comparison value: {self._comparison_values.get_parsed_value('acs')}" +
"\n\nThis means that the identity provider configuration needs\n" +
"to be reconfigured to match the expected value",
# Encryption algorithm tests
'exists_encryption':
"The encryption algorithm could not be parsed from the SAML response." +
"\nCannot run any verification tests for this parameter.",
'regex_encryption':
f"The encryption algorithm does not match the anticipated pattern.\n" +
f"Encryption Algorithm: {self._saml.get_encryption_algorithm()}",
'match_encryption':
"The encryption algorithm for the SAML response does not " +
"match the specified comparison value:\n" +
f"SAML value: {self._saml.get_encryption_algorithm()}\n" +
f"Specified comparison value: " +
f"{self._comparison_values.get_parsed_value('encryption')}" +
"\n\nGenerally, this means that the Atlas configuration needs " +
"to be set to match the SAML value",
}
if self._certificate is not None:
messages.update({
# Certificate tests
'certificate_not_expired':
f"The SAML signing certificate included with the SAML response appears expired\n" +
f"or it expires today.\n" +
f"Expiration date (MM/DD/YYYY): {self._certificate.get_expiration_date():%m/%d/%Y}\n" +
"\nGenerally, this means that the identity provider needs to be updated with a\n" +
"valid certificate pair, and that the public certificate of the pair must be re-uploaded to Atlas.",
'match_certificate_expiry':
f"The expiration of the SAML signing certificate included with the SAML response\n" +
f"does not match the specified expiration date.\n" +
f"SAML value (MM/DD/YYYY): {self._certificate.get_expiration_date():%m/%d/%Y}\n" +
f"Specified comparison value (MM/DD/YYYY): " +
f"{self._comparison_values.get_parsed_value('cert_expiration') or datetime.now():%m/%d/%Y}" +
"\n\nThis is a likely indicator that the SAML signing certiticate uploaded to Atlas is not the\n" +
"correct certificate. Please direct the customer to upload the correct public certificate.",
})
self._messages = messages
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/validation/mongo.py
| 0.767385 | 0.61231 |
mongo.py
|
pypi
|
import re
from datetime import datetime
class _NullUserInput:
"""
Class that represents an empty user input.
"""
pass
class UserInputValidator:
"""
Validates user input or SAML data against a regular expression and/or an arbitrary function.
"""
def __init__(self):
"""
Create an instance of the comparison engine. Loads comparison regular expressions
and functions.
Returns:
None
"""
# Regular expressions to validate SAML fields and claim attributes
self._regex_by_attribute = {
'firstName': r'^\s*\S+.*$',
'lastName': r'^\s*\S+.*$',
'email': r"\b(?i)([A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,})\b",
'issuer': r'^\s*\S+.*$',
'acs': r'^https:\/\/auth\.mongodb\.com\/sso\/saml2\/[a-z0-9A-Z]{20}$',
'audience': r'^https:\/\/www\.okta\.com\/saml2\/service-provider\/[a-z]{20}$',
'encryption': r'^(?i)sha-?(1|256)$',
'domains': r'^(?i)[A-Z0-9.-]+?\.[A-Z]{2,}$',
'memberOf': r'^\s*\S+.*$',
'role_mapping_expected': '^(?i)[YN]$'
}
# Arbitrary functions used to validate SAML field and claim attributes for those
# that require more than a simple string pattern matching.
self._func_by_attribute = {
'cert_expiration': self._validate_cert_expiration
}
def __contains__(self, name):
"""
Determines if an attribute name can be validated by this class.
Args:
name (basestring): attribute name
Returns:
(bool) True if the attribute name can be validated, False otherwise
"""
return name in self._regex_by_attribute or name in self._func_by_attribute
def validate(self, attribute_name, value):
"""
Determine if a value passes validation tests defined for a given attribute.
Args:
attribute_name (basestring): name of the attribute. Determines which test(s)
are run to validate the `value`.
value (`basestring` or `Any`): value to be validated. Usually this is a string,
but it can be any type when depending on a function for validation.
Returns:
(bool) True if the value passes validation, False otherwise
"""
if value == _NullUserInput:
return True
regex_valid = True
func_valid = True
if attribute_name in self._regex_by_attribute:
regex_valid = bool(re.fullmatch(self._regex_by_attribute[attribute_name], value))
if attribute_name in self._func_by_attribute:
func_valid = bool(self._func_by_attribute[attribute_name](value))
return regex_valid and func_valid
def get_validation_regex(self, attribute_name):
"""
Gets regular expression string for a given attribute, if it exists.
Args:
attribute_name (basestring): the name of the attribute whose regex
string is to be retrieved
Raises:
ValueError: if there is no defined regular expression string for the
provided attribute name
Returns:
basestring: regular expression string for validating the attribute
"""
if attribute_name in self._regex_by_attribute:
return self._regex_by_attribute[attribute_name]
raise ValueError(f"Regex for attribute name '{attribute_name}' not found")
def _validate_cert_expiration(self, value):
"""
Validates that the value specified is a valid date in the MM/DD/YYYY format and
that it it is in the future.
Args:
value (basestring): date string
Returns:
bool: True if the string is a valid date in the future
"""
try:
# Make sure it's a correctly-formatted date
date = datetime.strptime(value, '%m/%d/%Y')
except ValueError as e:
if "does not match format" in e.args[0]:
return False
if date < datetime.now():
# Date must be in the future
return False
return True
class UserInputParser:
"""
Parses user input after validation to coerce it into the correct format for testing.
"""
def __init__(self):
"""
Instantiates instance of the parser.
"""
self._validator = UserInputValidator()
# Custom parsing functions by attribute
self._parsing_func_by_attribute = {
'domains': lambda x: [v.strip().lower() for v in x],
'encryption': lambda x: "SHA" + re.findall(
self._validator.get_validation_regex('encryption'), x)[0],
'firstName': lambda x: x.strip(),
'lastName': lambda x: x.strip(),
'email': lambda x: x.strip(),
'role_mapping_expected': lambda x: x.upper() == 'Y',
'cert_expiration': lambda x: datetime.strptime(x, '%m/%d/%Y').date()
}
def parse(self, attribute_name, value):
"""
Parses the value using the function stored for the specified attribute name.
If no function is defined, or the value is of type `_NullUserInput`, the value is returned as is.
Args:
attribute_name (basestring): name of attribute
value (`basestring` or `Any`): attribute value to parse
Returns:
Any: parsed value
"""
if value == _NullUserInput or attribute_name not in self._parsing_func_by_attribute:
return value
return self._parsing_func_by_attribute[attribute_name](value)
class MongoComparisonValue:
"""
Collects comparison value input from the user through stdin prompts
"""
def __init__(self, name, prompt, multi_value=False, default=_NullUserInput):
"""
Create a comparison value object for a given value type.
Args:
name (basestring): name of the input value, must be contained in UserInputValidator.
prompt (basestring): the text with which to prompt the user during input
multi_value (bool, optional): True if the user should be prompted for more than one input value,
False will only prompt for one input value. Default: False (one input)
default (object, optional): The default value to set if the user does not input anything. Default: None
"""
self._validator = UserInputValidator()
if name not in self._validator:
raise ValueError(f"Unknown value name: {name}")
self._name = name
self._prompt = prompt
self._value = _NullUserInput
self._is_multivalued = multi_value
if self._validator.validate(name, default):
self._default = default
else:
raise ValueError(f"Invalid default value '{default}' for attribute '{name}'")
def prompt_for_user_input(self, input_stream=input, output_stream=print):
"""
Prompt user for input using stdin (by default) or another specified input stream.
Args:
input_stream (callable): function to gather user input. default: handle to `input()`
output_stream (callable): function to print user prompts. default: handle to `print()`
Returns:
(`basestring`, `list`, or `object`) The user input as a string or list, depending if multi-valued
or the default value if no user input provided
"""
if self._is_multivalued:
user_input = self._get_multi_value(input_stream=input_stream,
output_stream=output_stream)
else:
user_input = self._get_single_value(input_stream=input_stream,
output_stream=output_stream)
if user_input is _NullUserInput:
self._value = self._default
self._value = user_input
def get_name(self):
"""
Get value name
Returns:
(basestring) value name
"""
return self._name
def get_value(self):
"""
Get value after it has been gathered from input or set manually.
Raises:
ValueError: raised if value has not been set
Returns:
`basestring` or `Any`: value of the input
"""
if self._value is _NullUserInput:
raise ValueError("This value has not been gathered yet!")
return self._value
def set_value(self, value):
"""
Sets value programmatically without prompting user for input.
Args:
value (`basestring` or `Any`): the value to be stored
Raises:
ValueError: raised if the value does not pass validation with `UserInputValidator`
"""
if isinstance(value, list):
value_list = value
else:
value_list = [value]
if all(self._validator.validate(self._name, v) for v in value_list):
self._value = value
else:
raise ValueError("Input did not pass validation")
def is_null(self):
"""
Determine if the value is null, either by user input or not being set yet.
Will return False if the value is set as the default and the default value is not
`_NullUserInput`.
Returns:
bool: True if null, False otherwise
"""
return self._value is _NullUserInput
def _get_single_value(self, input_stream=input, output_stream=print):
"""
Prompt user for a single value with default prompt.
Args:
input_stream (callable): function to gather user input. default: handle to `input()`
output_stream (callable): function to print user prompts. default: handle to `print()`
Returns:
(`basestring` or `object`) The user input as a string
or the default value if no user input provided
"""
return self._get_and_validate_user_input(input_stream=input_stream,
output_stream=output_stream)
def _get_multi_value(self, input_stream=input, output_stream=print):
"""
Prompt user for a multiple values with a numbered prompt.
Args:
input_stream (callable): function to gather user input. default: handle to `input()`
output_stream (callable): function to print user prompts. default: handle to `print()`
Returns:
(`list` or `object`) The user input as a list
or the default value if no user input provided
"""
input_to_store = []
output_stream(self._prompt)
list_index = 1
user_input = self._get_and_validate_user_input(prompt=f"{list_index}.",
input_stream=input_stream,
output_stream=output_stream)
while user_input is not _NullUserInput:
input_to_store.append(user_input)
list_index += 1
user_input = self._get_and_validate_user_input(prompt=f"{list_index}.",
input_stream=input_stream,
output_stream=output_stream)
if not input_to_store:
input_to_store = self._default
return input_to_store
def _get_and_validate_user_input(self, prompt=None, input_stream=input, output_stream=print):
"""
Prompts user for input from stdin.
Args:
prompt (basestring, optional): The text to prompt the user with.
Default: None (prompts with self._prompt)
input_stream (callable): function to gather user input. default: handle to `input()`
output_stream (callable): function to print user prompts. default: handle to `print()`
Returns:
(`basestring`) the data input by the user. None if user inputs nothing.
"""
if prompt is None:
prompt = self._prompt
if not re.match(r'.*\s$', prompt):
# If the prompt doesn't end with a whitespace character, add a space for padding
prompt += " "
while True:
user_input = input_stream(prompt)
if user_input:
if self._validator.validate(self._name, user_input):
return user_input
else:
output_stream(f"Input did not pass validation. Try again or skip the value.")
else:
return _NullUserInput
class MongoFederationConfig:
"""
Stores user-provided federation configuration values for comparison with SAML data
"""
def __init__(self, **kwargs):
"""
Set comparison values and verify they match a regular expression (input validation)
Args:
**kwargs: currently accepted keywords are:
- `issuer`: Issuer URI
- `audience`: Audience URL
- `acs`: Assertion Consumer Service URL
- `encryption`: Encryption algorithm
- `firstName`: expected value for "firstName" claim attribute
- `lastName`: expected value for "lastName" claim attribute
- `email`: expected value for Name ID
- `domains`: domain names associated with the identity provider, as a
list of strings.
- `role_mapping_expected`: whether role mapping is configured, determines if
`memberOf` should be a required attribute
- `memberOf`: expected AD group names to be mapped for role mapping as a list of
strings.
- `cert_expiration`: expected expiration date for the SAML signing certificate
as a date string formatted "MM/DD/YYYY"
"""
self._settings = dict()
self._parser = UserInputParser()
if kwargs:
# TODO: Possibly separate this out as its own function or class?
# i.e. a `from_dict()` or `from_json()` function
for name, value in kwargs.items():
value_obj = MongoComparisonValue(name, "")
try:
value_obj.set_value(value)
except ValueError:
raise ValueError(f"Input for attribute {name} did not pass validation", name)
self.set_value(value_obj)
def get_parsed_value(self, value_name, default=None):
"""
Get comparison value by name
Args:
value_name (basestring): name of comparison value keyword
default (object, optional): the value returned if the comparison value is not populated. Default: None
Returns:
(`basestring` or `None`) comparison value, `None` if name does not exist
"""
return self._settings.get(value_name, default)
def set_values(self, value_list):
"""
Set multiple values in the configuration.
Args:
value_list (`list` of `MongoComparisonValue`): values to be added to configuration
Raises:
TypeError: raised if not all members of `value_list` are not of type `MongoComparisonValue`
"""
if not all(isinstance(v, MongoComparisonValue) for v in value_list):
raise TypeError("All values must be of type MongoComparisonValue")
for value_obj in value_list:
name = value_obj.get_name()
value = value_obj.get_value()
self._settings[name] = self._parser.parse(name, value)
def set_value(self, value):
"""
Set a value in the configuration.
Args:
value (MongoComparisonValue): the value to set
Raises:
TypeError: raised if `value` is not of type `MongoComparisonValue`
"""
if not isinstance(value, MongoComparisonValue):
raise TypeError("Value must be of type MongoComparisonValue")
self.set_values([value])
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/validation/input_validation.py
| 0.900912 | 0.459501 |
input_validation.py
|
pypi
|
from datetime import datetime
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from saml_reader.validation.input_validation import MongoFederationConfig
from saml_reader.cli import run_analysis, OutputStream
from saml_reader.web.app import app
def submit_analysis_to_backend(data_type, saml_data, comparison_data):
"""Sends data to the SAML reader backend after compilation from the
web frontend.
Args:
data_type (basestring): Type of data being passed in. Must be
`xml`, `base64`, or `har`.
saml_data (basestring): raw SAML data
comparison_data (MongoFederationConfig): federation data for comparison
Returns:
basestring: report generated after running tests
"""
report = OutputStream()
run_analysis(
input_type=data_type,
source='raw',
compare=True,
compare_object=comparison_data,
raw_data=saml_data,
print_analysis=True,
print_summary=True,
output_stream=report.print
)
# TODO: In the future, generate a nicer looking report on the webpage, so
# this function should just return the status of tests and another
# function will handle building the web report.
return report.getvalue()
@app.callback(
Output('analysis_output', 'value'),
[Input('submit_saml_data', 'n_clicks')],
[State('saml_data_type', 'value'),
State('saml_input', 'value'),
State('compare-first-name', 'value'),
State('compare-last-name', 'value'),
State('compare-email', 'value'),
State('compare-audience', 'value'),
State('compare-acs', 'value'),
State('compare-issuer', 'value'),
State('compare-encryption', 'value'),
State('compare-cert-expiration', 'date'),
State('compare-domain-list', 'value'),
State('compare-role-mapping-expected', 'value'),
State('compare-group-list', 'value')]
)
def submit_analysis(
n_clicks,
data_type, saml_data,
first_name, last_name, email, audience, acs, issuer, encryption,
cert_expiration, domain_list, role_mapping_expected, group_list):
"""Validates comparison input data and, if passes, send it to the analyzer.
If an entry fails, an error is output to the results section. If all entries
are acceptable, the output of the analyzer is output to the results section.
Args:
n_clicks (int): how many times was the submission button clicked
data_type (basestring): format of data entered
saml_data (basestring): SAML data
first_name (basestring): first name for comparison
last_name (basestring): last name for comparison
email (basestring): email for comparison
audience (basestring): audience URI for comparison
acs (basestring): assertion consumer service URL for comparison
issuer (basestring): issuer URI for comparison
encryption (basestring): encryption algorithm for comparison
cert_expiration (basestring): signing certificate expiration date for comparison
domain_list (`list` of `basestring`): list of federated domains for comparison
role_mapping_expected (`list` of `basestring`): contains "Yes" if role mapping is expected,
`None` otherwise.
group_list (`list` of `basestring`): list of expected role mapping groups for the user
Raises:
PreventUpdate: if the callback was canceled or there is no SAML data to send
Returns:
basestring: output for the results box
"""
if n_clicks is None or not saml_data:
raise PreventUpdate
comparison_data = {
"firstName": first_name or None,
"lastName": last_name or None,
"email": email or None,
"issuer": issuer or None,
"acs": acs or None,
"audience": audience or None,
"encryption": encryption or None,
"role_mapping_expected": "N"
}
if domain_list:
comparison_data["domains"] = domain_list
if role_mapping_expected:
comparison_data["role_mapping_expected"] = "Y"
if group_list:
comparison_data["memberOf"] = group_list
if cert_expiration:
comparison_data["cert_expiration"] = datetime.strptime(cert_expiration, "%Y-%m-%d").strftime("%m/%d/%Y")
try:
comparison_object = MongoFederationConfig(**{k:v for k,v in comparison_data.items() if v is not None})
except ValueError as e:
return e.args[0]
return submit_analysis_to_backend(data_type, saml_data, comparison_object)
@app.callback(
Output('div-role-mapping-groups', 'hidden'),
[Input('compare-role-mapping-expected', 'value')]
)
def toggle_role_mapping_entry(role_mapping_expected):
"""Toggles role mapping section on or off
Args:
role_mapping_expected (`list` of `basestring`): contains "Yes" if enabled,
`None` otherwise.
Returns:
bool: True if should be hidden, False otherwise
"""
if "Yes" in role_mapping_expected or []:
return False
return True
@app.callback(
[Output('compare-first-name', 'value'),
Output('compare-last-name', 'value'),
Output('compare-email', 'value'),
Output('compare-audience', 'value'),
Output('compare-acs', 'value'),
Output('compare-issuer', 'value'),
Output('compare-encryption', 'value'),
Output('domain-name-text', 'value'),
Output('group-name-text', 'value'),
Output('saml_input', 'value'),
Output('analysis_output', 'value'),
Output('compare-cert-expiration', 'date'),
Output('compare-domain-list', 'options'),
Output('compare-domain-list', 'value'),
Output('compare-role-mapping-expected', 'value'),
Output('compare-group-list', 'options'),
Output('compare-group-list', 'value')],
[Input('submit_reset_values', 'submit_n_clicks')]
)
def prompt_reset_values(n_clicks):
"""Clears entered data if user confirms ok.
Args:
n_clicks (int): non-zero if the user confirmed to clear the data
Raises:
PreventUpdate: if the callback was canceled (user said No)
Returns:
appropriate values to clear all data on the page
"""
if n_clicks is None:
raise PreventUpdate
return [""]*11 + [None] + [[]]*5
@app.callback(
[Output('compare-domain-list', 'options'),
Output('compare-domain-list', 'value'),
Output('domain-name-text', 'value')],
[Input('submit-add-domain', 'n_clicks'),
Input('domain-name-text', 'n_submit')],
[State('domain-name-text', 'value'),
State('compare-domain-list', 'options'),
State('compare-domain-list', 'value')]
)
def add_domain_to_list(n_clicks, n_submit, value, current_items, checked_items):
"""Adds a domain to the list of federated domains, checks its box, and clears entry field.
If value is already in list, just clears the entry field.
Args:
n_clicks (int): is not None if the button was pressed
n_submit (int): is not None if Enter was pressed in the domain text box
value (basestring): value of the domain
current_items (`list` of `dict`): checkbox options representing domains currently in list
checked_items (`list` of `basestring`): checked options (should be all domains)
Raises:
PreventUpdate: if button not clicked or Enter not pressed in the entry field
or no value in the entry field
Returns:
tuple: `list` of `dict` of items in the domain checklist,
`list` of `basestring` representing items checked,
`basestring` for value of entry field
"""
if (n_clicks is None) and (n_submit is None) or value == "":
raise PreventUpdate
if value not in [x["value"] for x in current_items]:
current_items.append({"label": value, "value": value})
checked_items.append(value)
return current_items, checked_items, ""
@app.callback(
Output('compare-domain-list', 'options'),
[Input('compare-domain-list', 'value')]
)
def remove_domain_from_list(checked_items):
"""Remove domain from the list when it is unchecked.
Args:
checked_items (`list` of `basestring`): list of domains currently checked
Returns:
`list` of `dict`: updated list of domains in checklist
"""
return [{"label": x, "value": x} for x in checked_items]
@app.callback(
[Output('compare-group-list', 'options'),
Output('compare-group-list', 'value'),
Output('group-name-text', 'value')],
[Input('submit-add-group', 'n_clicks'),
Input('group-name-text', 'n_submit')],
[State('group-name-text', 'value'),
State('compare-group-list', 'options'),
State('compare-group-list', 'value')]
)
def add_group_to_list(n_clicks, n_submit, value, current_items, checked_items):
"""Adds a AD to the list of mapped groups, checks its box, and clears entry field.
If value is already in list, just clears the entry field.
Args:
n_clicks (int): is not None if the button was pressed
n_submit (int): is not None if Enter was pressed in the group text box
value (basestring): value of the group
current_items (`list` of `dict`): checkbox options representing groups currently in list
checked_items (`list` of `basestring`): checked options (should be all groups)
Raises:
PreventUpdate: if button not clicked or Enter not pressed in the entry field
or no value in the entry field
Returns:
tuple: `list` of `dict` of items in the group checklist,
`list` of `basestring` representing items checked,
`basestring` for value of entry field
"""
# TODO: This should probably be consolidated with the domain list
if (n_clicks is None) and (n_submit is None) or value == "":
raise PreventUpdate
if value not in [x["value"] for x in current_items]:
current_items.append({"label": value, "value": value})
checked_items.append(value)
return current_items, checked_items, ""
@app.callback(
Output('compare-group-list', 'options'),
[Input('compare-group-list', 'value')]
)
def remove_group_from_list(checked_items):
"""Remove group from the list when it is unchecked.
Args:
checked_items (`list` of `basestring`): list of groups currently checked
Returns:
`list` of `dict`: updated list of groups in checklist
"""
# TODO: This could be consolidated with the domain list
return [{"label": x, "value": x} for x in checked_items]
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/web/callbacks/analyze.py
| 0.764364 | 0.288731 |
analyze.py
|
pypi
|
from dash import dcc, html
def build_layout():
"""
Builds layout for page.
Returns:
an HTML component such as html.Div
"""
data_info_layout = html.Div([
html.Label(
children="Select Data Type:",
style={
'display': 'inline-block',
'margin-right': '10px',
'vertical-align': 'middle'
}
),
dcc.Dropdown(
id='saml_data_type',
options=[
{'label': 'xml', 'value': 'xml'},
{'label': 'base64', 'value': 'base64'},
{'label': 'har', 'value': 'har'}
],
value='xml',
placeholder="Select data type",
style={
'width': '100px',
'display': 'inline-block',
'margin-right': '10px',
'vertical-align': 'middle'
}
),
html.Button(
id="submit_saml_data",
children="Analyze",
style={
'display': 'inline-block',
'vertical-align': 'middle'
}
)
])
warning_text = dcc.Markdown(
"""
### SAML Data
*Please note:* It is not recommended to paste HAR data here, because HAR files are usually quite large
in size and you can crash your browser. Use the CLI instead.
"""
)
input_box = dcc.Textarea(
id='saml_input',
placeholder="Paste SAML data here",
style={
'width': "100%",
'height': 300,
'resize': 'none'
}
)
output_box_label = html.Label("Analysis output:")
output_box = dcc.Textarea(
id='analysis_output',
placeholder="Your analysis will appear here",
contentEditable=False,
style={
'width': "100%",
'height': 300,
'resize': 'none'
}
)
left_side = html.Div(
children=[
warning_text,
html.Br(),
data_info_layout,
html.Br(),
input_box,
html.Br(),
output_box_label,
html.Br(),
output_box
],
style={
"width": "50%",
'display': 'inline-block',
'vertical-align': 'top',
})
rs_top_text = html.Div([
dcc.Markdown(
"""
### Comparison values
If you would like to enter comparison values, please do so below."""
),
html.Details([
html.Summary(
dcc.Markdown(
"Need help finding this info? Click here.",
style={
'vertical-align': 'top',
'display': 'inline-block'
}
)
),
dcc.Markdown(
"""
#### Finding comparison values
First name, last name, and username information can be found in the
Support Portal or admin panel.
The Audience URI, Assertion Consumer Service URL, and associated domains
can be found in the customer's federation settings on the identity provider
information card.
The Issuer URI, encryption type, and SAML certificate expiration date
can be found by clicking "Modify" on the identity provider information
card in the customer's federation settings.
To determine if role mapping is expected, first look to see if any
organizations are associated with the active identity provider configuration.
For each organization that is associated, find the associated organization in
the Organizations section and click into the settings. From the organization settings,
click into Role Mappings. If there are any role mappings defined in any organization,
then the customer is expecting role mapping to be configured.
For expected group names, if a customer has specified which group name(s) their
user is supposed to have, then you can add that to this list."""
)]
)]
)
comparison_fields = html.Div([
dcc.ConfirmDialogProvider(
children=html.Button(
"Reset All Values",
style={"margin-bottom": "1em"}
),
id='submit_reset_values',
message='Are you sure you want to clear all values including SAML data?',
),
html.Br(),
html.Div([
html.Label(
"User's First Name",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="Sam",
type='text',
value='',
id='compare-first-name',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
)
]),
html.Div([
html.Label(
"User's Last Name",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="Ell",
type='text',
value='',
id='compare-last-name',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
)
]),
html.Div([
html.Label(
"User's Email Address/Username",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="[email protected]",
type='text',
value='',
id='compare-email',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
)
]),
html.Div([
html.Label(
"Audience URI",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="https://www.okta.com/saml2/service-provider/...",
type='text',
value='',
id='compare-audience',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
]),
html.Div([
html.Label(
"Assertion Consumer Service URL",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="https://auth.mongodb.com/sso/saml2/...",
type='text',
value='',
id='compare-acs',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
)
]),
html.Div([
html.Label(
"Issuer URI",
style={
"width": "30%",
'display': 'inline-block',
'vertical-align': 'middle'
}
),
dcc.Input(
placeholder="idp-entity-id",
type='text',
value='',
id='compare-issuer',
style={
"width": "300px",
'display': 'inline-block',
'vertical-align': 'middle'
}
)
]),
html.Label(
"Encryption Type"
),
html.Br(),
dcc.Dropdown(
placeholder="SHA-?",
options=[{"label": x, "value": x} for x in ['SHA-256', 'SHA-1']],
value='',
id='compare-encryption',
style={
"width": "300px",
"display": "inline-block"}
),
html.Br(),
html.Label(
"SAML Certificate Expiration Date (MM/DD/YYYY)"
),
html.Br(),
dcc.DatePickerSingle(
placeholder='Select Date',
id='compare-cert-expiration',
clearable=True,
display_format="MM/DD/YYYY",
style={
"width": "500px"
}
),
html.Br(),
html.Label(
"Associated Domains"
),
html.Br(),
dcc.Input(
placeholder="mydomain.com",
type='text',
value='',
id='domain-name-text',
style={"width": "300px"}
),
html.Button(
"Add",
id='submit-add-domain',
style={"display": "inline-block", "vertical-align": "middle"}
),
html.Br(),
html.Div(
id='div-domain-list',
children=[
dcc.Markdown(
"List of domains:",
style={"font-weight": "bold"}
),
dcc.Checklist(
id="compare-domain-list",
options=[],
value=[],
inputStyle={
"margin-right": "1em"
},
labelStyle={
"font-weight": "normal",
"display": "block"
}
)],
style={
"width": "400px",
"border": "1px solid black",
"display": "inline-block",
"margin-bottom": "1em",
"margin-top": "1em",
"padding": "0.5em"
}
),
html.Br(),
dcc.Checklist(
id='compare-role-mapping-expected',
options=[
{"label": "Role mapping expected?", "value": "Yes"}
],
inputStyle={
"margin-right": "1em"
}
),
html.Div(
id='div-role-mapping-groups',
hidden=True,
children=[
html.Label(
"Expected Group Names"
),
html.Br(),
dcc.Input(
placeholder="Group Name",
type='text',
value='',
id='group-name-text',
style={"width": "300px"}
),
html.Button(
"Add",
id='submit-add-group',
style={"display": "inline-block",
"vertical-align": "middle"}
),
html.Br(),
html.Div(
id='div-group-list',
children=[
dcc.Markdown(
"List of expected group names:",
style={"font-weight": "bold"}
),
dcc.Checklist(
id="compare-group-list",
options=[],
value=[],
inputStyle={
"margin-right": "1em"
},
labelStyle={
"font-weight": "normal",
"display": "block"
}
)],
style={
"width": "400px",
"border": "1px solid black",
"display": "inline-block",
"margin-bottom": "1em",
"margin-top": "1em",
"padding": "0.5em"
}
),
]
)
], style={'display': 'inline-block',
'vertical-align': 'middle'})
right_side = html.Div(
children=[
rs_top_text,
html.Br(),
comparison_fields
],
style={
'display': 'inline-block',
'vertical-align': 'top',
'width': '50%'
}
)
layout = html.Div([
left_side, right_side
], className="row", style={"margin-bottom": "3em"})
return layout
"""Page layout"""
layout = build_layout()
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/web/layouts/analyze.py
| 0.703244 | 0.222341 |
analyze.py
|
pypi
|
AWS SAML Authorization Script.
==============================
This is a tool to authenticate to Amazon Web Services using the ADFS
SAML provider and create temporary tokens for use with AWS API clients.
Two approaches
--------------
The code is packaged as a Python module; if you have a working Python
installation of a recent enough vintage, you should be able to install
it directly from PyPI (http://pypi.python.org) by running
``pip install samlkeygen``.
If you regularly use Docker and would rather run the tool without
installing anything, you can run a prebuilt Docker image directly from
Docker Hub (http://dockerhub.com) with
``docker run turnerlabs/samlkeygen``.
Both approaches require some environment variables and/or command-line
parameters in order to function properly; see below for details. Of
course, you can also always clone the git repository
(https://github.com/turnerlabs/samlkeygen) and use the source directly.
Pull requests with improvements are always welcome!
MacOS Note
~~~~~~~~~~
On some versions of MacOS, the stock Python install doesn’t include
``pip``. and even if you install it (e.g. with ``easy_install``),
installing ``samlkeygen`` may require upgrading some standard modules
that file system security restrictions will not permit. On such systems
you will need to either take the Docker approach or install a separate
instance of Python (2.7+ or 3.x, the module works either way); for the
latter solution, consider using Homebrew (http://brew.sh) and/or pyenv
(https://github.com/pyenv/pyenv)
Shortcuts
---------
When installed as a module, ``samlkeygen`` installs three front-end
scripts to make common use cases take less typing: ``awsprof``,
``awsprofs``, and ``samld``. There are bash alias definitions below to
create similar commands when using the Docker version of the tool; the
github repo includes a ``source``\ able bash script containing those
definitions.
Configuration Parameters
------------------------
The primary configuration parameters required for the authentication
operation are the URL of your ADFS service endpoint and the credentials
to log into that endpoint: your Active Directory domain, username, and
password. You can provide all of these via command-line parameters or
environment variables, though for security we recommend that you let the
tool prompt you for your password (which will not be echoed).
+---------------+-----------------------------------+------------------+
| Option | Environment Variable | Description |
+===============+===================================+==================+
| –url | ADFS_URL | Complete URL to |
| | | ADFS SAML |
| | | endpoint for AWS |
+---------------+-----------------------------------+------------------+
| –domain | ADFS_DOMAIN | Active |
| | | Directory/ADFS |
| | | domain name |
+---------------+-----------------------------------+------------------+
| –username | USER | Your Active |
| | | Directory |
| | | username |
| | | (sAMAccountName) |
+---------------+-----------------------------------+------------------+
| –password | PASSWORD | Your Active |
| | | Directory |
| | | password (again, |
| | | we recommend you |
| | | leave this unset |
| | | and allow the |
| | | program to |
| | | prompt you for |
| | | it) |
+---------------+-----------------------------------+------------------+
Depending on your environment, you may also have to be on your corporate
LAN or VPN in order to authenticate. Once you have obtained keys for a
profile, though, those should work from anywhere until they expire.
Quick start
~~~~~~~~~~~
``samlkeygen`` has two basic functions: the primary one is
authentication, in which it connects to the ADFS endpoint,
authenticates, and gets authorization tokens for the user’s defined SAML
roles. The secondary function is to simplify the selection of a
credentials profile file for use by a command.
Authentication
^^^^^^^^^^^^^^
SAML-based credentials are only good for an hour (a hard AWS-imposed
limit). To make this limit less inconvenient,
``samlkeygen authenticate`` provides a mode of operation called
``auto-update`` which requests your password once, then runs continually
and automatically requests and saves new credentials every hour just
before the old ones expire. The supplied entry points include ``samld``,
which can usually be run without arguments if the environment variables
are set properly, or with just ``--username sAMAccountName`` if that
isn’t the same as your local ``$USER`` on your workstation. Example:
::
$ samld --username gpburdell
...
Writing credentials for profile aws-shared-services:aws-shared-services-admin
Writing credentials for profile aws-shared-services:aws-shared-services-dns
Writing credentials for profile aws-ent-prod:aws-ent-prod-admin
Writing credentials for profile cdubs:aws-cdubs-admin
58 minutes till credential refresh
The full usage for ``samlkeygen authenticate`` may be found below.
Profile selection
~~~~~~~~~~~~~~~~~
The authentication tokens will be written out to your credentials file
with a separate profile for each SAML role, named
``$ACCOUNT_ALIAS:$ROLE_NAME``. You can get a list of your profiles by
running ``samlkeygen list-profiles``, which takes an optional parameter
to restrict the output to those profiles matching a substring (really a
regular expression). There’s an ``awsprofs`` alias/entry point for this
functionality:
::
$ awsprofs shared-services
aws-shared-services:aws-shared-services-admin
aws-shared-services:aws-shared-services-dns-cnn
These are normal AWS profiles and can be used like any other, by
supplying the ``--profile`` option to whatever AWS CLI command you are
running, or setting the ``AWS_PROFILE`` environment variable (or
``AWS_DEFAULT_PROFILE`` for some older tools). However, since the
autogenerated names are somewhat long, the script also has a subcommand
that lets you select a profile via substring or regular expression
match: ``select-profile`` works just like ``list-profiles``, but
requires that the pattern match exactly one profile. The supplied
aliases/entry points include one called ``awsprof`` (singular) for this
use case:
::
$ awsprof shared-services
samlkeygen.py: Pattern is not unique. It matches these profiles:
aws-shared-services:aws-shared-services-admin
aws-shared-services:aws-shared-services-dns-cnn
If the pattern does match one profile, that profile’s full name is
output by itself; the intent is to use the command in
command-substitution:
::
$ aws --profile $(awsprof shared-services-admin) iam list-account-aliases
{
"AccountAliases": [
"aws-shared-services"
]
}
Finally, if you are running the local Python version, you can ask the
script to run a command for you under a given profile. The pip-installed
entry poitns include one called ``awsrun`` for this function; there’s no
corresponding Docker alias because the Docker container would have to
include the AWS command-line tool you wanted to run this way.
That lets me replace the above example with this:
::
$ awsrun shared-services-admin aws iam list-account-aliases
{
"AccountAliases": [
"aws-shared-services"
]
}
The Docker aliases
------------------
Bash
~~~~
::
alias samld='docker run -it --rm -v "${AWS_DIR:-$HOME/.aws}:/aws" -e "USER=$USER" -e "ADFS_DOMAIN=$ADFS_DOMAIN" -e "ADFS_URL=$ADFS_URL" turnerlabs/samlkeygen authenticate --all-accounts --auto-update'
alias awsprofs='docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen list-profiles'
alias awsprof='docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen select-profile'
PowerShell
~~~~~~~~~~
::
$AWS_DIR = "$env:UserProfile\.aws" -replace "\\","//"
function Run-SamlKeygenAuto {
docker run -it --rm -v ${AWS_DIR}:/aws -e "USER=$env:UserName" `
-e "ADFS_DOMAIN=$ADFS_DOMAIN" -e "ADFS_URL=$ADFS_URL" `
docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen select-profile
}
New-Alias awsprof Run-SamlKeygen
Full Usage documentation
------------------------
::
usage: samlkeygen [-h]
{authenticate,list-profiles,select-profile,run-command,version}
...
positional arguments:
{authenticate,list-profiles,select-profile,run-command,version}
authenticate Authenticate via SAML and write out temporary security
tokens to the credentials file
list-profiles List available AWS profiles in the credentials file
select-profile Select a unique profile name
run-command Run a command with a given profile
version
optional arguments:
-h, --help show this help message and exit
::
usage: samlkeygen authenticate [-h] [--url URL] [--region REGION] [--batch]
[--all-accounts] [--account ACCOUNT]
[--profile PROFILE] [--domain DOMAIN]
[--role ROLE] [--username USERNAME]
[--password PASSWORD] [--filename FILENAME]
[--auto-update] [--verbose]
Authenticate via SAML and write out temporary security tokens to the credentials file
optional arguments:
-h, --help show this help message and exit
--url URL URL to ADFS provider (default: '')
--region REGION AWS region to use (default: 'us-east-1')
--batch Disable all interactive prompts (default: False)
--all-accounts Retrieve tokens for all accounts and roles (default:
False)
--account ACCOUNT Name or ID of AWS account for which to generate token
(default: -)
--profile PROFILE Name to give profile in credentials file (default
account:role) (default: -)
--domain DOMAIN Windows domain to authenticate to (default: '')
--role ROLE Name or ARN of role for which to generate token
(default: all for accounts) (default: -)
--username USERNAME Name of user to authenticate as (default: 'mjreed')
--password PASSWORD Password for user (default: -)
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
--auto-update Continue running and update token(s) every hour
(default: False)
--verbose Display trace output (default: False)
::
usage: samlkeygen list-profiles [-h] [--filename FILENAME] [pattern]
List available AWS profiles in the credentials file
positional arguments:
pattern Restrict list to profiles matching pattern (default:
'.*')
optional arguments:
-h, --help show this help message and exit
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
::
usage: samlkeygen select-profile [-h] [--filename FILENAME] pattern
Select a unique profile name
positional arguments:
pattern Run command with profile matching pattern
optional arguments:
-h, --help show this help message and exit
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
|
/samlkeygen-1.4.1.tar.gz/samlkeygen-1.4.1/README.rst
| 0.861553 | 0.682441 |
README.rst
|
pypi
|
# AWS SAML Authorization Script.
This is a tool to authenticate to Amazon Web Services using the ADFS SAML provider
and create temporary tokens for use with AWS API clients.
## Two approaches
The code is packaged as a Python module; if you have a working Python
installation of a recent enough vintage, you should be able to install it
directly from PyPI (http://pypi.python.org) by running `pip install samlkeygen`.
If you regularly use Docker and would rather run the tool without installing
anything, you can run a prebuilt Docker image directly from Docker Hub
(http://dockerhub.com) with `docker run turnerlabs/samlkeygen`.
Both approaches require some environment variables and/or command-line
parameters in order to function properly; see below for details. Of course,
you can also always clone the git repository
(https://github.com/turnerlabs/samlkeygen) and use the source directly. Pull
requests with improvements are always welcome!
### MacOS Note
On some versions of MacOS, the stock Python install doesn't include `pip`. and
even if you install it (e.g. with `easy_install`), installing `samlkeygen` may
require upgrading some standard modules that file system security restrictions
will not permit. On such systems you will need to either take the Docker approach
or install a separate instance of Python (2.7+ or 3.x, the module works either way);
for the latter solution, consider using Homebrew (http://brew.sh) and/or pyenv
(https://github.com/pyenv/pyenv)
## Shortcuts
When installed as a module, `samlkeygen` installs three front-end scripts
to make common use cases take less typing: `awsprof`, `awsprofs`, and `samld`. There
are bash alias definitions below to create similar commands when using the Docker
version of the tool; the github repo includes a `source`able bash script containing
those definitions.
## Configuration Parameters
The primary configuration parameters required for the authentication operation
are the URL of your ADFS service endpoint and the credentials to log into that
endpoint: your Active Directory domain, username, and password. You can provide
all of these via command-line parameters or environment variables, though for
security we recommend that you let the tool prompt you for your password (which
will not be echoed).
| Option | Environment Variable | Description|
|----------|----------------------|------------|
|--url | ADFS\_URL | Complete URL to ADFS SAML endpoint for AWS
|--domain | ADFS\_DOMAIN | Active Directory/ADFS domain name|
|--username| USER | Your Active Directory username (sAMAccountName) |
|--password| PASSWORD | Your Active Directory password (again, we recommend you leave this unset and allow the program to prompt you for it) |
Depending on your environment, you may also have to be on your corporate LAN or
VPN in order to authenticate. Once you have obtained keys for a profile,
though, those should work from anywhere until they expire.
### Quick start
`samlkeygen` has two basic functions: the primary one is authentication, in which
it connects to the ADFS endpoint, authenticates, and gets authorization tokens for
the user's defined SAML roles. The secondary function is to simplify the selection
of a credentials profile file for use by a command.
#### Authentication
SAML-based credentials are only good for an hour (a hard AWS-imposed limit). To
make this limit less inconvenient, `samlkeygen authenticate` provides a mode of
operation called `auto-update` which requests your password once, then runs
continually and automatically requests and saves new credentials every hour
just before the old ones expire. The supplied entry points include `samld`,
which can usually be run without arguments if the environment variables are set
properly, or with just `--username sAMAccountName` if that isn't the
same as your local `$USER` on your workstation. Example:
```
$ samld --username gpburdell
...
Writing credentials for profile aws-shared-services:aws-shared-services-admin
Writing credentials for profile aws-shared-services:aws-shared-services-dns
Writing credentials for profile aws-ent-prod:aws-ent-prod-admin
Writing credentials for profile cdubs:aws-cdubs-admin
58 minutes till credential refresh
```
The full usage for `samlkeygen authenticate` may be found below.
### Profile selection
The authentication tokens will be written out to your credentials file with a
separate profile for each SAML role, named `$ACCOUNT_ALIAS:$ROLE_NAME`. You can
get a list of your profiles by running `samlkeygen list-profiles`, which
takes an optional parameter to restrict the output to those profiles matching
a substring (really a regular expression). There's an `awsprofs` alias/entry point
for this functionality:
```
$ awsprofs shared-services
aws-shared-services:aws-shared-services-admin
aws-shared-services:aws-shared-services-dns-cnn
```
These are normal AWS profiles and can be used like any other, by supplying the
`--profile` option to whatever AWS CLI command you are running,
or setting the `AWS_PROFILE` environment variable (or `AWS_DEFAULT_PROFILE` for
some older tools). However, since the autogenerated names are somewhat long,
the script also has a subcommand that lets you select a profile via substring
or regular expression match: `select-profile` works just like `list-profiles`,
but requires that the pattern match exactly one profile. The supplied
aliases/entry points include one called `awsprof` (singular) for this use case:
```
$ awsprof shared-services
samlkeygen.py: Pattern is not unique. It matches these profiles:
aws-shared-services:aws-shared-services-admin
aws-shared-services:aws-shared-services-dns-cnn
```
If the pattern does match one profile, that profile's full name is output by itself;
the intent is to use the command in command-substitution:
```
$ aws --profile $(awsprof shared-services-admin) iam list-account-aliases
{
"AccountAliases": [
"aws-shared-services"
]
}
```
Finally, if you are running the local Python version, you can ask the script to
run a command for you under a given profile. The pip-installed entry poitns
include one called `awsrun` for this function; there's no corresponding Docker
alias because the Docker container would have to include the AWS command-line
tool you wanted to run this way.
That lets me replace the above example with this:
```
$ awsrun shared-services-admin aws iam list-account-aliases
{
"AccountAliases": [
"aws-shared-services"
]
}
```
## The Docker aliases
### Bash
```
alias samld='docker run -it --rm -v "${AWS_DIR:-$HOME/.aws}:/aws" -e "USER=$USER" -e "ADFS_DOMAIN=$ADFS_DOMAIN" -e "ADFS_URL=$ADFS_URL" turnerlabs/samlkeygen authenticate --all-accounts --auto-update'
alias awsprofs='docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen list-profiles'
alias awsprof='docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen select-profile'
```
### PowerShell
```
$AWS_DIR = "$env:UserProfile\.aws" -replace "\\","//"
function Run-SamlKeygenAuto {
docker run -it --rm -v ${AWS_DIR}:/aws -e "USER=$env:UserName" `
-e "ADFS_DOMAIN=$ADFS_DOMAIN" -e "ADFS_URL=$ADFS_URL" `
docker run --rm -v ~/.aws:/aws turnerlabs/samlkeygen select-profile
}
New-Alias awsprof Run-SamlKeygen
```
## Full Usage documentation
```
usage: samlkeygen [-h]
{authenticate,list-profiles,select-profile,run-command,version}
...
positional arguments:
{authenticate,list-profiles,select-profile,run-command,version}
authenticate Authenticate via SAML and write out temporary security
tokens to the credentials file
list-profiles List available AWS profiles in the credentials file
select-profile Select a unique profile name
run-command Run a command with a given profile
version
optional arguments:
-h, --help show this help message and exit
```
```
usage: samlkeygen authenticate [-h] [--url URL] [--region REGION] [--batch]
[--all-accounts] [--account ACCOUNT]
[--profile PROFILE] [--domain DOMAIN]
[--role ROLE] [--username USERNAME]
[--password PASSWORD] [--filename FILENAME]
[--auto-update] [--verbose]
Authenticate via SAML and write out temporary security tokens to the credentials file
optional arguments:
-h, --help show this help message and exit
--url URL URL to ADFS provider (default: '')
--region REGION AWS region to use (default: 'us-east-1')
--batch Disable all interactive prompts (default: False)
--all-accounts Retrieve tokens for all accounts and roles (default:
False)
--account ACCOUNT Name or ID of AWS account for which to generate token
(default: -)
--profile PROFILE Name to give profile in credentials file (default
account:role) (default: -)
--domain DOMAIN Windows domain to authenticate to (default: '')
--role ROLE Name or ARN of role for which to generate token
(default: all for accounts) (default: -)
--username USERNAME Name of user to authenticate as (default: 'mjreed')
--password PASSWORD Password for user (default: -)
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
--auto-update Continue running and update token(s) every hour
(default: False)
--verbose Display trace output (default: False)
```
```
usage: samlkeygen list-profiles [-h] [--filename FILENAME] [pattern]
List available AWS profiles in the credentials file
positional arguments:
pattern Restrict list to profiles matching pattern (default:
'.*')
optional arguments:
-h, --help show this help message and exit
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
```
```
usage: samlkeygen select-profile [-h] [--filename FILENAME] pattern
Select a unique profile name
positional arguments:
pattern Run command with profile matching pattern
optional arguments:
-h, --help show this help message and exit
--filename FILENAME Name of AWS credentials file (default:
'/Users/mjreed/.aws/credentials')
```
|
/samlkeygen-1.4.1.tar.gz/samlkeygen-1.4.1/README.md
| 0.408749 | 0.776792 |
README.md
|
pypi
|
def sammon(x, n = 2, display = 0, inputdist = 'raw', maxhalves = 20, maxiter = 500, tolfun = 1e-9, init = 'pca'):
import numpy as np
from scipy.spatial.distance import cdist
X = x
# Create distance matrix unless given by parameters
if inputdist == 'distance':
xD = X
else:
xD = cdist(X, X)
# Remaining initialisation
N = X.shape[0] # hmmm, shape[1]?
scale = 0.5 / xD.sum()
if init == 'pca':
[UU,DD,_] = np.linalg.svd(X)
Y = UU[:,:n]*DD[:n]
else:
Y = np.random.normal(0.0,1.0,[N,n])
one = np.ones([N,n])
xD = xD + np.eye(N)
xDinv = 1 / xD # Returns inf where D = 0.
xDinv[np.isinf(xDinv)] = 0 # Fix by replacing inf with 0 (default Matlab behaviour).
yD = cdist(Y, Y) + np.eye(N)
yDinv = 1. / yD # Returns inf where d = 0.
np.fill_diagonal(xD, 1)
np.fill_diagonal(yD, 1)
np.fill_diagonal(xDinv, 0)
np.fill_diagonal(yDinv, 0)
xDinv[np.isnan(xDinv)] = 0
yDinv[np.isnan(xDinv)] = 0
xDinv[np.isinf(xDinv)] = 0
yDinv[np.isinf(yDinv)] = 0 # Fix by replacing inf with 0 (default Matlab behaviour).
delta = xD - yD
E = ((delta**2)*xDinv).sum()
# Get on with it
for i in range(maxiter):
# Compute gradient, Hessian and search direction (note it is actually
# 1/4 of the gradient and Hessian, but the step size is just the ratio
# of the gradient and the diagonal of the Hessian so it doesn't
# matter).
delta = yDinv - xDinv
deltaone = np.dot(delta,one)
g = np.dot(delta, Y) - (Y * deltaone)
dinv3 = yDinv ** 3
y2 = Y ** 2
H = np.dot(dinv3,y2) - deltaone - np.dot(2, Y) * np.dot(dinv3, Y) + y2 * np.dot(dinv3,one)
s = -g.flatten(order='F') / np.abs(H.flatten(order='F'))
y_old = Y
# Use step-halving procedure to ensure progress is made
for j in range(maxhalves):
s_reshape = s.reshape(2,round(len(s)/2)).T
y = y_old + s_reshape
d = cdist(y, y) + np.eye(N)
dinv = 1 / d # Returns inf where D = 0.
dinv[np.isinf(dinv)] = 0 # Fix by replacing inf with 0 (default Matlab behaviour).
delta = xD - d
E_new = ((delta**2)*xDinv).sum()
if E_new < E:
break
else:
s = np.dot(0.5,s)
# Bomb out if too many halving steps are required
if j == maxhalves:
print('Warning: maxhalves exceeded. Sammon mapping may not converge...')
# Evaluate termination criterion
if np.abs((E - E_new) / E) < tolfun:
if display:
print('TolFun exceeded: Optimisation terminated')
break
# Report progress
E = E_new
if display > 1:
print('epoch = ' + str(i) + ': E = ' + str(E * scale))
# Fiddle stress to match the original Sammon paper
E = E * scale
return [y,E]
|
/sammon-mapping-0.0.2.tar.gz/sammon-mapping-0.0.2/sammon/sammon.py
| 0.601711 | 0.586819 |
sammon.py
|
pypi
|
import os
import datetime
from sqlalchemy import create_engine, Table, Column, Integer, String, Text, \
MetaData, DateTime
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.sql import default_comparator
class ClientDatabase:
"""
Класс - оболочка для работы с базой данных клиента.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется классический подход.
"""
class KnownUsers:
"""
Класс - отображение для таблицы всех пользователей.
"""
def __init__(self, user):
self.id = None
self.username = user
class MessageHistory:
"""
Класс - отображение для таблицы статистики переданных сообщений.
"""
def __init__(self, contact, direction, message):
self.id = None
self.contact = contact
self.direction = direction
self.message = message
self.date = datetime.datetime.now()
class Contacts:
"""
Класс - отображение для таблицы контактов.
"""
def __init__(self, contact):
self.id = None
self.name = contact
# Конструктор класса:
def __init__(self, name):
# Создаём движок базы данных, поскольку разрешено несколько клиентов
# одновременно, каждый должен иметь свою БД. Поскольку клиент
# мультипоточный необходимо отключить проверки на подключения с
# разных потоков иначе sqlite3.ProgrammingError
path = os.getcwd()
filename = f'client_{name}.db3'
self.database_engine = create_engine(
f'sqlite:///{os.path.join(path, filename)}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
# Создаём объект MetaData
self.metadata = MetaData()
# Создаём таблицу известных пользователей
users = Table('known_users', self.metadata,
Column('id', Integer, primary_key=True),
Column('username', String)
)
# Создаём таблицу истории сообщений
history = Table('message_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('contact', String),
Column('direction', String),
Column('message', Text),
Column('date', DateTime)
)
# Создаём таблицу контактов
contacts = Table('contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True)
)
# Создаём таблицы
self.metadata.create_all(self.database_engine)
# Создаём отображения
mapper(self.KnownUsers, users)
mapper(self.MessageHistory, history)
mapper(self.Contacts, contacts)
# Создаём сессию
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
# Необходимо очистить таблицу контактов, т.к. при запуске они
# подгружаются с сервера.
self.session.query(self.Contacts).delete()
self.session.commit()
def add_contact(self, contact):
"""Метод добавляющий контакт в базу данных."""
if not self.session.query(
self.Contacts).filter_by(
name=contact).count():
contact_row = self.Contacts(contact)
self.session.add(contact_row)
self.session.commit()
def contacts_clear(self):
"""Метод очищающий таблицу со списком контактов."""
self.session.query(self.Contacts).delete()
def del_contact(self, contact):
"""Метод удаляющий определённый контакт."""
self.session.query(self.Contacts).filter_by(name=contact).delete()
def add_users(self, users_list):
"""Метод заполняющий таблицу известных пользователей."""
self.session.query(self.KnownUsers).delete()
for user in users_list:
user_row = self.KnownUsers(user)
self.session.add(user_row)
self.session.commit()
def save_message(self, contact, direction, message):
"""Метод сохраняющий сообщение в базе данных."""
message_row = self.MessageHistory(contact, direction, message)
self.session.add(message_row)
self.session.commit()
def get_contacts(self):
"""Метод возвращающий список всех контактов."""
return [contact[0]
for contact in self.session.query(self.Contacts.name).all()]
def get_users(self):
"""Метод возвращающий список всех известных пользователей."""
return [user[0]
for user in self.session.query(self.KnownUsers.username).all()]
def check_user(self, user):
"""Метод проверяющий существует ли пользователь."""
if self.session.query(
self.KnownUsers).filter_by(
username=user).count():
return True
else:
return False
def check_contact(self, contact):
"""Метод проверяющий существует ли контакт."""
if self.session.query(self.Contacts).filter_by(name=contact).count():
return True
else:
return False
def get_history(self, contact):
"""
Метод возвращающий историю сообщений с определённым пользователем.
"""
query = self.session.query(
self.MessageHistory).filter_by(
contact=contact)
return [(history_row.contact,
history_row.direction,
history_row.message,
history_row.date) for history_row in query.all()]
# отладка
if __name__ == '__main__':
test_db = ClientDatabase('test1')
for i in ['test3', 'test4', 'test5']:
test_db.add_contact(i)
test_db.add_contact('test4')
test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
test_db.save_message(
'test2',
'in',
f'Привет! я тестовое сообщение от {datetime.datetime.now()}!')
test_db.save_message(
'test2',
'out',
f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!')
print(test_db.get_contacts())
print(test_db.get_users())
print(test_db.check_user('test1'))
print(test_db.check_user('test10'))
print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
test_db.del_contact('test4')
print(test_db.get_contacts())
|
/samoryad_messenger_client-0.0.2.tar.gz/samoryad_messenger_client-0.0.2/client/database.py
| 0.400984 | 0.259109 |
database.py
|
pypi
|
import datetime
from sqlalchemy import create_engine, Table, Column, Integer, String, \
MetaData, ForeignKey, DateTime, Text
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.sql import default_comparator
class ServerStorage:
"""
Класс - оболочка для работы с базой данных сервера.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется классический подход.
"""
class AllUsers:
"""Класс - отображение таблицы всех пользователей."""
def __init__(self, username, passwd_hash):
self.name = username
self.last_login = datetime.datetime.now()
self.passwd_hash = passwd_hash
self.pubkey = None
self.id = None
class ActiveUsers:
"""Класс - отображение таблицы активных пользователей."""
def __init__(self, user_id, ip_address, port, login_time):
self.user = user_id
self.ip_address = ip_address
self.port = port
self.login_time = login_time
self.id = None
class LoginHistory:
"""Класс - отображение таблицы истории входов."""
def __init__(self, name, date, ip, port):
self.id = None
self.name = name
self.date_time = date
self.ip = ip
self.port = port
class UsersContacts:
"""Класс - отображение таблицы контактов пользователей."""
def __init__(self, user, contact):
self.id = None
self.user = user
self.contact = contact
class UsersHistory:
"""Класс - отображение таблицы истории действий."""
def __init__(self, user):
self.id = None
self.user = user
self.sent = 0
self.accepted = 0
def __init__(self, path):
# Создаём движок базы данных
self.database_engine = create_engine(
f'sqlite:///{path}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
# Создаём объект MetaData
self.metadata = MetaData()
# Создаём таблицу пользователей
users_table = Table('Users', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True),
Column('last_login', DateTime),
Column('passwd_hash', String),
Column('pubkey', Text)
)
# Создаём таблицу активных пользователей
active_users_table = Table(
'Active_users', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id'), unique=True),
Column('ip_address', String),
Column('port', Integer),
Column('login_time', DateTime))
# Создаём таблицу истории входов
user_login_history = Table('Login_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', ForeignKey('Users.id')),
Column('date_time', DateTime),
Column('ip', String),
Column('port', String)
)
# Создаём таблицу контактов пользователей
contacts = Table('Contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('contact', ForeignKey('Users.id'))
)
# Создаём таблицу статистики пользователей
users_history_table = Table('History', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('sent', Integer),
Column('accepted', Integer)
)
# Создаём таблицы
self.metadata.create_all(self.database_engine)
# Создаём отображения
mapper(self.AllUsers, users_table)
mapper(self.ActiveUsers, active_users_table)
mapper(self.LoginHistory, user_login_history)
mapper(self.UsersContacts, contacts)
mapper(self.UsersHistory, users_history_table)
# Создаём сессию
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
# Если в таблице активных пользователей есть записи, то их необходимо
# удалить
self.session.query(self.ActiveUsers).delete()
self.session.commit()
def user_login(self, username, ip_address, port, key):
"""
Метод выполняющийся при входе пользователя, записывает в базу
факт входа
Обновляет открытый ключ пользователя при его изменении.
"""
# Запрос в таблицу пользователей на наличие там пользователя с таким
# именем
rez = self.session.query(self.AllUsers).filter_by(name=username)
# Если имя пользователя уже присутствует в таблице, обновляем
# время последнего входа и проверяем корректность ключа.
# Если клиент прислал новый ключ, сохраняем его.
if rez.count():
user = rez.first()
user.last_login = datetime.datetime.now()
if user.pubkey != key:
user.pubkey = key
# Если нету, то генерируем исключение
else:
raise ValueError('Пользователь не зарегистрирован.')
# Теперь можно создать запись в таблицу активных пользователей о факте
# входа.
new_active_user = self.ActiveUsers(
user.id, ip_address, port, datetime.datetime.now())
self.session.add(new_active_user)
# и сохранить в историю входов
history = self.LoginHistory(
user.id, datetime.datetime.now(), ip_address, port)
self.session.add(history)
# Сохрраняем изменения
self.session.commit()
def add_user(self, name, passwd_hash):
"""
Метод регистрации пользователя.
Принимает имя и хэш пароля, создаёт запись в таблице статистики.
"""
user_row = self.AllUsers(name, passwd_hash)
self.session.add(user_row)
self.session.commit()
history_row = self.UsersHistory(user_row.id)
self.session.add(history_row)
self.session.commit()
def remove_user(self, name):
"""Метод удаляющий пользователя из базы."""
user = self.session.query(self.AllUsers).filter_by(name=name).first()
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
self.session.query(self.LoginHistory).filter_by(name=user.id).delete()
self.session.query(self.UsersContacts).filter_by(user=user.id).delete()
self.session.query(
self.UsersContacts).filter_by(
contact=user.id).delete()
self.session.query(self.UsersHistory).filter_by(user=user.id).delete()
self.session.query(self.AllUsers).filter_by(name=name).delete()
self.session.commit()
def get_hash(self, name):
"""Метод получения хэша пароля пользователя."""
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.passwd_hash
def get_pubkey(self, name):
"""Метод получения публичного ключа пользователя."""
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.pubkey
def check_user(self, name):
"""Метод проверяющий существование пользователя."""
if self.session.query(self.AllUsers).filter_by(name=name).count():
return True
else:
return False
def user_logout(self, username):
"""Метод фиксирующий отключения пользователя."""
# Запрашиваем пользователя, что покидает нас
user = self.session.query(
self.AllUsers).filter_by(
name=username).first()
# Удаляем его из таблицы активных пользователей.
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
# Применяем изменения
self.session.commit()
def process_message(self, sender, recipient):
"""Метод записывающий в таблицу статистики факт передачи сообщения."""
# Получаем ID отправителя и получателя
sender = self.session.query(
self.AllUsers).filter_by(
name=sender).first().id
recipient = self.session.query(
self.AllUsers).filter_by(
name=recipient).first().id
# Запрашиваем строки из истории и увеличиваем счётчики
sender_row = self.session.query(
self.UsersHistory).filter_by(
user=sender).first()
sender_row.sent += 1
recipient_row = self.session.query(
self.UsersHistory).filter_by(
user=recipient).first()
recipient_row.accepted += 1
self.session.commit()
def add_contact(self, user, contact):
"""Метод добавления контакта для пользователя."""
# Получаем ID пользователей
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
# Проверяем что не дубль и что контакт может существовать (полю
# пользователь мы доверяем)
if not contact or self.session.query(self.UsersContacts).filter_by(
user=user.id, contact=contact.id).count():
return
# Создаём объект и заносим его в базу
contact_row = self.UsersContacts(user.id, contact.id)
self.session.add(contact_row)
self.session.commit()
# Функция удаляет контакт из базы данных
def remove_contact(self, user, contact):
"""Метод удаления контакта пользователя."""
# Получаем ID пользователей
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
# Проверяем что контакт может существовать (полю пользователь мы
# доверяем)
if not contact:
return
# Удаляем требуемое
self.session.query(self.UsersContacts).filter(
self.UsersContacts.user == user.id,
self.UsersContacts.contact == contact.id
).delete()
self.session.commit()
def users_list(self):
"""
Метод возвращающий список известных пользователей со
временем последнего входа.
"""
# Запрос строк таблицы пользователей.
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login
)
# Возвращаем список кортежей
return query.all()
def active_users_list(self):
"""Метод возвращающий список активных пользователей."""
# Запрашиваем соединение таблиц и собираем кортежи имя, адрес, порт,
# время.
query = self.session.query(
self.AllUsers.name,
self.ActiveUsers.ip_address,
self.ActiveUsers.port,
self.ActiveUsers.login_time
).join(self.AllUsers)
# Возвращаем список кортежей
return query.all()
def login_history(self, username=None):
"""Метод возвращающий историю входов."""
# Запрашиваем историю входа
query = self.session.query(self.AllUsers.name,
self.LoginHistory.date_time,
self.LoginHistory.ip,
self.LoginHistory.port
).join(self.AllUsers)
# Если было указано имя пользователя, то фильтруем по нему
if username:
query = query.filter(self.AllUsers.name == username)
# Возвращаем список кортежей
return query.all()
def get_contacts(self, username):
"""Метод возвращающий список контактов пользователя."""
# Запрашивааем указанного пользователя
user = self.session.query(self.AllUsers).filter_by(name=username).one()
# Запрашиваем его список контактов
query = self.session.query(self.UsersContacts, self.AllUsers.name). \
filter_by(user=user.id). \
join(self.AllUsers, self.UsersContacts.contact == self.AllUsers.id)
# выбираем только имена пользователей и возвращаем их.
return [contact[1] for contact in query.all()]
def message_history(self):
"""Метод возвращающий статистику сообщений."""
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login,
self.UsersHistory.sent,
self.UsersHistory.accepted
).join(self.AllUsers)
# Возвращаем список кортежей
return query.all()
# Отладка
if __name__ == '__main__':
test_db = ServerStorage('../server_database.db3')
test_db.user_login('test1', '192.168.1.113', 8080, 123)
test_db.user_login('test2', '192.168.1.113', 8081, 123)
print(test_db.users_list())
print(test_db.active_users_list())
test_db.user_logout('McG')
print(test_db.login_history('re'))
test_db.add_contact('test2', 'test1')
test_db.add_contact('test1', 'test3')
test_db.add_contact('test1', 'test6')
test_db.remove_contact('test1', 'test3')
test_db.process_message('test1', 'test2')
print(test_db.message_history())
|
/samoryad_messenger_server-0.0.2.tar.gz/samoryad_messenger_server-0.0.2/server/database.py
| 0.422266 | 0.162314 |
database.py
|
pypi
|
__version__ = "0.0.1-alpha.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka.git"
import time
import threading
from collections import deque
import digitalio
class Event:
"""A key transition event."""
def __init__(self, key_number=0, pressed=True):
"""
Create a key transition event, which reports a key-pressed or key-released transition.
:param int key_number: the key number
:param bool pressed: ``True`` if the key was pressed; ``False`` if it was released.
"""
self._key_number = key_number
self._pressed = pressed
@property
def key_number(self):
"""The key number."""
return self._key_number
@property
def pressed(self):
"""
``True`` if the event represents a key down (pressed) transition.
The opposite of `released`.
"""
return self._pressed
@property
def released(self):
"""
``True`` if the event represents a key up (released) transition.
The opposite of `pressed`.
"""
return not self._pressed
def __eq__(self, other):
"""
Two `Event` objects are equal if their `key_number`
and `pressed`/`released` values are equal.
"""
return self.key_number == other.key_number and self.pressed == other.pressed
def __hash__(self):
"""Returns a hash for the `Event`, so it can be used in dictionaries, etc.."""
return hash(self._key_number)
def __repr__(self):
"""Return a textual representation of the object"""
return "<Event: key_number {} {}>".format(
self.key_number, "pressed" if self._pressed else "released"
)
class _EventQueue:
"""
A queue of `Event` objects, filled by a `keypad` scanner such as `Keys` or `KeyMatrix`.
You cannot create an instance of `_EventQueue` directly. Each scanner creates an
instance when it is created.
"""
def __init__(self, max_events):
self._events = deque([], max_events)
self._overflowed = False
def get(self):
"""
Return the next key transition event. Return ``None`` if no events are pending.
Note that the queue size is limited; see ``max_events`` in the constructor of
a scanner such as `Keys` or `KeyMatrix`.
If a new event arrives when the queue is full, the event is discarded, and
`overflowed` is set to ``True``.
:return: the next queued key transition `Event`
:rtype: Optional[Event]
"""
if not self._events:
return None
return self._events.popleft()
def get_into(self, event):
"""Store the next key transition event in the supplied event, if available,
and return ``True``.
If there are no queued events, do not touch ``event`` and return ``False``.
The advantage of this method over ``get()`` is that it does not allocate storage.
Instead you can reuse an existing ``Event`` object.
Note that the queue size is limited; see ``max_events`` in the constructor of
a scanner such as `Keys` or `KeyMatrix`.
:return ``True`` if an event was available and stored, ``False`` if not.
:rtype: bool
"""
if not self._events:
return False
next_event = self._events.popleft()
# pylint: disable=protected-access
event._key_number = next_event._key_number
event._pressed = next_event._pressed
# pylint: enable=protected-access
return True
def clear(self):
"""
Clear any queued key transition events. Also sets `overflowed` to ``False``.
"""
self._events.clear()
self._overflowed = False
def __bool__(self):
"""``True`` if `len()` is greater than zero.
This is an easy way to check if the queue is empty.
"""
return len(self._events) > 0
def __len__(self):
"""Return the number of events currently in the queue. Used to implement ``len()``."""
return len(self._events)
@property
def overflowed(self):
"""
``True`` if an event could not be added to the event queue because it was full. (read-only)
Set to ``False`` by `clear()`.
"""
return self._overflowed
def keypad_eventqueue_record(self, key_number, current):
"""Record a new event"""
if len(self._events) == self._events.maxlen:
self._overflowed = True
else:
self._events.append(Event(key_number, current))
class _KeysBase:
def __init__(self, interval, max_events, scanning_function):
self._interval = interval
self._last_scan = time.monotonic()
self._events = _EventQueue(max_events)
self._scanning_function = scanning_function
self._scan_thread = threading.Thread(target=self._scanning_loop, daemon=True)
self._scan_thread.start()
@property
def events(self):
"""The EventQueue associated with this Keys object. (read-only)"""
return self._events
def deinit(self):
"""Stop scanning"""
if self._scan_thread.is_alive():
self._scan_thread.join()
def __enter__(self):
"""No-op used by Context Managers."""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""
Automatically deinitializes when exiting a context. See
:ref:`lifetime-and-contextmanagers` for more info.
"""
self.deinit()
def _scanning_loop(self):
while True:
remaining_delay = self._interval - (time.monotonic() - self._last_scan)
if remaining_delay > 0:
time.sleep(remaining_delay)
self._last_scan = time.monotonic()
self._scanning_function()
class Keys(_KeysBase):
"""Manage a set of independent keys."""
def __init__(
self, pins, *, value_when_pressed, pull=True, interval=0.02, max_events=64
):
"""
Create a `Keys` object that will scan keys attached to the given sequence of pins.
Each key is independent and attached to its own pin.
An `EventQueue` is created when this object is created and is available in the
`events` attribute.
:param Sequence[microcontroller.Pin] pins: The pins attached to the keys.
The key numbers correspond to indices into this sequence.
:param bool value_when_pressed: ``True`` if the pin reads high when the key is pressed.
``False`` if the pin reads low (is grounded) when the key is pressed.
All the pins must be connected in the same way.
:param bool pull: ``True`` if an internal pull-up or pull-down should be
enabled on each pin. A pull-up will be used if ``value_when_pressed`` is ``False``;
a pull-down will be used if it is ``True``.
If an external pull is already provided for all the pins, you can set
``pull`` to ``False``.
However, enabling an internal pull when an external one is already present is not
a problem;
it simply uses slightly more current.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
self._digitalinouts = []
for pin in pins:
dio = digitalio.DigitalInOut(pin)
if pull:
dio.pull = (
digitalio.Pull.DOWN if value_when_pressed else digitalio.Pull.UP
)
self._digitalinouts.append(dio)
self._currently_pressed = [False] * len(pins)
self._previously_pressed = [False] * len(pins)
self._value_when_pressed = value_when_pressed
super().__init__(interval, max_events, self._keypad_keys_scan)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
for dio in self._digitalinouts:
dio.deinit()
def reset(self):
"""Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
self._currently_pressed = self._previously_pressed = [False] * self.key_count
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return len(self._digitalinouts)
def _keypad_keys_scan(self):
for key_number, dio in enumerate(self._digitalinouts):
self._previously_pressed[key_number] = self._currently_pressed[key_number]
current = dio.value == self._value_when_pressed
self._currently_pressed[key_number] = current
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
class KeyMatrix(_KeysBase):
"""Manage a 2D matrix of keys with row and column pins."""
# pylint: disable=too-many-arguments
def __init__(
self,
row_pins,
column_pins,
columns_to_anodes=True,
interval=0.02,
max_events=64,
):
"""
Create a `Keys` object that will scan the key matrix attached to the given row and
column pins.
There should not be any external pull-ups or pull-downs on the matrix:
``KeyMatrix`` enables internal pull-ups or pull-downs on the pins as necessary.
The keys are numbered sequentially from zero. A key number can be computed
by ``row * len(column_pins) + column``.
An `EventQueue` is created when this object is created and is available in the `events`
attribute.
:param Sequence[microcontroller.Pin] row_pins: The pins attached to the rows.
:param Sequence[microcontroller.Pin] column_pins: The pins attached to the colums.
:param bool columns_to_anodes: Default ``True``.
If the matrix uses diodes, the diode anodes are typically connected to the column pins,
and the cathodes should be connected to the row pins. If your diodes are reversed,
set ``columns_to_anodes`` to ``False``.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
self._row_digitalinouts = []
for row_pin in row_pins:
row_dio = digitalio.DigitalInOut(row_pin)
row_dio.switch_to_input(
pull=(digitalio.Pull.UP if columns_to_anodes else digitalio.Pull.DOWN)
)
self._row_digitalinouts.append(row_dio)
self._column_digitalinouts = []
for column_pin in column_pins:
col_dio = digitalio.DigitalInOut(column_pin)
col_dio.switch_to_input(
pull=(digitalio.Pull.UP if columns_to_anodes else digitalio.Pull.DOWN)
)
self._column_digitalinouts.append(col_dio)
self._currently_pressed = [False] * len(column_pins) * len(row_pins)
self._previously_pressed = [False] * len(column_pins) * len(row_pins)
self._columns_to_anodes = columns_to_anodes
super().__init__(interval, max_events, self._keypad_keymatrix_scan)
# pylint: enable=too-many-arguments
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return len(self._row_digitalinouts) * len(self._column_digitalinouts)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
for row_dio in self._row_digitalinouts:
row_dio.deinit()
for col_dio in self._column_digitalinouts:
col_dio.deinit()
def reset(self):
"""
Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
self._previously_pressed = self._currently_pressed = [False] * self.key_count
def _row_column_to_key_number(self, row, column):
return row * len(self._column_digitalinouts) + column
def _keypad_keymatrix_scan(self):
for row, row_dio in enumerate(self._row_digitalinouts):
row_dio.switch_to_output(
value=(not self._columns_to_anodes),
drive_mode=digitalio.DriveMode.PUSH_PULL,
)
for col, col_dio in enumerate(self._column_digitalinouts):
key_number = self._row_column_to_key_number(row, col)
self._previously_pressed[key_number] = self._currently_pressed[
key_number
]
current = col_dio.value != self._columns_to_anodes
self._currently_pressed[key_number] = current
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
row_dio.value = self._columns_to_anodes
row_dio.switch_to_input(
pull=(
digitalio.Pull.UP
if self._columns_to_anodes
else digitalio.Pull.DOWN
)
)
class ShiftRegisterKeys(_KeysBase):
"""Manage a set of keys attached to an incoming shift register."""
def __init__(
self,
*,
clock,
data,
latch,
value_to_latch=True,
key_count,
value_when_pressed,
interval=0.02,
max_events=64
):
"""
Create a `Keys` object that will scan keys attached to a parallel-in serial-out
shift register like the 74HC165 or CD4021.
Note that you may chain shift registers to load in as many values as you need.
Key number 0 is the first (or more properly, the zero-th) bit read. In the
74HC165, this bit is labeled ``Q7``. Key number 1 will be the value of ``Q6``, etc.
An `EventQueue` is created when this object is created and is available in the
`events` attribute.
:param microcontroller.Pin clock: The shift register clock pin.
The shift register should clock on a low-to-high transition.
:param microcontroller.Pin data: the incoming shift register data pin
:param microcontroller.Pin latch:
Pin used to latch parallel data going into the shift register.
:param bool value_to_latch: Pin state to latch data being read.
``True`` if the data is latched when ``latch`` goes high
``False`` if the data is latched when ``latch goes low.
The default is ``True``, which is how the 74HC165 operates. The CD4021 latch is
the opposite. Once the data is latched, it will be shifted out by toggling the
clock pin.
:param int key_count: number of data lines to clock in
:param bool value_when_pressed: ``True`` if the pin reads high when the key is pressed.
``False`` if the pin reads low (is grounded) when the key is pressed.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
clock_dio = digitalio.DigitalInOut(clock)
clock_dio.switch_to_output(
value=False, drive_mode=digitalio.DriveMode.PUSH_PULL
)
self._clock = clock_dio
data_dio = digitalio.DigitalInOut(data)
data_dio.switch_to_input()
self._data = data_dio
latch_dio = digitalio.DigitalInOut(latch)
latch_dio.switch_to_output(value=True, drive_mode=digitalio.DriveMode.PUSH_PULL)
self._latch = latch_dio
self._value_to_latch = value_to_latch
self._currently_pressed = [False] * key_count
self._previously_pressed = [False] * key_count
self._value_when_pressed = value_when_pressed
self._key_count = key_count
super().__init__(interval, max_events, self._keypad_shiftregisterkeys_scan)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
self._clock.deinit()
self._data.deinit()
self._latch.deinit()
def reset(self):
"""
Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
self._currently_pressed = self._previously_pressed = [False] * self._key_count
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return self._key_count
@property
def events(self):
"""The `EventQueue` associated with this `Keys` object. (read-only)"""
return self._events
def _keypad_shiftregisterkeys_scan(self):
self._latch.value = self._value_to_latch
for key_number in range(self._key_count):
self._clock.value = False
self._previously_pressed[key_number] = self._currently_pressed[key_number]
current = self._data.value == self._value_when_pressed
self._currently_pressed[key_number] = current
self._clock.value = True
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
self._latch.value = not self._value_to_latch
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/keypad.py
| 0.93603 | 0.456289 |
keypad.py
|
pypi
|
__version__ = "0.0.1-alpha.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka.git"
import adafruit_platformdetect.constants.boards as ap_board
from adafruit_blinka import Lockable, agnostic
# pylint: disable=import-outside-toplevel,too-many-arguments
class I2C(Lockable):
"""Bitbang/Software I2C implementation"""
def __init__(self, scl, sda, frequency=400000):
# TODO: This one is a bit questionable:
if agnostic.board_id == ap_board.PYBOARD:
raise NotImplementedError("No software I2C on {}".format(agnostic.board_id))
if agnostic.detector.board.any_embedded_linux:
# TODO: Attempt to load this library automatically
raise NotImplementedError(
"For bitbangio on Linux, please use Adafruit_CircuitPython_BitbangIO"
)
self.init(scl, sda, frequency)
def init(self, scl, sda, frequency):
"""Initialization"""
from machine import Pin
from machine import I2C as _I2C
self.deinit()
id = ( # pylint: disable=redefined-builtin
-1
) # force bitbanging implementation - in future
# introspect platform if SDA/SCL matches hardware I2C
self._i2c = _I2C(id, Pin(scl.id), Pin(sda.id), freq=frequency)
def deinit(self):
"""Deinitialization"""
try:
del self._i2c
except AttributeError:
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.deinit()
def scan(self):
"""Scan for attached devices"""
return self._i2c.scan()
def readfrom_into(self, address, buffer, start=0, end=None):
"""Read from a device at specified address into a buffer"""
if start != 0 or end is not None:
if end is None:
end = len(buffer)
buffer = memoryview(buffer)[start:end]
stop = True # remove for efficiency later
return self._i2c.readfrom_into(address, buffer, stop)
def writeto(self, address, buffer, start=0, end=None, stop=True):
"""Write to a device at specified address from a buffer"""
if start != 0 or end is not None:
if end is None:
return self._i2c.writeto(address, memoryview(buffer)[start:], stop)
return self._i2c.writeto(address, memoryview(buffer)[start:end], stop)
return self._i2c.writeto(address, buffer, stop)
# TODO untested, as actually busio.SPI was on
# tasklist https://github.com/adafruit/Adafruit_Micropython_Blinka/issues/2 :(
class SPI(Lockable):
"""Bitbang/Software SPI implementation"""
def __init__(self, clock, MOSI=None, MISO=None):
if agnostic.detector.board.any_embedded_linux:
# TODO: Attempt to load this library automatically
raise NotImplementedError(
"For bitbangio on Linux, please use Adafruit_CircuitPython_BitbangIO"
)
from machine import SPI as _SPI
self._spi = _SPI(-1)
self._pins = (clock, MOSI, MISO)
def configure(self, baudrate=100000, polarity=0, phase=0, bits=8):
"""Update the configuration"""
from machine import Pin
from machine import SPI as _SPI
if self._locked:
# TODO verify if _spi obj 'caches' sck, mosi, miso to
# avoid storing in _attributeIds (duplicated in busio)
# i.e. #init ignores MOSI=None rather than unsetting
self._spi.init(
baudrate=baudrate,
polarity=polarity,
phase=phase,
bits=bits,
firstbit=_SPI.MSB,
sck=Pin(self._pins[0].id),
mosi=Pin(self._pins[1].id),
miso=Pin(self._pins[2].id),
)
else:
raise RuntimeError("First call try_lock()")
def write(self, buf):
"""Write to the SPI device"""
return self._spi.write(buf)
def readinto(self, buf):
"""Read from the SPI device into a buffer"""
return self.readinto(buf)
def write_readinto(self, buffer_out, buffer_in):
"""Write to the SPI device and read from the SPI device into a buffer"""
return self.write_readinto(buffer_out, buffer_in)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/bitbangio.py
| 0.450359 | 0.183887 |
bitbangio.py
|
pypi
|
class Enum:
"""
Object supporting CircuitPython-style of static symbols
as seen with Direction.OUTPUT, Pull.UP
"""
def __repr__(self):
"""
Assumes instance will be found as attribute of own class.
Returns dot-subscripted path to instance
(assuming absolute import of containing package)
"""
cls = type(self)
for key in dir(cls):
if getattr(cls, key) is self:
return "{}.{}.{}".format(cls.__module__, cls.__qualname__, key)
return repr(self)
@classmethod
def iteritems(cls):
"""
Inspects attributes of the class for instances of the class
and returns as key,value pairs mirroring dict#iteritems
"""
for key in dir(cls):
val = getattr(cls, key)
if isinstance(cls, val):
yield (key, val)
class ContextManaged:
"""An object that automatically deinitializes hardware with a context manager."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.deinit()
# pylint: disable=no-self-use
def deinit(self):
"""Free any hardware used by the object."""
return
# pylint: enable=no-self-use
class Lockable(ContextManaged):
"""An object that must be locked to prevent collisions on a microcontroller resource."""
_locked = False
def try_lock(self):
"""Attempt to grab the lock. Return True on success, False if the lock is already taken."""
if self._locked:
return False
self._locked = True
return True
def unlock(self):
"""Release the lock so others may use the resource."""
if self._locked:
self._locked = False
else:
raise ValueError("Not locked")
def patch_system():
"""Patch modules that may be different due to the platform."""
# pylint: disable=import-outside-toplevel
import sys
from adafruit_blinka.agnostic import time
# pylint: enable=import-outside-toplevel
sys.modules["time"] = time
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/__init__.py
| 0.849332 | 0.318618 |
__init__.py
|
pypi
|
import os
try:
from microcontroller.pin import pwmOuts
except ImportError:
raise RuntimeError("No PWM outputs defined for this board") from ImportError
# pylint: disable=unnecessary-pass
class PWMError(IOError):
"""Base class for PWM errors."""
pass
# pylint: enable=unnecessary-pass
class PWMOut:
"""Pulse Width Modulation Output Class"""
# Sysfs paths
_sysfs_path = "/sys/class/pwm/"
_channel_path = "pwmchip{}"
# Channel paths
_export_path = "export"
_unexport_path = "unexport"
_pin_path = "pwm-{}:{}"
# Pin attribute paths
_pin_period_path = "period"
_pin_duty_cycle_path = "duty_cycle"
_pin_polarity_path = "polarity"
_pin_enable_path = "enable"
def __init__(self, pin, *, frequency=500, duty_cycle=0, variable_frequency=False):
"""Instantiate a PWM object and open the sysfs PWM corresponding to the
specified channel and pin.
Args:
pin (Pin): CircuitPython Pin object to output to
duty_cycle (int) : The fraction of each pulse which is high. 16-bit
frequency (int) : target frequency in Hertz (32-bit)
variable_frequency (bool) : True if the frequency will change over time
Returns:
PWMOut: PWMOut object.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if `channel` or `pin` types are invalid.
ValueError: if PWM channel does not exist.
"""
self._pwmpin = None
self._channel = None
self._period = 0
self._open(pin, duty_cycle, frequency, variable_frequency)
def __del__(self):
self.deinit()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.deinit()
def _open(self, pin, duty=0, freq=500, variable_frequency=False):
self._channel = None
for pwmpair in pwmOuts:
if pwmpair[1] == pin:
self._channel = pwmpair[0][0]
self._pwmpin = pwmpair[0][1]
self._pin = pin
if self._channel is None:
raise RuntimeError("No PWM channel found for this Pin")
if variable_frequency:
print("Variable Frequency is not supported, continuing without it...")
channel_path = os.path.join(
self._sysfs_path, self._channel_path.format(self._channel)
)
if not os.path.isdir(channel_path):
raise ValueError(
"PWM channel does not exist, check that the required modules are loaded."
)
pin_path = os.path.join(
channel_path, self._pin_path.format(self._channel, self._pwmpin)
)
if not os.path.isdir(pin_path):
try:
with open(
os.path.join(channel_path, self._export_path), "w"
) as f_export:
f_export.write("%d\n" % self._pwmpin)
except IOError as e:
raise PWMError(e.errno, "Exporting PWM pin: " + e.strerror) from IOError
# Look up the period, for fast duty cycle updates
self._period = self._get_period()
# set frequency
self.frequency = freq
# set duty
self.duty_cycle = duty
self._set_enabled(True)
def deinit(self):
"""Deinit the sysfs PWM."""
# pylint: disable=broad-except
try:
channel_path = os.path.join(
self._sysfs_path, self._channel_path.format(self._channel)
)
if self._channel is not None:
# self.duty_cycle = 0
self._set_enabled(False) # make to disable before unexport
try:
# unexport_path = os.path.join(channel_path, self._unexport_path)
with open(
os.path.join(channel_path, self._unexport_path), "w"
) as f_unexport:
f_unexport.write("%d\n" % self._pwmpin)
except IOError as e:
raise PWMError(
e.errno, "Unexporting PWM pin: " + e.strerror
) from IOError
except Exception as e:
# due to a race condition for which I have not yet been
# able to find the root cause, deinit() often fails
# but it does not effect future usage of the pwm pin
print(
"warning: failed to deinitialize pwm pin {0}:{1} due to: {2}\n".format(
self._channel, self._pwmpin, type(e).__name__
)
)
finally:
self._channel = None
self._pwmpin = None
# pylint: enable=broad-except
def _is_deinited(self):
if self._pwmpin is None:
raise ValueError(
"Object has been deinitialize and can no longer "
"be used. Create a new object."
)
def _write_pin_attr(self, attr, value):
# Make sure the pin is active
self._is_deinited()
path = os.path.join(
self._sysfs_path,
self._channel_path.format(self._channel),
self._pin_path.format(self._channel, self._pwmpin),
attr,
)
with open(path, "w") as f_attr:
f_attr.write(value + "\n")
def _read_pin_attr(self, attr):
# Make sure the pin is active
self._is_deinited()
path = os.path.join(
self._sysfs_path,
self._channel_path.format(self._channel),
self._pin_path.format(self._channel, self._pwmpin),
attr,
)
with open(path, "r") as f_attr:
return f_attr.read().strip()
# Mutable properties
def _get_period(self):
period_ns = self._read_pin_attr(self._pin_period_path)
try:
period_ns = int(period_ns)
except ValueError:
raise PWMError(
None, 'Unknown period value: "%s"' % period_ns
) from ValueError
# Convert period from nanoseconds to seconds
period = period_ns / 1e9
# Update our cached period
self._period = period
return period
def _set_period(self, period):
if not isinstance(period, (int, float)):
raise TypeError("Invalid period type, should be int or float.")
# Convert period from seconds to integer nanoseconds
period_ns = int(period * 1e9)
self._write_pin_attr(self._pin_period_path, "{}".format(period_ns))
# Update our cached period
self._period = float(period)
period = property(_get_period, _set_period)
"""Get or set the PWM's output period in seconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
def _get_duty_cycle(self):
duty_cycle_ns = self._read_pin_attr(self._pin_duty_cycle_path)
try:
duty_cycle_ns = int(duty_cycle_ns)
except ValueError:
raise PWMError(
None, 'Unknown duty cycle value: "%s"' % duty_cycle_ns
) from ValueError
# Convert duty cycle from nanoseconds to seconds
duty_cycle = duty_cycle_ns / 1e9
# Convert duty cycle to ratio from 0.0 to 1.0
duty_cycle = duty_cycle / self._period
# convert to 16-bit
duty_cycle = int(duty_cycle * 65535)
return duty_cycle
def _set_duty_cycle(self, duty_cycle):
if not isinstance(duty_cycle, (int, float)):
raise TypeError("Invalid duty cycle type, should be int or float.")
# convert from 16-bit
duty_cycle /= 65535.0
if not 0.0 <= duty_cycle <= 1.0:
raise ValueError("Invalid duty cycle value, should be between 0.0 and 1.0.")
# Convert duty cycle from ratio to seconds
duty_cycle = duty_cycle * self._period
# Convert duty cycle from seconds to integer nanoseconds
duty_cycle_ns = int(duty_cycle * 1e9)
self._write_pin_attr(self._pin_duty_cycle_path, "{}".format(duty_cycle_ns))
duty_cycle = property(_get_duty_cycle, _set_duty_cycle)
"""Get or set the PWM's output duty cycle as a ratio from 0.0 to 1.0.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
ValueError: if value is out of bounds of 0.0 to 1.0.
:type: int, float
"""
def _get_frequency(self):
return 1.0 / self._get_period()
def _set_frequency(self, frequency):
if not isinstance(frequency, (int, float)):
raise TypeError("Invalid frequency type, should be int or float.")
self._set_period(1.0 / frequency)
frequency = property(_get_frequency, _set_frequency)
"""Get or set the PWM's output frequency in Hertz.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
def _get_enabled(self):
enabled = self._read_pin_attr(self._pin_enable_path)
if enabled == "1":
return True
if enabled == "0":
return False
raise PWMError(None, 'Unknown enabled value: "%s"' % enabled)
def _set_enabled(self, value):
"""Get or set the PWM's output enabled state.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not bool.
:type: bool
"""
if not isinstance(value, bool):
raise TypeError("Invalid enabled type, should be string.")
self._write_pin_attr(self._pin_enable_path, "1" if value else "0")
# String representation
def __str__(self):
return "PWM%d, pin %s (freq=%f Hz, duty_cycle=%f%%)" % (
self._channel,
self._pin,
self.frequency,
self.duty_cycle * 100,
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/am335x/sysfs_pwmout.py
| 0.754644 | 0.234692 |
sysfs_pwmout.py
|
pypi
|
import os
from time import sleep
from errno import EACCES
try:
from microcontroller.pin import pwmOuts
except ImportError:
raise RuntimeError("No PWM outputs defined for this board.") from ImportError
# pylint: disable=unnecessary-pass, too-many-instance-attributes
class PWMError(IOError):
"""Base class for PWM errors."""
pass
# pylint: enable=unnecessary-pass
class PWMOut:
"""Pulse Width Modulation Output Class"""
# Number of retries to check for successful PWM export on open
PWM_STAT_RETRIES = 10
# Delay between check for successful PWM export on open (100ms)
PWM_STAT_DELAY = 0.1
# Sysfs paths
_chip_path = "pwmchip{}"
_channel_path = "pwm{}"
def __init__(self, pwm, *, frequency=500, duty_cycle=0, variable_frequency=False):
"""Instantiate a PWM object and open the sysfs PWM corresponding to the
specified chip and channel.
Args:
pwm (str): PWM pin.
frequency (int, float): target frequency in Hertz (32-bit).
duty_cycle (int, float): The fraction of each pulse which is high (16-bit).
variable_frequency (bool): True if the frequency will change over time.
Returns:
PWM: PWM object.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if `chip` or `channel` types are invalid.
LookupError: if PWM chip does not exist.
TimeoutError: if waiting for PWM export times out.
"""
self._chip = None
self._channel = None
self._period_ns = 0
self._open(pwm, frequency, duty_cycle, variable_frequency)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _open(self, pwm, frequency, duty_cycle, variable_frequency):
for pwmout in pwmOuts:
if pwmout[1] == pwm:
self._chip = pwmout[0][0]
self._channel = pwmout[0][1]
self._chip_path = os.path.join(
"/sys/class/pwm", self._chip_path.format(self._chip)
)
self._channel_path = os.path.join(
self._chip_path, self._channel_path.format(self._channel)
)
if variable_frequency:
print("Variable Frequency is not supported, continuing without it...")
if not os.path.isdir(self._chip_path):
raise LookupError("Opening PWM: PWM chip {} not found.".format(self._chip))
if not os.path.isdir(self._channel_path):
# Exporting the PWM.
try:
with open(os.path.join(self._chip_path, "export"), "w") as f_export:
f_export.write("{:d}\n".format(self._channel))
except IOError as e:
raise PWMError(
e.errno, "Exporting PWM channel: " + e.strerror
) from IOError
# Loop until PWM is exported
exported = False
for i in range(PWMOut.PWM_STAT_RETRIES):
if os.path.isdir(self._channel_path):
exported = True
break
sleep(PWMOut.PWM_STAT_DELAY)
if not exported:
raise TimeoutError(
'Exporting PWM: waiting for "{:s}" timed out.'.format(
self._channel_path
)
)
# Loop until 'period' is writable, This could take some time after
# export as application of the udev rules after export is asynchronous.
# Without this loop, the following properties may not be writable yet.
for i in range(PWMOut.PWM_STAT_RETRIES):
try:
with open(
os.path.join(self._channel_path, "period"),
"w",
):
break
except IOError as e:
if e.errno != EACCES or (
e.errno == EACCES and i == PWMOut.PWM_STAT_RETRIES - 1
):
raise PWMError(
e.errno, "Opening PWM period: " + e.strerror
) from IOError
sleep(PWMOut.PWM_STAT_DELAY)
self.frequency = frequency
self.duty_cycle = duty_cycle
# Cache the period for fast duty cycle updates
self._period_ns = self._get_period_ns()
def close(self):
"""Close the PWM."""
if self._channel is not None:
# Unexporting the PWM channel
try:
unexport_fd = os.open(
os.path.join(self._chip_path, "unexport"), os.O_WRONLY
)
os.write(unexport_fd, "{:d}\n".format(self._channel).encode())
os.close(unexport_fd)
except OSError as e:
raise PWMError(e.errno, "Unexporting PWM: " + e.strerror) from OSError
self._chip = None
self._channel = None
def _write_channel_attr(self, attr, value):
with open(os.path.join(self._channel_path, attr), "w") as f_attr:
f_attr.write(value + "\n")
def _read_channel_attr(self, attr):
with open(os.path.join(self._channel_path, attr), "r") as f_attr:
return f_attr.read().strip()
# Methods
def enable(self):
"""Enable the PWM output."""
self.enabled = True
def disable(self):
"""Disable the PWM output."""
self.enabled = False
# Mutable properties
def _get_period(self):
return float(self.period_ms) / 1000
def _set_period(self, period):
if not isinstance(period, (int, float)):
raise TypeError("Invalid period type, should be int.")
self.period_ms = int(period * 1000)
period = property(_get_period, _set_period)
"""Get or set the PWM's output period in seconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int.
:type: int, float
"""
def _get_period_ms(self):
return self.period_us / 1000
def _set_period_ms(self, period_ms):
if not isinstance(period_ms, (int, float)):
raise TypeError("Invalid period type, should be int or float.")
self.period_us = int(period_ms * 1000)
period_ms = property(_get_period_ms, _set_period_ms)
"""Get or set the PWM's output period in milliseconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int.
:type: int, float
"""
def _get_period_us(self):
return self.period_ns / 1000
def _set_period_us(self, period_us):
if not isinstance(period_us, int):
raise TypeError("Invalid period type, should be int.")
self.period_ns = int(period_us * 1000)
period_us = property(_get_period_us, _set_period_us)
"""Get or set the PWM's output period in microseconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int.
:type: int
"""
def _get_period_ns(self):
period_ns = self._read_channel_attr("period")
try:
period_ns = int(period_ns)
except ValueError:
raise PWMError(
None, 'Unknown period value: "%s".' % period_ns
) from ValueError
self._period_ns = period_ns
return period_ns
def _set_period_ns(self, period_ns):
if not isinstance(period_ns, int):
raise TypeError("Invalid period type, should be int.")
self._write_channel_attr("period", str(period_ns))
# Update our cached period
self._period_ns = period_ns
period_ns = property(_get_period_ns, _set_period_ns)
"""Get or set the PWM's output period in nanoseconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int.
:type: int
"""
def _get_duty_cycle_ns(self):
duty_cycle_ns_str = self._read_channel_attr("duty_cycle")
try:
duty_cycle_ns = int(duty_cycle_ns_str)
except ValueError:
raise PWMError(
None, 'Unknown duty cycle value: "{:s}"'.format(duty_cycle_ns_str)
) from ValueError
return duty_cycle_ns
def _set_duty_cycle_ns(self, duty_cycle_ns):
if not isinstance(duty_cycle_ns, int):
raise TypeError("Invalid duty cycle type, should be int.")
self._write_channel_attr("duty_cycle", str(duty_cycle_ns))
duty_cycle_ns = property(_get_duty_cycle_ns, _set_duty_cycle_ns)
"""Get or set the PWM's output duty cycle in nanoseconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int.
:type: int
"""
def _get_duty_cycle(self):
return float(self.duty_cycle_ns) / self._period_ns
def _set_duty_cycle(self, duty_cycle):
if not isinstance(duty_cycle, (int, float)):
raise TypeError("Invalid duty cycle type, should be int or float.")
if not 0.0 <= duty_cycle <= 1.0:
raise ValueError("Invalid duty cycle value, should be between 0.0 and 1.0.")
# Convert duty cycle from ratio to nanoseconds
self.duty_cycle_ns = int(duty_cycle * self._period_ns)
duty_cycle = property(_get_duty_cycle, _set_duty_cycle)
"""Get or set the PWM's output duty cycle as a ratio from 0.0 to 1.0.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
ValueError: if value is out of bounds of 0.0 to 1.0.
:type: int, float
"""
def _get_frequency(self):
return 1.0 / self.period
def _set_frequency(self, frequency):
if not isinstance(frequency, (int, float)):
raise TypeError("Invalid frequency type, should be int or float.")
self.period = 1.0 / frequency
frequency = property(_get_frequency, _set_frequency)
"""Get or set the PWM's output frequency in Hertz.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
def _get_polarity(self):
return self._read_channel_attr("polarity")
def _set_polarity(self, polarity):
if not isinstance(polarity, str):
raise TypeError("Invalid polarity type, should be str.")
if polarity.lower() not in ["normal", "inversed"]:
raise ValueError('Invalid polarity, can be: "normal" or "inversed".')
self._write_channel_attr("polarity", polarity.lower())
polarity = property(_get_polarity, _set_polarity)
"""Get or set the PWM's output polarity. Can be "normal" or "inversed".
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not str.
ValueError: if value is invalid.
:type: str
"""
def _get_enabled(self):
enabled = self._read_channel_attr("enable")
if enabled == "1":
return True
if enabled == "0":
return False
raise PWMError(None, 'Unknown enabled value: "{:s}"'.format(enabled))
def _set_enabled(self, value):
if not isinstance(value, bool):
raise TypeError("Invalid enabled type, should be bool.")
self._write_channel_attr("enable", "1" if value else "0")
enabled = property(_get_enabled, _set_enabled)
"""Get or set the PWM's output enabled state.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not bool.
:type: bool
"""
# String representation
def __str__(self):
return (
"PWM {:d}, chip {:d} (period={:f} sec, duty_cycle={:f}%,"
" polarity={:s}, enabled={:s})".format(
self._channel,
self._chip,
self.period,
self.duty_cycle * 100,
self.polarity,
str(self.enabled),
)
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rockchip/PWMOut.py
| 0.679179 | 0.273483 |
PWMOut.py
|
pypi
|
from .rp2040_u2if import rp2040_u2if
class Pin:
"""A basic Pin class for use with RP2040 with u2if firmware."""
# pin modes
IN = 0
OUT = 1
# pin values
LOW = 0
HIGH = 1
# pin pulls
PULL_NONE = 0
PULL_UP = 1
PULL_DOWN = 2
def __init__(self, pin_id=None):
self.id = pin_id
self._mode = None
self._pull = None
# pylint:disable = no-self-use
def _u2if_open_hid(self, vid, pid):
rp2040_u2if.open(vid, pid)
def init(self, mode=IN, pull=PULL_NONE):
"""Initialize the Pin"""
pull = Pin.PULL_NONE if pull is None else pull
if self.id is None:
raise RuntimeError("Can not init a None type pin.")
if mode not in (Pin.IN, Pin.OUT):
raise ValueError("Incorrect mode value.")
if pull not in (Pin.PULL_NONE, Pin.PULL_UP, Pin.PULL_DOWN):
raise ValueError("Incorrect pull value.")
rp2040_u2if.gpio_init_pin(self.id, mode, pull)
self._mode = mode
self._pull = pull
def value(self, val=None):
"""Set or return the Pin Value"""
# Digital In / Out
if self._mode in (Pin.IN, Pin.OUT):
# digital read
if val is None:
return rp2040_u2if.gpio_get_pin(self.id)
# digital write
if val in (Pin.LOW, Pin.HIGH):
rp2040_u2if.gpio_set_pin(self.id, val)
return None
# nope
raise ValueError("Invalid value for pin.")
raise RuntimeError(
"No action for mode {} with value {}".format(self._mode, val)
)
# create pin instances for each pin
GP0 = Pin(0)
GP1 = Pin(1)
GP2 = Pin(2)
GP3 = Pin(3)
GP4 = Pin(4)
GP5 = Pin(5)
GP6 = Pin(6)
GP7 = Pin(7)
GP8 = Pin(8)
GP9 = Pin(9)
GP10 = Pin(10)
GP11 = Pin(11)
GP12 = Pin(12)
GP13 = Pin(13)
GP14 = Pin(14)
GP15 = Pin(15)
GP16 = Pin(16)
GP17 = Pin(17)
GP18 = Pin(18)
GP19 = Pin(19)
GP20 = Pin(20)
GP21 = Pin(21)
GP22 = Pin(22)
GP23 = Pin(23)
GP24 = Pin(24)
GP25 = Pin(25)
GP26 = Pin(26)
GP27 = Pin(27)
GP28 = Pin(28)
GP29 = Pin(29)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rp2040_u2if/pin.py
| 0.579995 | 0.213767 |
pin.py
|
pypi
|
from .rp2040_u2if import rp2040_u2if
class I2C:
"""I2C Base Class for RP2040 u2if"""
def __init__(self, index, *, frequency=100000):
self._index = index
rp2040_u2if.i2c_set_port(self._index)
rp2040_u2if.i2c_configure(frequency)
def scan(self):
"""Perform an I2C Device Scan"""
rp2040_u2if.i2c_set_port(self._index)
return rp2040_u2if.i2c_scan()
# pylint: disable=unused-argument
def writeto(self, address, buffer, *, start=0, end=None, stop=True):
"""Write data from the buffer to an address"""
rp2040_u2if.i2c_set_port(self._index)
rp2040_u2if.i2c_writeto(address, buffer, start=start, end=end)
def readfrom_into(self, address, buffer, *, start=0, end=None, stop=True):
"""Read data from an address and into the buffer"""
rp2040_u2if.i2c_set_port(self._index)
rp2040_u2if.i2c_readfrom_into(address, buffer, start=start, end=end)
def writeto_then_readfrom(
self,
address,
buffer_out,
buffer_in,
*,
out_start=0,
out_end=None,
in_start=0,
in_end=None,
stop=False
):
"""Write data from buffer_out to an address and then
read data from an address and into buffer_in
"""
rp2040_u2if.i2c_set_port(self._index)
rp2040_u2if.i2c_writeto_then_readfrom(
address,
buffer_out,
buffer_in,
out_start=out_start,
out_end=out_end,
in_start=in_start,
in_end=in_end,
)
# pylint: enable=unused-argument
class I2C_Pico(I2C):
"""I2C Class for Pico u2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 5 and sda.id == 4:
index = 0
if scl.id == 15 and sda.id == 14:
index = 1
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
super().__init__(index, frequency=frequency)
class I2C_Feather(I2C):
"""I2C Class for Feather u2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 3 and sda.id == 2:
index = 1
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
super().__init__(index, frequency=frequency)
class I2C_QTPY(I2C):
"""I2C Class for QT Py 2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 25 and sda.id == 24:
index = 0
if scl.id == 23 and sda.id == 22:
index = 1
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
super().__init__(index, frequency=frequency)
class I2C_ItsyBitsy(I2C):
"""I2C Class for ItsyBitsy u2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 3 and sda.id == 2:
index = 1
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
super().__init__(index, frequency=frequency)
class I2C_QT2040_Trinkey(I2C):
"""I2C Class for QT2040 Trinkey u2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 17 and sda.id == 16:
index = 0
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
super().__init__(index, frequency=frequency)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rp2040_u2if/i2c.py
| 0.617743 | 0.278727 |
i2c.py
|
pypi
|
from .rp2040_u2if import rp2040_u2if
# pylint: disable=protected-access, no-self-use
class SPI:
"""SPI Base Class for RP2040 u2if"""
MSB = 0
def __init__(self, index, *, baudrate=100000):
self._index = index
self._frequency = baudrate
rp2040_u2if.spi_set_port(self._index)
rp2040_u2if.spi_configure(self._frequency)
# pylint: disable=too-many-arguments,unused-argument
def init(
self,
baudrate=1000000,
polarity=0,
phase=0,
bits=8,
firstbit=MSB,
sck=None,
mosi=None,
miso=None,
):
"""Initialize the Port"""
self._frequency = baudrate
rp2040_u2if.spi_set_port(self._index)
rp2040_u2if.spi_configure(self._frequency)
# pylint: enable=too-many-arguments
@property
def frequency(self):
"""Return the current frequency"""
return self._frequency
def write(self, buf, start=0, end=None):
"""Write data from the buffer to SPI"""
rp2040_u2if.spi_write(buf, start=start, end=end)
def readinto(self, buf, start=0, end=None, write_value=0):
"""Read data from SPI and into the buffer"""
rp2040_u2if.spi_readinto(buf, start=start, end=end, write_value=write_value)
# pylint: disable=too-many-arguments
def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
rp2040_u2if.spi_write_readinto(
buffer_out,
buffer_in,
out_start=out_start,
out_end=out_end,
in_start=in_start,
in_end=in_end,
)
# pylint: enable=too-many-arguments
class SPI_Pico(SPI):
"""SPI Class for Pico u2if"""
def __init__(self, clock, *, baudrate=100000):
index = None
if clock.id == 18:
index = 0
if clock.id == 10:
index = 1
if index is None:
raise ValueError("No SPI port on specified pin.")
super().__init__(index, baudrate=baudrate)
class SPI_Feather(SPI):
"""SPI Class for Feather u2if"""
def __init__(self, clock, *, baudrate=100000):
index = None
if clock.id == 18:
index = 0
if index is None:
raise ValueError("No SPI port on specified pin.")
super().__init__(index, baudrate=baudrate)
class SPI_QTPY(SPI):
"""SPI Class for QT Py u2if"""
def __init__(self, clock, *, baudrate=100000):
index = None
if clock.id == 6:
index = 0
if index is None:
raise ValueError("No SPI port on specified pin.")
super().__init__(index, baudrate=baudrate)
class SPI_ItsyBitsy(SPI):
"""SPI Class for ItsyBitsy u2if"""
def __init__(self, clock, *, baudrate=100000):
index = None
if clock.id == 18:
index = 0
if index is None:
raise ValueError("No SPI port on specified pin.")
super().__init__(index, baudrate=baudrate)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rp2040_u2if/spi.py
| 0.7324 | 0.22154 |
spi.py
|
pypi
|
from greatfet import GreatFET
try:
from microcontroller.pin import pwmOuts
except ImportError:
raise RuntimeError("No PWM outputs defined for this board") from ImportError
# pylint: disable=unnecessary-pass
class PWMError(IOError):
"""Base class for PWM errors."""
pass
# pylint: enable=unnecessary-pass
class PWMOut:
"""Pulse Width Modulation Output Class"""
MAX_CYCLE_LEVEL = 1024
def __init__(self, pin, *, frequency=750, duty_cycle=0, variable_frequency=False):
"""This class makes use of the GreatFET One's Pattern Generator to create a
Simulated Pulse width modulation. The way that the Pattern Generator works is that
takes a pattern in the form of bytes and will repeat the output. The trick to simulate
PWM is to generate the correct byte pattern for the correct channel.
Args:
pin (Pin): CircuitPython Pin object to output to
duty_cycle (int) : The fraction of each pulse which is high. 16-bit
frequency (int) : target frequency in Hertz (32-bit)
Returns:
PWMOut: PWMOut object.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if `channel` or `pin` types are invalid.
ValueError: if PWM channel does not exist.
"""
self._gf = GreatFET()
if variable_frequency:
raise NotImplementedError("Variable Frequency is not currently supported.")
self._pattern = None
self._channel = None
self._enable = False
self._frequency = 500
self._duty_cycle = 0
self._open(pin, duty_cycle, frequency)
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.deinit()
def _open(self, pin, duty=0, freq=500):
self._channel = None
for pwmpair in pwmOuts:
if pwmpair[1] == pin:
self._channel = pwmpair[0]
self._pin = pin
if self._channel is None:
raise RuntimeError("No PWM channel found for this Pin")
# set duty
self.duty_cycle = duty
# set frequency
self.frequency = freq
self._set_enabled(True)
def deinit(self):
"""Deinit the GreatFET One PWM."""
# pylint: disable=broad-except
try:
if self._channel is not None:
# self.duty_cycle = 0
self._set_enabled(False)
except Exception as e:
# due to a race condition for which I have not yet been
# able to find the root cause, deinit() often fails
# but it does not effect future usage of the pwm pin
print(
"warning: failed to deinitialize pwm pin {0} due to: {1}\n".format(
self._channel, type(e).__name__
)
)
finally:
self._pattern = None
self._channel = None
# pylint: enable=broad-except
def _is_deinited(self):
if self._pattern is None:
raise ValueError(
"Object has been deinitialize and can no longer "
"be used. Create a new object."
)
# Mutable properties
def _get_period(self):
return 1.0 / self._get_frequency()
def _set_period(self, period):
"""Get or set the PWM's output period in seconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
if not isinstance(period, (int, float)):
raise TypeError("Invalid period type, should be int or float.")
self._set_frequency(1.0 / period)
period = property(_get_period, _set_period)
def _get_duty_cycle(self):
"""Get or set the PWM's output duty cycle as a ratio from 0.0 to 1.0.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
ValueError: if value is out of bounds of 0.0 to 1.0.
:type: int, float
"""
return self._duty_cycle
def _set_duty_cycle(self, duty_cycle):
if not isinstance(duty_cycle, (int, float)):
raise TypeError("Invalid duty cycle type, should be int or float.")
# convert from 16-bit
if isinstance(duty_cycle, int):
duty_cycle /= 65535.0
if not 0.0 <= duty_cycle <= 1.0:
raise ValueError("Invalid duty cycle value, should be between 0.0 and 1.0.")
# Generate a pattern for 1024 samples of the duty cycle
pattern = [(1 << self._channel)] * round(PWMOut.MAX_CYCLE_LEVEL * duty_cycle)
pattern += [(0 << self._channel)] * round(
PWMOut.MAX_CYCLE_LEVEL * (1.0 - duty_cycle)
)
self._pattern = pattern
self._duty_cycle = duty_cycle
if self._enable:
self._set_enabled(True)
duty_cycle = property(_get_duty_cycle, _set_duty_cycle)
def _get_frequency(self):
return self._frequency
def _set_frequency(self, frequency):
"""Get or set the PWM's output frequency in Hertz.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
if not isinstance(frequency, (int, float)):
raise TypeError("Invalid frequency type, should be int or float.")
# We are sending 1024 samples per second already
self._gf.pattern_generator.set_sample_rate(frequency * len(self._pattern))
self._frequency = frequency
frequency = property(_get_frequency, _set_frequency)
def _get_enabled(self):
enabled = self._enable
if enabled == "1":
return True
if enabled == "0":
return False
raise PWMError(None, 'Unknown enabled value: "%s"' % enabled)
def _set_enabled(self, value):
"""Get or set the PWM's output enabled state.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not bool.
:type: bool
"""
if not isinstance(value, bool):
raise TypeError("Invalid enabled type, should be string.")
self._enable = value
if self._gf:
if self._enable:
if self._pattern:
self._gf.pattern_generator.scan_out_pattern(self._pattern)
else:
self._gf.pattern_generator.stop()
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/nxp_lpc4330/pwmout.py
| 0.877135 | 0.350671 |
pwmout.py
|
pypi
|
from greatfet import GreatFET
class SPI:
"""Custom I2C Class for NXP LPC4330"""
MSB = 0
def __init__(self):
self._gf = GreatFET()
self._frequency = None
self.buffer_size = 255
self._mode = 0
self._spi = None
self._presets = {
204000: (100, 9),
408000: (100, 4),
680000: (100, 2),
1020000: (100, 1),
2040000: (50, 1),
4250000: (24, 1),
8500000: (12, 1),
12750000: (8, 1),
17000000: (6, 1),
20400000: (2, 4),
25500000: (4, 1),
34000000: (2, 2),
51000000: (2, 1),
102000000: (2, 0),
}
# pylint: disable=too-many-arguments,unused-argument
def init(
self,
baudrate=100000,
polarity=0,
phase=0,
bits=8,
firstbit=MSB,
sck=None,
mosi=None,
miso=None,
):
"""Initialize the Port"""
# Figure out the mode based on phase and polarity
polarity = int(polarity)
phase = int(phase)
self._mode = (polarity << 1) | phase
# Using API due to possible interface change
self._spi = self._gf.apis.spi
# Check baudrate against presets and adjust to the closest one
if self._frequency is None:
preset = self._find_closest_preset(baudrate)
else:
preset = self._presets[self._frequency]
clock_prescale_rate, serial_clock_rate = preset
self._spi.init(serial_clock_rate, clock_prescale_rate)
# Set the polarity and phase (the "SPI mode").
self._spi.set_clock_polarity_and_phase(self._mode)
# pylint: enable=too-many-arguments
def _find_closest_preset(self, target_frequency):
"""Loop through self._frequencies and find the closest
setting. Return the preset values and set the frequency
to the found value
"""
closest_preset = None
for frequency in self._presets:
preset = self._presets[frequency]
if self._frequency is None or abs(frequency - target_frequency) < abs(
self._frequency - target_frequency
):
self._frequency = frequency
closest_preset = preset
return closest_preset
@property
def frequency(self):
"""Return the current frequency"""
return self._frequency
def write(self, buf, start=0, end=None):
"""Write data from the buffer to SPI"""
end = end if end else len(buf)
self._transmit(buf[start:end])
# pylint: disable=unused-argument
def readinto(self, buf, start=0, end=None, write_value=0):
"""Read data from SPI and into the buffer"""
end = end if end else len(buf)
result = self._transmit([write_value] * (end - start), end - start)
for i, b in enumerate(result):
buf[start + i] = b
# pylint: enable=unused-argument
# pylint: disable=too-many-arguments
def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
out_end = out_end if out_end else len(buffer_out)
in_end = in_end if in_end else len(buffer_in)
result = self._transmit(buffer_out[out_start:out_end], in_end - in_start)
for i, b in enumerate(result):
buffer_in[in_start + i] = b
# pylint: enable=too-many-arguments
def _transmit(self, data, receive_length=None):
data_to_transmit = bytearray(data)
data_received = bytearray()
if receive_length is None:
receive_length = len(data)
# If we need to receive more than we've transmitted, extend the data out.
if receive_length > len(data):
padding = receive_length - len(data)
data_to_transmit.extend([0] * padding)
# Transmit our data in chunks of the buffer size.
while data_to_transmit:
# Extract a single data chunk from the transmit buffer.
chunk = data_to_transmit[0 : self.buffer_size]
del data_to_transmit[0 : self.buffer_size]
# Finally, exchange the data.
response = self._spi.clock_data(len(chunk), bytes(chunk))
data_received.extend(response)
# Once we're done, return the data received.
return bytes(data_received)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/nxp_lpc4330/spi.py
| 0.84367 | 0.363449 |
spi.py
|
pypi
|
from microcontroller import Pin
GP0 = Pin(0)
GP1 = Pin(1)
GP2 = Pin(2)
GP3 = Pin(3)
GP4 = Pin(4)
GP5 = Pin(5)
GP6 = Pin(6)
GP7 = Pin(7)
GP8 = Pin(8)
GP9 = Pin(9)
GP10 = Pin(10)
GP11 = Pin(11)
GP12 = Pin(12)
GP13 = Pin(13)
GP14 = Pin(14)
GP15 = Pin(15)
GP16 = Pin(16)
GP17 = Pin(17)
GP18 = Pin(18)
GP19 = Pin(19)
GP20 = Pin(20)
GP21 = Pin(21)
GP22 = Pin(22)
GP23 = Pin(23)
GP24 = Pin(24)
GP25 = Pin(25)
GP26 = Pin(26)
GP27 = Pin(27)
GP28 = Pin(28)
GP29 = Pin(29)
# ordered as spiId, sckId, mosiId (tx), misoId (rx)
spiPorts = (
(0, GP2, GP3, GP0),
(0, GP2, GP3, GP4),
(0, GP2, GP3, GP16),
(0, GP2, GP7, GP0),
(0, GP2, GP7, GP4),
(0, GP2, GP7, GP16),
(0, GP2, GP19, GP0),
(0, GP2, GP19, GP4),
(0, GP2, GP19, GP16),
(0, GP6, GP3, GP0),
(0, GP6, GP3, GP4),
(0, GP6, GP3, GP16),
(0, GP6, GP7, GP0),
(0, GP6, GP7, GP4),
(0, GP6, GP7, GP16),
(0, GP6, GP19, GP0),
(0, GP6, GP19, GP4),
(0, GP6, GP19, GP16),
(0, GP18, GP3, GP0),
(0, GP18, GP3, GP4),
(0, GP18, GP3, GP16),
(0, GP18, GP7, GP0),
(0, GP18, GP7, GP4),
(0, GP18, GP7, GP16),
(0, GP18, GP19, GP0),
(0, GP18, GP19, GP4),
(0, GP18, GP19, GP16),
(1, GP10, GP11, GP8),
(1, GP10, GP11, GP12),
(1, GP10, GP15, GP8),
(1, GP10, GP15, GP12),
(1, GP14, GP11, GP8),
(1, GP14, GP11, GP12),
(1, GP14, GP15, GP8),
(1, GP14, GP15, GP12),
)
# ordered as uartId, txId, rxId
uartPorts = (
(0, GP0, GP1),
(0, GP0, GP13),
(0, GP12, GP1),
(0, GP12, GP13),
(1, GP4, GP5),
(1, GP4, GP9),
(1, GP8, GP5),
(1, GP8, GP9),
)
# ordered as scl, sda
i2cPorts = (
(0, GP1, GP0),
(0, GP1, GP4),
(0, GP1, GP8),
(0, GP1, GP12),
(0, GP5, GP0),
(0, GP5, GP4),
(0, GP5, GP8),
(0, GP5, GP12),
(0, GP9, GP0),
(0, GP9, GP4),
(0, GP9, GP8),
(0, GP9, GP12),
(0, GP13, GP0),
(0, GP13, GP4),
(0, GP13, GP8),
(0, GP13, GP12),
(1, GP3, GP2),
(1, GP3, GP6),
(1, GP3, GP10),
(1, GP3, GP14),
(1, GP7, GP2),
(1, GP7, GP6),
(1, GP7, GP10),
(1, GP7, GP14),
(1, GP11, GP2),
(1, GP11, GP6),
(1, GP11, GP10),
(1, GP11, GP14),
(1, GP15, GP2),
(1, GP15, GP6),
(1, GP15, GP10),
(1, GP15, GP14),
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rp2040/pin.py
| 0.447702 | 0.368037 |
pin.py
|
pypi
|
from machine import SPI as _SPI
from machine import Pin
from microcontroller.pin import spiPorts
# pylint: disable=protected-access, no-self-use
class SPI:
"""Custom SPI Class for RP2040"""
def __init__(self, clock, MOSI=None, MISO=None, *, baudrate=1000000):
self._frequency = baudrate
for portId, portSck, portMosi, portMiso in spiPorts:
if (
(clock == portSck)
and MOSI in (portMosi, None) # Clock is required!
and MISO in (portMiso, None) # But can do with just output
): # Or just input
mosiPin = Pin(portMosi.id) if MOSI else None
misoPin = Pin(portMiso.id) if MISO else None
self._spi = _SPI(
portId,
sck=Pin(portSck.id),
mosi=mosiPin,
miso=misoPin,
baudrate=baudrate,
)
break
else:
raise ValueError(
"No Hardware SPI on (SCLK, MOSI, MISO)={}\nValid SPI ports:{}".format(
(clock, MOSI, MISO), spiPorts
)
)
# pylint: disable=too-many-arguments,unused-argument
def init(
self,
baudrate=1000000,
polarity=0,
phase=0,
bits=8,
firstbit=_SPI.MSB,
sck=None,
mosi=None,
miso=None,
):
"""Initialize the Port"""
self._frequency = baudrate
self._spi.init(
baudrate=baudrate,
polarity=polarity,
phase=phase,
bits=bits,
firstbit=firstbit,
)
# pylint: enable=too-many-arguments
@property
def frequency(self):
"""Return the current frequency"""
return self._frequency
def write(self, buf, start=0, end=None):
"""Write data from the buffer to SPI"""
self._spi.write(buf)
def readinto(self, buf, start=0, end=None, write_value=0):
"""Read data from SPI and into the buffer"""
self._spi.readinto(buf)
# pylint: disable=too-many-arguments
def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
self._spi.write_readinto(
buffer_out,
buffer_in,
out_start=out_start,
out_end=out_end,
in_start=in_start,
in_end=in_end,
)
# pylint: enable=too-many-arguments
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/rp2040/spi.py
| 0.677261 | 0.187765 |
spi.py
|
pypi
|
try:
import gpiod
except ImportError:
raise ImportError(
"libgpiod Python bindings not found, please install and try again! See "
"https://github.com/adafruit/Raspberry-Pi-Installer-Scripts/blob/master/libgpiod.sh"
) from ImportError
class Pin:
"""Pins dont exist in CPython so...lets make our own!"""
IN = 0
OUT = 1
LOW = 0
HIGH = 1
PULL_NONE = 0
PULL_UP = 1
PULL_DOWN = 2
_CONSUMER = "adafruit_blinka"
id = None
_value = LOW
_mode = IN
def __init__(self, pin_id):
self.id = pin_id
if isinstance(pin_id, tuple):
self._num = int(pin_id[1])
self._chip = gpiod.Chip(str(pin_id[0]), gpiod.Chip.OPEN_BY_NUMBER)
else:
self._num = int(pin_id)
self._chip = gpiod.Chip("gpiochip0", gpiod.Chip.OPEN_BY_NAME)
self._line = None
def __repr__(self):
return str(self.id)
def __eq__(self, other):
return self.id == other
def init(self, mode=IN, pull=None):
"""Initialize the Pin"""
if not self._line:
self._line = self._chip.get_line(int(self._num))
# print("init line: ", self.id, self._line)
if mode is not None:
if mode == self.IN:
flags = 0
if pull is not None:
if pull == self.PULL_UP:
raise NotImplementedError(
"Internal pullups not supported in libgpiod, "
"use physical resistor instead!"
)
if pull == self.PULL_DOWN:
raise NotImplementedError(
"Internal pulldowns not supported in libgpiod, "
"use physical resistor instead!"
)
raise RuntimeError("Invalid pull for pin: %s" % self.id)
self._mode = self.IN
self._line.release()
self._line.request(
consumer=self._CONSUMER, type=gpiod.LINE_REQ_DIR_IN, flags=flags
)
elif mode == self.OUT:
if pull is not None:
raise RuntimeError("Cannot set pull resistor on output")
self._mode = self.OUT
self._line.release()
self._line.request(consumer=self._CONSUMER, type=gpiod.LINE_REQ_DIR_OUT)
else:
raise RuntimeError("Invalid mode for pin: %s" % self.id)
def value(self, val=None):
"""Set or return the Pin Value"""
if val is None:
return self._line.get_value()
if val in (self.LOW, self.HIGH):
self._value = val
self._line.set_value(val)
return None
raise RuntimeError("Invalid value for pin")
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/generic_linux/libgpiod_pin.py
| 0.610105 | 0.182007 |
libgpiod_pin.py
|
pypi
|
import os
from adafruit_blinka import ContextManaged
try:
from microcontroller.pin import analogOuts
except ImportError:
raise RuntimeError("No Analog Outputs defined for this board") from ImportError
class AnalogOut(ContextManaged):
"""Analog Output Class"""
# Sysfs paths
_sysfs_path = "/sys/bus/iio/devices/"
_device_path = "iio:device{}"
# Channel paths
_channel_path = "out_voltage{}_raw"
_scale_path = "out_voltage_scale"
def __init__(self, dac_id):
"""Instantiate an AnalogOut object and verify the sysfs IIO
corresponding to the specified channel and pin.
Args:
dac_id (int): Analog Output ID as defined in microcontroller.pin
Returns:
AnalogOut: AnalogOut object.
Raises:
TypeError: if `channel` or `pin` types are invalid.
ValueError: if AnalogOut channel does not exist.
"""
self.id = dac_id
self._device = None
self._channel = None
self._open(dac_id)
def __enter__(self):
return self
def _open(self, dac_id):
self._device = None
for dacpair in analogOuts:
if dacpair[0] == dac_id:
self._device = dacpair[1]
self._channel = dacpair[2]
if self._device is None:
raise RuntimeError("No AnalogOut device found for the given ID")
device_path = os.path.join(
self._sysfs_path, self._device_path.format(self._device)
)
if not os.path.isdir(device_path):
raise ValueError(
"AnalogOut device does not exist, check that the required modules are loaded."
)
@property
def value(self):
"""Return an error. This is output only."""
# emulate what CircuitPython does
raise AttributeError("unreadable attribute")
@value.setter
def value(self, value):
"""Write to the DAC"""
path = os.path.join(
self._sysfs_path,
self._device_path.format(self._device),
self._channel_path.format(self._channel),
)
with open(path, "w") as analog_out:
return analog_out.write(value + "\n")
def deinit(self):
self._device = None
self._channel = None
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/generic_linux/sysfs_analogout.py
| 0.703957 | 0.173954 |
sysfs_analogout.py
|
pypi
|
import os
from adafruit_blinka import ContextManaged
try:
from microcontroller.pin import analogIns
except ImportError:
raise RuntimeError("No Analog Inputs defined for this board") from ImportError
class AnalogIn(ContextManaged):
"""Analog Input Class"""
# Sysfs paths
_sysfs_path = "/sys/bus/iio/devices/"
_device_path = "iio:device{}"
# Channel paths
_channel_path = "in_voltage{}_raw"
_scale_path = "in_voltage_scale"
def __init__(self, adc_id):
"""Instantiate an AnalogIn object and verify the sysfs IIO
corresponding to the specified channel and pin.
Args:
adc_id (int): Analog Input ID as defined in microcontroller.pin
Returns:
AnalogIn: AnalogIn object.
Raises:
TypeError: if `channel` or `pin` types are invalid.
ValueError: if AnalogIn channel does not exist.
"""
self.id = adc_id
self._device = None
self._channel = None
self._open(adc_id)
def __enter__(self):
return self
def _open(self, adc_id):
self._device = None
for adcpair in analogIns:
if adcpair[0] == adc_id:
self._device = adcpair[1]
self._channel = adcpair[2]
if self._device is None:
raise RuntimeError("No AnalogIn device found for the given ID")
device_path = os.path.join(
self._sysfs_path, self._device_path.format(self._device)
)
if not os.path.isdir(device_path):
raise ValueError(
"AnalogIn device does not exist, check that the required modules are loaded."
)
@property
def value(self):
"""Read the ADC and return the value as an integer"""
path = os.path.join(
self._sysfs_path,
self._device_path.format(self._device),
self._channel_path.format(self._channel),
)
with open(path, "r") as analog_in:
return int(analog_in.read().strip())
# pylint: disable=no-self-use
@value.setter
def value(self, value):
# emulate what CircuitPython does
raise AttributeError("'AnalogIn' object has no attribute 'value'")
# pylint: enable=no-self-use
def deinit(self):
self._device = None
self._channel = None
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/generic_linux/sysfs_analogin.py
| 0.661486 | 0.212763 |
sysfs_analogin.py
|
pypi
|
import Adafruit_PureIO.spi as spi
from adafruit_blinka.agnostic import detector
class SPI:
"""SPI Class"""
MSB = 0
LSB = 1
CPHA = 1
CPOL = 2
baudrate = 100000
mode = 0
bits = 8
def __init__(self, portid):
if isinstance(portid, tuple):
self._spi = spi.SPI(device=portid)
else:
self._spi = spi.SPI(device=(portid, 0))
self.clock_pin = None
self.mosi_pin = None
self.miso_pin = None
self.chip = None
# pylint: disable=too-many-arguments,unused-argument
def init(
self,
baudrate=100000,
polarity=0,
phase=0,
bits=8,
firstbit=MSB,
sck=None,
mosi=None,
miso=None,
):
"""Initialize SPI"""
mode = 0
if polarity:
mode |= self.CPOL
if phase:
mode |= self.CPHA
self.baudrate = baudrate
self.mode = mode
self.bits = bits
self.chip = detector.chip
# Pins are not used
self.clock_pin = sck
self.mosi_pin = mosi
self.miso_pin = miso
# pylint: enable=too-many-arguments,unused-argument
# pylint: disable=unnecessary-pass
def set_no_cs(self):
"""Setting so that SPI doesn't automatically set the CS pin"""
# No kernel seems to support this, so we're just going to pass
pass
# pylint: enable=unnecessary-pass
@property
def frequency(self):
"""Return the current baudrate"""
return self.baudrate
def write(self, buf, start=0, end=None):
"""Write data from the buffer to SPI"""
if not buf:
return
if end is None:
end = len(buf)
try:
# self._spi.open(self._port, 0)
self.set_no_cs()
self._spi.max_speed_hz = self.baudrate
self._spi.mode = self.mode
self._spi.bits_per_word = self.bits
self._spi.writebytes(buf[start:end])
# self._spi.close()
except FileNotFoundError:
print("Could not open SPI device - check if SPI is enabled in kernel!")
raise
def readinto(self, buf, start=0, end=None, write_value=0):
"""Read data from SPI and into the buffer"""
if not buf:
return
if end is None:
end = len(buf)
try:
# self._spi.open(self._port, 0)
# self.set_no_cs()
self._spi.max_speed_hz = self.baudrate
self._spi.mode = self.mode
self._spi.bits_per_word = self.bits
data = self._spi.transfer([write_value] * (end - start))
for i in range(end - start): # 'readinto' the given buffer
buf[start + i] = data[i]
# self._spi.close()
except FileNotFoundError:
print("Could not open SPI device - check if SPI is enabled in kernel!")
raise
# pylint: disable=too-many-arguments
def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
if not buffer_out or not buffer_in:
return
if out_end is None:
out_end = len(buffer_out)
if in_end is None:
in_end = len(buffer_in)
if out_end - out_start != in_end - in_start:
raise RuntimeError("Buffer slices must be of equal length.")
try:
# self._spi.open(self._port, 0)
# self.set_no_cs()
self._spi.max_speed_hz = self.baudrate
self._spi.mode = self.mode
self._spi.bits_per_word = self.bits
data = self._spi.transfer(list(buffer_out[out_start : out_end + 1]))
for i in range((in_end - in_start)):
buffer_in[i + in_start] = data[i]
# self._spi.close()
except FileNotFoundError:
print("Could not open SPI device - check if SPI is enabled in kernel!")
raise
# pylint: enable=too-many-arguments
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/generic_linux/spi.py
| 0.640186 | 0.22325 |
spi.py
|
pypi
|
try:
from microcontroller.pin import pwmOuts
except ImportError:
raise RuntimeError("No PWM outputs defined for this board") from ImportError
from microcontroller.pin import Pin
# pylint: disable=unnecessary-pass
class PWMError(IOError):
"""Base class for PWM errors."""
pass
# pylint: enable=unnecessary-pass
class PWMOut:
"""Pulse Width Modulation Output Class"""
# Nova instance
_nova = None
MAX_CYCLE_LEVEL = 1024
def __init__(self, pin, *, frequency=750, duty_cycle=0, variable_frequency=False):
"""Instantiate a PWM object and open the sysfs PWM corresponding to the
specified channel and pin.
Args:
pin (Pin): CircuitPython Pin object to output to
duty_cycle (int) : The fraction of each pulse which is high. 16-bit
frequency (int) : target frequency in Hertz (32-bit)
variable_frequency (bool) : True if the frequency will change over time
Returns:
PWMOut: PWMOut object.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if `channel` or `pin` types are invalid.
ValueError: if PWM channel does not exist.
"""
if PWMOut._nova is None:
# pylint: disable=import-outside-toplevel
from adafruit_blinka.microcontroller.nova import Connection
# pylint: enable=import-outside-toplevel
PWMOut._nova = Connection.getInstance()
PWMOut._nova.setOperationMode(0, "IO")
self._pwmpin = None
self._channel = None
self._enable = False
self._open(pin, duty_cycle, frequency, variable_frequency)
def __del__(self):
self.deinit()
PWMOut._nova.close()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.deinit()
def _open(self, pin, duty=0, freq=750, variable_frequency=False):
self._channel = None
for pwmpair in pwmOuts:
if pwmpair[1] == pin:
self._channel = pwmpair[0][0]
self._pwmpin = pwmpair[0][1]
self._pin = pin
if self._channel is None:
raise RuntimeError("No PWM channel found for this Pin")
if variable_frequency:
print("Variable Frequency is not supported, continuing without it...")
PWMOut._nova.setIOpinMode(self._pwmpin, Pin.PWM)
# set frequency
self.frequency = freq
# set period
self._period = self._get_period()
# set duty
self.duty_cycle = duty
self._set_enabled(True)
def deinit(self):
"""Deinit the Nova PWM."""
# pylint: disable=broad-except
try:
if self._channel is not None:
# self.duty_cycle = 0
self._set_enabled(False) # make to disable before unexport
except Exception as e:
# due to a race condition for which I have not yet been
# able to find the root cause, deinit() often fails
# but it does not effect future usage of the pwm pin
print(
"warning: failed to deinitialize pwm pin {0}:{1} due to: {2}\n".format(
self._channel, self._pwmpin, type(e).__name__
)
)
finally:
self._channel = None
self._pwmpin = None
# pylint: enable=broad-except
def _is_deinited(self):
if self._pwmpin is None:
raise ValueError(
"Object has been deinitialize and can no longer "
"be used. Create a new object."
)
# Mutable properties
def _get_period(self):
return 1.0 / self._get_frequency()
def _set_period(self, period):
if not isinstance(period, (int, float)):
raise TypeError("Invalid period type, should be int or float.")
self._set_frequency(1.0 / period)
period = property(_get_period, _set_period)
"""Get or set the PWM's output period in seconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
def _get_duty_cycle(self):
duty_cycle = Pin._nova.getIOpinValue(self._pwmpin)
# Convert duty cycle to ratio from 0.0 to 1.0
duty_cycle = duty_cycle / PWMOut.MAX_CYCLE_LEVEL
# convert to 16-bit
duty_cycle = int(duty_cycle * 65535)
return duty_cycle
def _set_duty_cycle(self, duty_cycle):
if not isinstance(duty_cycle, (int, float)):
raise TypeError("Invalid duty cycle type, should be int or float.")
# convert from 16-bit
duty_cycle /= 65535.0
if not 0.0 <= duty_cycle <= 1.0:
raise ValueError("Invalid duty cycle value, should be between 0.0 and 1.0.")
# Convert duty cycle from ratio to 1024 levels
duty_cycle = duty_cycle * PWMOut.MAX_CYCLE_LEVEL
# Set duty cycle
# pylint: disable=protected-access
Pin._nova.setIOpinValue(self._pwmpin, duty_cycle)
# pylint: enable=protected-access
duty_cycle = property(_get_duty_cycle, _set_duty_cycle)
"""Get or set the PWM's output duty cycle as a ratio from 0.0 to 1.0.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
ValueError: if value is out of bounds of 0.0 to 1.0.
:type: int, float
"""
def _get_frequency(self):
return int(PWMOut._nova.getIOpinPWMFreq(self._pwmpin).split("PWMFREQ ")[1])
def _set_frequency(self, frequency):
if not isinstance(frequency, (int, float)):
raise TypeError("Invalid frequency type, should be int or float.")
PWMOut._nova.setIOpinPWMFreq(self._pwmpin, frequency)
frequency = property(_get_frequency, _set_frequency)
"""Get or set the PWM's output frequency in Hertz.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
def _get_enabled(self):
enabled = self._enable
if enabled == "1":
return True
if enabled == "0":
return False
raise PWMError(None, 'Unknown enabled value: "%s"' % enabled)
def _set_enabled(self, value):
"""Get or set the PWM's output enabled state.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not bool.
:type: bool
"""
if not isinstance(value, bool):
raise TypeError("Invalid enabled type, should be string.")
self._enable = value
if not self._enable:
self._set_duty_cycle(0.0)
# String representation
def __str__(self):
return "PWM%d, pin %s (freq=%f Hz, duty_cycle=%f%%)" % (
self._pin,
self._pin,
self.frequency,
self.duty_cycle * 100,
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/nova/pwmout.py
| 0.837919 | 0.264637 |
pwmout.py
|
pypi
|
from adafruit_blinka.microcontroller.nova import Connection
class SPI:
"""Custom SPI Class for Binho Nova"""
MSB = 0
BUFFER_PAYLOAD_MAX_LENGTH = 64
WHR_PAYLOAD_MAX_LENGTH = 1024
def __init__(self, clock):
self._nova = Connection.getInstance()
self._nova.setNumericalBase(10)
self._nova.setOperationMode(0, "SPI")
self._nova.setClockSPI(0, clock)
self._nova.setModeSPI(0, 0)
self._nova.setIOpinMode(0, "DOUT")
self._nova.setIOpinMode(1, "DOUT")
self._nova.beginSPI(0)
self._novaCMDVer = "0"
if hasattr(self._nova, "getCommandVer"):
response = self._nova.getCommandVer().split(" ")
if response[0] != "-NG":
self._novaCMDVer = response[1]
# Cpol and Cpha set by mode
# Mode Cpol Cpha
# 0 0 0
# 1 0 1
# 2 1 0
# 3 1 1
def __del__(self):
"""Close Nova on delete"""
self._nova.close()
# pylint: disable=too-many-arguments,unused-argument
def init(
self,
baudrate=1000000,
polarity=0,
phase=0,
bits=8,
firstbit=MSB,
sck=None,
mosi=None,
miso=None,
):
"""Initialize the Port"""
self._nova.setClockSPI(0, baudrate)
self._nova.setModeSPI(0, (polarity << 1) | (phase))
# pylint: enable=too-many-arguments,unused-argument
@staticmethod
def get_received_data(lineOutput):
"""Return any received data"""
return lineOutput.split("RXD ")[1]
@property
def frequency(self):
"""Return the current frequency"""
return self._nova.getClockSPI(0).split("CLK ")[1]
def write(self, buf, start=0, end=None):
"""Write data from the buffer to SPI"""
end = end if end else len(buf)
payloadMaxLength = self.BUFFER_PAYLOAD_MAX_LENGTH
if int(self._novaCMDVer) >= 1:
payloadMaxLength = self.WHR_PAYLOAD_MAX_LENGTH
chunks, rest = divmod(end - start, payloadMaxLength)
for i in range(chunks):
chunk_start = start + i * payloadMaxLength
chunk_end = chunk_start + payloadMaxLength
if int(self._novaCMDVer) >= 1:
self._nova.writeToReadFromSPI(
0, True, False, chunk_end - chunk_start, buf[chunk_start:chunk_end]
)
else:
self._nova.clearBuffer(0)
self._nova.writeToBuffer(0, 0, buf[chunk_start:chunk_end])
self._nova.transferBufferSPI(0, chunk_end - chunk_start + 1)
if rest:
if int(self._novaCMDVer) >= 1:
self._nova.writeToReadFromSPI(0, True, False, rest, buf[-1 * rest :])
else:
self._nova.clearBuffer(0)
self._nova.writeToBuffer(0, 0, buf[-1 * rest :])
self._nova.transferBufferSPI(0, rest)
def readinto(self, buf, start=0, end=None, write_value=0):
"""Read data from SPI and into the buffer"""
end = end if end else len(buf)
if int(self._novaCMDVer) >= 1:
chunks, rest = divmod(end - start, self.WHR_PAYLOAD_MAX_LENGTH)
i = 0
for i in range(chunks):
chunk_start = start + i * self.WHR_PAYLOAD_MAX_LENGTH
chunk_end = chunk_start + self.WHR_PAYLOAD_MAX_LENGTH
result = self._nova.writeToReadFromSPI(
0, False, True, chunk_end - chunk_start, write_value
)
if result != "-NG":
resp = result.split(" ")
resp = resp[2]
# loop over half of resp len as we're reading 2 chars at a time to form a byte
loops = int(len(resp) / 2)
for j in range(loops):
buf[(i * self.WHR_PAYLOAD_MAX_LENGTH) + start + j] = int(
resp[j * 2] + resp[j * 2 + 1], 16
)
else:
raise RuntimeError(
"Received error response from Binho Nova, result = " + result
)
if rest:
result = self._nova.writeToReadFromSPI(
0, False, True, rest, write_value
)
if result != "-NG":
resp = result.split(" ")
resp = resp[2]
# loop over half of resp len as we're reading 2 chars at a time to form a byte
loops = int(len(resp) / 2)
for j in range(loops):
buf[(i * self.WHR_PAYLOAD_MAX_LENGTH) + start + j] = int(
resp[j * 2] + resp[j * 2 + 1], 16
)
else:
raise RuntimeError(
"Received error response from Binho Nova, result = " + result
)
else:
for i in range(start, end):
buf[start + i] = int(
self.get_received_data(self._nova.transferSPI(0, write_value))
)
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
out_end = out_end if out_end else len(buffer_out)
in_end = in_end if in_end else len(buffer_in)
readlen = in_end - in_start
writelen = out_end - out_start
if readlen > writelen:
# resize out and pad with 0's
tmp = bytearray(buffer_out)
tmp.extend([0] * (readlen - len(buffer_out)))
buffer_out = tmp
if int(self._novaCMDVer) >= 1:
chunks, rest = divmod(len(buffer_out), self.WHR_PAYLOAD_MAX_LENGTH)
i = 0
for i in range(chunks):
chunk_start = out_start + i * self.WHR_PAYLOAD_MAX_LENGTH
chunk_end = chunk_start + self.WHR_PAYLOAD_MAX_LENGTH
result = self._nova.writeToReadFromSPI(
0,
True,
True,
chunk_end - chunk_start,
buffer_out[chunk_start:chunk_end],
)
if result != "-NG":
resp = result.split(" ")
resp = resp[2]
# loop over half of resp len as we're reading 2 chars at a time to form a byte
loops = int(len(resp) / 2)
for j in range(loops):
buffer_in[
(i * self.WHR_PAYLOAD_MAX_LENGTH) + in_start + j
] = int(resp[j * 2] + resp[j * 2 + 1], 16)
else:
raise RuntimeError(
"Received error response from Binho Nova, result = " + result
)
if rest:
result = self._nova.writeToReadFromSPI(
0, True, True, rest, buffer_out[-1 * rest :]
)
if result != "-NG":
resp = result.split(" ")
resp = resp[2]
# loop over half of resp len as we're reading 2 chars at a time to form a byte
loops = int(len(resp) / 2)
for j in range(loops):
buffer_in[
(i * self.WHR_PAYLOAD_MAX_LENGTH) + in_start + j
] = int(resp[j * 2] + resp[j * 2 + 1], 16)
else:
raise RuntimeError(
"Received error response from Binho Nova, result = " + result
)
print(buffer_in)
else:
for data_out in buffer_out:
data_in = int(
self.get_received_data(self._nova.transferSPI(0, data_out))
)
if i < readlen:
buffer_in[in_start + i] = data_in
i += 1
# pylint: enable=too-many-arguments,too-many-locals,too-many-branches
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/nova/spi.py
| 0.724091 | 0.167287 |
spi.py
|
pypi
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # Use BCM pins D4 = GPIO #4
GPIO.setwarnings(False) # shh!
class Pin:
"""Pins dont exist in CPython so...lets make our own!"""
IN = 0
OUT = 1
LOW = 0
HIGH = 1
PULL_NONE = 0
PULL_UP = 1
PULL_DOWN = 2
id = None
_value = LOW
_mode = IN
def __init__(self, bcm_number):
self.id = bcm_number
def __repr__(self):
return str(self.id)
def __eq__(self, other):
return self.id == other
def init(self, mode=IN, pull=None):
"""Initialize the Pin"""
if mode is not None:
if mode == self.IN:
self._mode = self.IN
GPIO.setup(self.id, GPIO.IN)
elif mode == self.OUT:
self._mode = self.OUT
GPIO.setup(self.id, GPIO.OUT)
else:
raise RuntimeError("Invalid mode for pin: %s" % self.id)
if pull is not None:
if self._mode != self.IN:
raise RuntimeError("Cannot set pull resistor on output")
if pull == self.PULL_UP:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_UP)
elif pull == self.PULL_DOWN:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
else:
raise RuntimeError("Invalid pull for pin: %s" % self.id)
def value(self, val=None):
"""Set or return the Pin Value"""
if val is not None:
if val == self.LOW:
self._value = val
GPIO.output(self.id, val)
elif val == self.HIGH:
self._value = val
GPIO.output(self.id, val)
else:
raise RuntimeError("Invalid value for pin")
return None
return GPIO.input(self.id)
# Pi 1B rev1 only?
D0 = Pin(0)
D1 = Pin(1)
D2 = Pin(2)
SDA = Pin(2)
D3 = Pin(3)
SCL = Pin(3)
D4 = Pin(4)
D5 = Pin(5)
D6 = Pin(6)
D7 = Pin(7)
CE1 = Pin(7)
D8 = Pin(8)
CE0 = Pin(8)
D9 = Pin(9)
MISO = Pin(9)
D10 = Pin(10)
MOSI = Pin(10)
D11 = Pin(11)
SCLK = Pin(11) # Raspberry Pi naming
SCK = Pin(11) # CircuitPython naming
D12 = Pin(12)
D13 = Pin(13)
D14 = Pin(14)
TXD = Pin(14)
D15 = Pin(15)
RXD = Pin(15)
D16 = Pin(16)
D17 = Pin(17)
D18 = Pin(18)
D19 = Pin(19)
MISO_1 = Pin(19)
D20 = Pin(20)
MOSI_1 = Pin(20)
D21 = Pin(21)
SCLK_1 = Pin(21)
SCK_1 = Pin(21)
D22 = Pin(22)
D23 = Pin(23)
D24 = Pin(24)
D25 = Pin(25)
D26 = Pin(26)
D27 = Pin(27)
D28 = Pin(28)
D29 = Pin(29)
D30 = Pin(30)
D31 = Pin(31)
D32 = Pin(32)
D33 = Pin(33)
D34 = Pin(34)
D35 = Pin(35)
D36 = Pin(36)
D37 = Pin(37)
D38 = Pin(38)
D39 = Pin(39)
D40 = Pin(40)
MISO_2 = Pin(40)
D41 = Pin(41)
MOSI_2 = Pin(41)
D42 = Pin(42)
SCLK_2 = Pin(42)
SCK_2 = Pin(43)
D43 = Pin(43)
D44 = Pin(44)
D45 = Pin(45)
# ordered as spiId, sckId, mosiId, misoId
spiPorts = (
(0, SCLK, MOSI, MISO),
(1, SCLK_1, MOSI_1, MISO_1),
(2, SCLK_2, MOSI_2, MISO_2),
(3, D3, D2, D1), # SPI3 on Pi4/CM4
(4, D7, D6, D5), # SPI4 on Pi4/CM4
(5, D15, D14, D13), # SPI5 on Pi4/CM4
)
# ordered as uartId, txId, rxId
uartPorts = ((1, TXD, RXD),)
# These are the known hardware I2C ports / pins.
# For software I2C ports created with the i2c-gpio overlay, see:
# https://github.com/adafruit/Adafruit_Python_Extended_Bus
i2cPorts = (
(1, SCL, SDA),
(0, D1, D0), # both pi 1 and pi 2 i2c ports!
(10, D45, D44), # internal i2c bus for the CM4
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/bcm283x/pin.py
| 0.480722 | 0.151843 |
pin.py
|
pypi
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # Use BCM pins D4 = GPIO #4
GPIO.setwarnings(False) # shh!
# pylint: disable=unnecessary-pass
class PWMError(IOError):
"""Base class for PWM errors."""
pass
# pylint: enable=unnecessary-pass
class PWMOut:
"""Pulse Width Modulation Output Class"""
def __init__(self, pin, *, frequency=500, duty_cycle=0, variable_frequency=False):
self._pwmpin = None
self._period = 0
self._open(pin, duty_cycle, frequency, variable_frequency)
def __del__(self):
self.deinit()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.deinit()
def _open(self, pin, duty=0, freq=500, variable_frequency=False):
self._pin = pin
GPIO.setup(pin.id, GPIO.OUT)
self._pwmpin = GPIO.PWM(pin.id, freq)
if variable_frequency:
print("Variable Frequency is not supported, continuing without it...")
# set frequency
self.frequency = freq
# set duty
self.duty_cycle = duty
self.enabled = True
def deinit(self):
"""Deinit the PWM."""
if self._pwmpin is not None:
self._pwmpin.stop()
GPIO.cleanup(self._pin.id)
self._pwmpin = None
def _is_deinited(self):
if self._pwmpin is None:
raise ValueError(
"Object has been deinitialize and can no longer "
"be used. Create a new object."
)
@property
def period(self):
"""Get or set the PWM's output period in seconds.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
return 1.0 / self.frequency
@period.setter
def period(self, period):
if not isinstance(period, (int, float)):
raise TypeError("Invalid period type, should be int or float.")
self.frequency = 1.0 / period
@property
def duty_cycle(self):
"""Get or set the PWM's output duty cycle which is the fraction of
each pulse which is high. 16-bit
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
ValueError: if value is out of bounds of 0.0 to 1.0.
:type: int, float
"""
return int(self._duty_cycle * 65535)
@duty_cycle.setter
def duty_cycle(self, duty_cycle):
if not isinstance(duty_cycle, (int, float)):
raise TypeError("Invalid duty cycle type, should be int or float.")
if not 0 <= duty_cycle <= 65535:
raise ValueError("Invalid duty cycle value, should be between 0 and 65535")
# convert from 16-bit
duty_cycle /= 65535.0
self._duty_cycle = duty_cycle
self._pwmpin.ChangeDutyCycle(round(self._duty_cycle * 100))
@property
def frequency(self):
"""Get or set the PWM's output frequency in Hertz.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not int or float.
:type: int, float
"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
if not isinstance(frequency, (int, float)):
raise TypeError("Invalid frequency type, should be int or float.")
self._pwmpin.ChangeFrequency(round(frequency))
self._frequency = frequency
@property
def enabled(self):
"""Get or set the PWM's output enabled state.
Raises:
PWMError: if an I/O or OS error occurs.
TypeError: if value type is not bool.
:type: bool
"""
return self._enabled
@enabled.setter
def enabled(self, value):
if not isinstance(value, bool):
raise TypeError("Invalid enabled type, should be string.")
if value:
self._pwmpin.start(round(self._duty_cycle * 100))
else:
self._pwmpin.stop()
self._enabled = value
# String representation
def __str__(self):
return "pin %s (freq=%f Hz, duty_cycle=%f%%)" % (
self._pin,
self.frequency,
self.duty_cycle,
)
|
/samourai-Adafruit-Blinka-0.0.1a0.tar.gz/samourai-Adafruit-Blinka-0.0.1a0/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PWMOut.py
| 0.863031 | 0.28626 |
PWMOut.py
|
pypi
|
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DPS310.git"
# Common imports; remove if unused or pylint will complain
import math
from time import sleep
import adafruit_bus_device.i2c_device as i2c_device
from adafruit_register.i2c_struct import UnaryStruct, ROUnaryStruct
from adafruit_register.i2c_bit import RWBit, ROBit
from adafruit_register.i2c_bits import RWBits, ROBits
_DPS310_DEFAULT_ADDRESS = 0x77 # DPS310 default i2c address
_DPS310_DEVICE_ID = 0x10 # DPS310 device identifier
_DPS310_PRSB2 = 0x00 # Highest byte of pressure data
_DPS310_TMPB2 = 0x03 # Highest byte of temperature data
_DPS310_PRSCFG = 0x06 # Pressure configuration
_DPS310_TMPCFG = 0x07 # Temperature configuration
_DPS310_MEASCFG = 0x08 # Sensor configuration
_DPS310_CFGREG = 0x09 # Interrupt/FIFO configuration
_DPS310_RESET = 0x0C # Soft reset
_DPS310_PRODREVID = 0x0D # Register that contains the part ID
_DPS310_TMPCOEFSRCE = 0x28 # Temperature calibration src
# pylint: disable=no-member,unnecessary-pass
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"""Add CV values to the class"""
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"""Validate that a given value is a member"""
return value in cls.string
class Mode(CV):
"""Options for ``mode``
+--------------------------+------------------------------------------------------------------+
| Mode | Description |
+--------------------------+------------------------------------------------------------------+
| ``Mode.IDLE`` | Puts the sensor into a shutdown state |
+--------------------------+------------------------------------------------------------------+
| ``Mode.ONE_PRESSURE`` | Setting `mode` to ``Mode.ONE_PRESSURE`` takes a single pressure |
| | measurement then switches to ``Mode.IDLE`` |
+--------------------------+------------------------------------------------------------------+
| ``Mode.ONE_TEMPERATURE`` | Setting `mode` to ``Mode.ONE_TEMPERATURE`` takes a single |
| | temperature measurement then switches to ``Mode.IDLE`` |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_PRESSURE`` | Take pressure measurements at the current `pressure_rate`. |
| | `temperature` will not be updated |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_TEMP`` | Take temperature measurements at the current `temperature_rate`. |
| | `pressure` will not be updated |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_PRESTEMP`` | Take temperature and pressure measurements at the current |
| | `pressure_rate` and `temperature_rate` |
+--------------------------+------------------------------------------------------------------+
"""
pass # pylint: disable=unnecessary-pass
Mode.add_values(
(
("IDLE", 0, "Idle", None),
("ONE_PRESSURE", 1, "One-Shot Pressure", None),
("ONE_TEMPERATURE", 2, "One-Shot Temperature", None),
("CONT_PRESSURE", 5, "Continuous Pressure", None),
("CONT_TEMP", 6, "Continuous Temperature", None),
("CONT_PRESTEMP", 7, "Continuous Pressure & Temperature", None),
)
)
class Rate(CV):
"""Options for :attr:`pressure_rate` and :attr:`temperature_rate`"""
pass
Rate.add_values(
(
("RATE_1_HZ", 0, 1, None),
("RATE_2_HZ", 1, 2, None),
("RATE_4_HZ", 2, 4, None),
("RATE_8_HZ", 3, 8, None),
("RATE_16_HZ", 4, 16, None),
("RATE_32_HZ", 5, 32, None),
("RATE_64_HZ", 6, 64, None),
("RATE_128_HZ", 7, 128, None),
)
)
class SampleCount(CV):
"""Options for :attr:`temperature_oversample_count` and :attr:`pressure_oversample_count`"""
pass
SampleCount.add_values(
(
("COUNT_1", 0, 1, None),
("COUNT_2", 1, 2, None),
("COUNT_4", 2, 4, None),
("COUNT_8", 3, 8, None),
("COUNT_16", 4, 16, None),
("COUNT_32", 5, 32, None),
("COUNT_64", 6, 64, None),
("COUNT_128", 7, 128, None),
)
)
# pylint: enable=unnecessary-pass
class DPS310:
# pylint: disable=too-many-instance-attributes
"""Library for the DPS310 Precision Barometric Pressure Sensor.
:param ~busio.I2C i2c_bus: The I2C bus the DPS310 is connected to.
:param int address: The I2C device address. Defaults to :const:`0x77`
**Quickstart: Importing and using the DPS310**
Here is an example of using the :class:`DPS310` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
import adafruit_dps310
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
dps310 = adafruit_dps310.DPS310(i2c)
Now you have access to the :attr:`temperature` and :attr:`pressure` attributes.
.. code-block:: python
temperature = dps310.temperature
pressure = dps310.pressure
"""
# Register definitions
_device_id = ROUnaryStruct(_DPS310_PRODREVID, ">B")
_reset_register = UnaryStruct(_DPS310_RESET, ">B")
_mode_bits = RWBits(3, _DPS310_MEASCFG, 0)
_pressure_ratebits = RWBits(3, _DPS310_PRSCFG, 4)
_pressure_osbits = RWBits(4, _DPS310_PRSCFG, 0)
_temp_ratebits = RWBits(3, _DPS310_TMPCFG, 4)
_temp_osbits = RWBits(4, _DPS310_TMPCFG, 0)
_temp_measurement_src_bit = RWBit(_DPS310_TMPCFG, 7)
_pressure_shiftbit = RWBit(_DPS310_CFGREG, 2)
_temp_shiftbit = RWBit(_DPS310_CFGREG, 3)
_coefficients_ready = RWBit(_DPS310_MEASCFG, 7)
_sensor_ready = RWBit(_DPS310_MEASCFG, 6)
_temp_ready = RWBit(_DPS310_MEASCFG, 5)
_pressure_ready = RWBit(_DPS310_MEASCFG, 4)
_raw_pressure = ROBits(24, _DPS310_PRSB2, 0, 3, lsb_first=False)
_raw_temperature = ROBits(24, _DPS310_TMPB2, 0, 3, lsb_first=False)
_calib_coeff_temp_src_bit = ROBit(_DPS310_TMPCOEFSRCE, 7)
_reg0e = RWBits(8, 0x0E, 0)
_reg0f = RWBits(8, 0x0F, 0)
_reg62 = RWBits(8, 0x62, 0)
def __init__(self, i2c_bus, address=_DPS310_DEFAULT_ADDRESS):
self.i2c_device = i2c_device.I2CDevice(i2c_bus, address)
if self._device_id != _DPS310_DEVICE_ID:
raise RuntimeError("Failed to find DPS310 - check your wiring!")
self._pressure_scale = None
self._temp_scale = None
self._c0 = None
self._c1 = None
self._c00 = None
self._c00 = None
self._c10 = None
self._c10 = None
self._c01 = None
self._c11 = None
self._c20 = None
self._c21 = None
self._c30 = None
self._oversample_scalefactor = (
524288,
1572864,
3670016,
7864320,
253952,
516096,
1040384,
2088960,
)
self.sea_level_pressure = 1013.25
"""Pressure in hectoPascals at sea level. Used to calibrate :attr:`altitude`."""
self.initialize()
def initialize(self):
"""Initialize the sensor to continuous measurement"""
self.reset()
self.pressure_rate = Rate.RATE_64_HZ
self.pressure_oversample_count = SampleCount.COUNT_64
self.temperature_rate = Rate.RATE_64_HZ
self.temperature_oversample_count = SampleCount.COUNT_64
self.mode = Mode.CONT_PRESTEMP
# wait until we have at least one good measurement
self.wait_temperature_ready()
self.wait_pressure_ready()
# (https://github.com/Infineon/DPS310-Pressure-Sensor#temperature-measurement-issue)
# similar to DpsClass::correctTemp(void) from infineon's c++ library
def _correct_temp(self):
"""Correct temperature readings on ICs with a fuse bit problem"""
self._reg0e = 0xA5
self._reg0f = 0x96
self._reg62 = 0x02
self._reg0e = 0
self._reg0f = 0
# perform a temperature measurement
# the most recent temperature will be saved internally
# and used for compensation when calculating pressure
_unused = self._raw_temperature
def reset(self):
"""Reset the sensor"""
self._reset_register = 0x89
# wait for hardware reset to finish
sleep(0.010)
while not self._sensor_ready:
sleep(0.001)
self._correct_temp()
self._read_calibration()
# make sure we're using the temperature source used for calibration
self._temp_measurement_src_bit = self._calib_coeff_temp_src_bit
@property
def pressure(self):
"""Returns the current pressure reading in hPA"""
temp_reading = self._raw_temperature
raw_temperature = self._twos_complement(temp_reading, 24)
pressure_reading = self._raw_pressure
raw_pressure = self._twos_complement(pressure_reading, 24)
_scaled_rawtemp = raw_temperature / self._temp_scale
_temperature = _scaled_rawtemp * self._c1 + self._c0 / 2.0
p_red = raw_pressure / self._pressure_scale
pres_calc = (
self._c00
+ p_red * (self._c10 + p_red * (self._c20 + p_red * self._c30))
+ _scaled_rawtemp * (self._c01 + p_red * (self._c11 + p_red * self._c21))
)
final_pressure = pres_calc / 100
return final_pressure
@property
def altitude(self):
"""The altitude based on the sea level pressure (:attr:`sea_level_pressure`) -
which you must enter ahead of time)"""
return 44330 * (1.0 - math.pow(self.pressure / self.sea_level_pressure, 0.1903))
@property
def temperature(self):
"""The current temperature reading in degrees Celsius"""
_scaled_rawtemp = self._raw_temperature / self._temp_scale
_temperature = _scaled_rawtemp * self._c1 + self._c0 / 2.0
return _temperature
@property
def temperature_ready(self):
"""Returns true if there is a temperature reading ready"""
return self._temp_ready
def wait_temperature_ready(self):
"""Wait until a temperature measurement is available.
To avoid waiting indefinitely this function raises an
error if the sensor isn't configured for temperate measurements,
ie. ``Mode.ONE_TEMPERATURE``, ``Mode.CONT_TEMP`` or ``Mode.CONT_PRESTEMP``.
See the `Mode` documentation for details.
"""
if (
self._mode_bits == Mode.IDLE
or self._mode_bits == Mode.ONE_PRESSURE
or self._mode_bits == Mode.CONT_PRESSURE
):
raise RuntimeError(
"Sensor mode is set to idle or pressure measurement,\
can't wait for a temperature measurement"
)
while self._temp_ready is False:
sleep(0.001)
@property
def pressure_ready(self):
"""Returns true if pressure readings are ready"""
return self._pressure_ready
def wait_pressure_ready(self):
"""Wait until a pressure measurement is available
To avoid waiting indefinitely this function raises an
error if the sensor isn't configured for pressure measurements,
ie. ``Mode.ONE_PRESSURE``, ``Mode.CONT_PRESSURE`` or ``Mode.CONT_PRESTEMP``
See the `Mode` documentation for details.
"""
if (
self._mode_bits == Mode.IDLE
or self._mode_bits == Mode.ONE_TEMPERATURE
or self._mode_bits == Mode.CONT_TEMP
):
raise RuntimeError(
"Sensor mode is set to idle or temperature measurement,\
can't wait for a pressure measurement"
)
while self._pressure_ready is False:
sleep(0.001)
@property
def mode(self):
"""The measurement mode. Must be a `Mode`. See the `Mode` documentation for details"""
return self._mode_bits
@mode.setter
def mode(self, value):
if not Mode.is_valid(value):
raise AttributeError("mode must be an `Mode`")
self._mode_bits = value
@property
def pressure_rate(self):
"""Configure the pressure measurement rate. Must be a `Rate`"""
return self._pressure_ratebits
@pressure_rate.setter
def pressure_rate(self, value):
if not Rate.is_valid(value):
raise AttributeError("pressure_rate must be a Rate")
self._pressure_ratebits = value
@property
def pressure_oversample_count(self):
"""The number of samples taken per pressure measurement. Must be a ``SampleCount``"""
return self._pressure_osbits
@pressure_oversample_count.setter
def pressure_oversample_count(self, value):
if not SampleCount.is_valid(value):
raise AttributeError("pressure_oversample_count must be a SampleCount")
self._pressure_osbits = value
self._pressure_shiftbit = value > SampleCount.COUNT_8
self._pressure_scale = self._oversample_scalefactor[value]
@property
def temperature_rate(self):
"""Configure the temperature measurement rate. Must be a `Rate`"""
return self._temp_ratebits
@temperature_rate.setter
def temperature_rate(self, value):
if not Rate.is_valid(value):
raise AttributeError("temperature_rate must be a Rate")
self._temp_ratebits = value
@property
def temperature_oversample_count(self):
"""The number of samples taken per temperature measurement. Must be a ``SampleCount``"""
return self._temp_osbits
@temperature_oversample_count.setter
def temperature_oversample_count(self, value):
if not SampleCount.is_valid(value):
raise AttributeError("temperature_oversample_count must be a SampleCount")
self._temp_osbits = value
self._temp_scale = self._oversample_scalefactor[value]
self._temp_shiftbit = value > SampleCount.COUNT_8
@staticmethod
def _twos_complement(val, bits):
if val & (1 << (bits - 1)):
val -= 1 << bits
return val
def _read_calibration(self):
while not self._coefficients_ready:
sleep(0.001)
buffer = bytearray(19)
coeffs = [None] * 18
for offset in range(18):
buffer = bytearray(2)
buffer[0] = 0x10 + offset
with self.i2c_device as i2c:
i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)
coeffs[offset] = buffer[1]
self._c0 = (coeffs[0] << 4) | ((coeffs[1] >> 4) & 0x0F)
self._c0 = self._twos_complement(self._c0, 12)
self._c1 = self._twos_complement(((coeffs[1] & 0x0F) << 8) | coeffs[2], 12)
self._c00 = (coeffs[3] << 12) | (coeffs[4] << 4) | ((coeffs[5] >> 4) & 0x0F)
self._c00 = self._twos_complement(self._c00, 20)
self._c10 = ((coeffs[5] & 0x0F) << 16) | (coeffs[6] << 8) | coeffs[7]
self._c10 = self._twos_complement(self._c10, 20)
self._c01 = self._twos_complement((coeffs[8] << 8) | coeffs[9], 16)
self._c11 = self._twos_complement((coeffs[10] << 8) | coeffs[11], 16)
self._c20 = self._twos_complement((coeffs[12] << 8) | coeffs[13], 16)
self._c21 = self._twos_complement((coeffs[14] << 8) | coeffs[15], 16)
self._c30 = self._twos_complement((coeffs[16] << 8) | coeffs[17], 16)
|
/samourai-circuitpython-dps310-0.0.1b3.tar.gz/samourai-circuitpython-dps310-0.0.1b3/adafruit_dps310.py
| 0.695441 | 0.397149 |
adafruit_dps310.py
|
pypi
|
# Python 2/3
try:
unicode = unicode
except NameError:
unicode = str
import re
GREEK = [
(u'?alpha', u'α'),
(u'?beta', u'β'),
(u'?gamma', u'γ'),
(u'?delta', u'δ'),
(u'?epsilon', u'ε'),
(u'?zeta', u'ζ'),
(u'?theta', u'θ'),
(u'?iota', u'ι'),
(u'?kappa', u'κ'),
(u'?lambda', u'λ'),
(u'?mu', u'μ'),
(u'?nu', u'ν'),
(u'?xi', u'ξ'),
(u'?omicron', u'ο'),
(u'?pi', u'π'),
(u'?rho', u'ρ'),
(u'?sigma', u'σ'),
(u'?tau', u'τ'),
(u'?upsilon', u'υ'),
(u'?phi', u'φ'),
(u'?chi', u'χ'),
(u'?psi', u'ψ'),
(u'?omega', u'ω'),
]
class Scanner(object):
def __init__(self, text):
self.text = unicode(text)
self.token = None
self.type = None
self.pos = 0
self.scan()
def near_text(self, length=10):
return self.text[self.pos:self.pos + length]
def scan_pattern(self, pattern, type, token_group=1, rest_group=2):
pattern = r'(' + pattern + r')'
regexp = re.compile(pattern, flags=re.DOTALL)
match = regexp.match(self.text, pos=self.pos)
if not match:
return False
else:
self.type = type
self.token = match.group(token_group)
self.pos += len(self.token)
return True
def scan(self):
self.scan_pattern(r'[ \t\n\r]*', 'whitespace')
while self.scan_pattern(r'\/\/.*?[\n\r]', 'comment'):
self.scan_pattern(r'[ \t\n\r]*', 'whitespace')
if self.pos >= len(self.text):
self.token = None
self.type = 'EOF'
return
if self.scan_pattern(u'\\~|→|=|¬|∧|∨', 'operator'):
return
# Note that '?' appears in both `variable` and in `punct`.
# This is acceptable however, because in normal prose, the
# '?' symbol is never immediately followed by a letter.
if self.scan_pattern(r'\?[a-zA-Z_]+', 'variable'):
return
if self.scan_pattern(r'\,|\.|\;|\:|\?|\!|\"', 'punct'):
return
if self.scan_pattern(r'\(|\)|\{|\}|\[|\]', 'bracket'):
return
if self.scan_pattern(r"[a-zA-Z_]['a-zA-Z0-9_-]*", 'word'):
return
if self.scan_pattern(u'[αβγδεζθικλμνξοπρστυφχψω]', 'variable'):
for varname, letter in GREEK:
if letter == self.token:
self.token = varname
break
assert self.token.startswith('?'), repr(self.token)
return
if self.scan_pattern(r'.', 'unknown character'):
return
else:
raise AssertionError("this should never happen, self.text=(%s), self.pos=(%s)" % (self.text, self.pos))
def expect(self, token):
if self.token == token:
self.scan()
else:
raise SyntaxError(u"Expected '%s', but found '%s' (near '%s')" %
(token, self.token, self.near_text()))
def on(self, *tokens):
return self.token in tokens
def on_type(self, *types):
return self.type in types
def check_type(self, *types):
if not self.on_type(*types):
raise SyntaxError(u"Expected %s, but found %s ('%s') (near '%s')" %
(types, self.type, self.token, self.near_text()))
def consume(self, *tokens):
if self.token in tokens:
self.scan()
return True
else:
return False
|
/samovar_py-0.6-py3-none-any.whl/samovar/scanner.py
| 0.53777 | 0.327964 |
scanner.py
|
pypi
|
from samovar.ast import World, Scenario, Rule, Cond, Assert, Retract
from samovar.terms import Term, Var
from samovar.scanner import Scanner
# World ::= {Scenario}.
# Scenario ::= "scenario" Atom "{" {Import | Proposition | Rule | Goal} ["." | ","] "}".
# Import ::= "import" Atom.
# Goal ::= "goal" Cond.
# Proposition ::= Term.
# Rule ::= Cond {Var | Atom | Punct} Cond.
# Cond ::= "[" Expr {"," Expr} ["where" {Var "=" Term [","]}"]".
# Expr ::= Term | NotSym Term.
# Term ::= Var | Atom ["(" Term {AndSym Term} ")"].
# Var ::= Qmark | Greek.
# Qmark ::= '?' Atom.
# Greek ::= <<one of: αβγδεζθικλμνξοπρστυφχψω>>.
# Atom ::= <<A-Za-z_>> <<A-Za-z0-9_-'>>*.
# Punct ::= <<"',.;:?!>>.
# NotSym ::= '~' | '¬'.
# AndSym ::= ',' | '∧'.
class SamovarSyntaxError(ValueError):
pass
def variables_in_cond(cond):
vars_ = set()
for expr in cond.exprs:
expr.term.collect_variables(vars_)
for key, value in cond.bindings.items():
vars_.add(Var(key))
return vars_
class Parser(object):
def __init__(self, text):
self.scanner = Scanner(text)
self.scenario_map = {}
def world(self):
scenarios = []
while self.scanner.on('scenario'):
scenario = self.scenario()
self.scenario_map[scenario.name] = scenario
scenarios.append(scenario)
return World(scenarios=scenarios)
def scenario(self):
propositions = []
rules = []
goal = None
self.scanner.expect('scenario')
self.scanner.check_type('word')
name = self.scanner.token
self.scanner.scan()
self.scanner.expect('{')
while not self.scanner.on('}'):
if self.scanner.consume('import'):
self.scanner.check_type('word')
from_name = self.scanner.token
self.scanner.scan()
from_scenario = self.scenario_map[from_name]
rules.extend(from_scenario.rules)
propositions.extend(from_scenario.propositions)
elif self.scanner.consume('goal'):
assert goal is None
goal = self.cond()
elif self.scanner.on('['):
rules.append(self.rule())
else:
propositions.append(self.proposition())
self.scanner.consume('.')
self.scanner.consume(',')
self.scanner.expect('}')
return Scenario(name=name, propositions=propositions, rules=rules, goal=goal)
def proposition(self):
return self.term()
def rule(self):
words = []
pre = self.cond()
while not self.scanner.on('['):
words.append(self.word())
post = self.cond()
if post.bindings:
raise SamovarSyntaxError("Consequences of a rule cannot include a `where` clause")
pre_variables = variables_in_cond(pre)
words_variables = set(w for w in words if isinstance(w, Var))
text = ' '.join([str(w) for w in words])
if '?_' in [w.name for w in words_variables]:
raise SamovarSyntaxError('Text "{}" contains wildcard'.format(text))
extra_vars_in_words = words_variables - pre_variables
if extra_vars_in_words:
extra_vars = ', '.join([str(v) for v in sorted(extra_vars_in_words)])
raise SamovarSyntaxError('Text "{}" contains unbound variables: {}'.format(text, extra_vars))
post_variables = variables_in_cond(post)
if '?_' in [w.name for w in post_variables]:
raise SamovarSyntaxError("Consequences contains wildcard")
extra_vars_in_post = post_variables - pre_variables
if extra_vars_in_post:
extra_vars = ', '.join([str(v) for v in sorted(extra_vars_in_words)])
raise SamovarSyntaxError("Consequences contains unbound variables: {}".format(extra_vars))
return Rule(pre=pre, words=words, post=post)
def cond(self):
exprs = []
bindings = {}
self.scanner.expect('[')
if not self.scanner.on(']') and not self.scanner.on('where'):
exprs.append(self.expr())
while self.scanner.consume(',', u'∧'):
exprs.append(self.expr())
if self.scanner.consume('where'):
while not self.scanner.on(']'):
v = self.var()
self.scanner.expect('=')
t = self.term()
bindings[v.name] = t
self.scanner.consume(',', u'∧')
self.scanner.expect(']')
return Cond(exprs=exprs, bindings=bindings)
def expr(self):
if self.scanner.consume('~', u'¬', '!'):
return Retract(term=self.term())
else:
return Assert(term=self.term())
def term(self):
if self.scanner.on_type('variable'):
return self.var()
self.scanner.check_type('word')
constructor = self.scanner.token
self.scanner.scan()
subterms = []
if self.scanner.consume('('):
subterms.append(self.term())
while self.scanner.consume(','):
subterms.append(self.term())
self.scanner.expect(')')
return Term(constructor, *subterms)
def word(self):
if self.scanner.on_type('variable'):
return self.var()
self.scanner.check_type('word', 'punct', 'operator')
constructor = self.scanner.token
self.scanner.scan()
return Term(constructor)
def var(self):
self.scanner.check_type('variable')
name = self.scanner.token
self.scanner.scan()
v = Var(name)
return v
|
/samovar_py-0.6-py3-none-any.whl/samovar/parser.py
| 0.630912 | 0.302211 |
parser.py
|
pypi
|
import socket
from samp_client.constants import *
from samp_client.exceptions import SampError, RconError, InvalidRconPassword, ConnectionError
from samp_client.models import ServerInfo, Rule, Client, ClientDetail, RConPlayer
from samp_client.utils import encode_bytes, decode_int, decode_string, build_rcon_command, parse_server_var
class SampClient(object):
"""
Client class for communicating with SA-MP Query API
http://wiki.sa-mp.com/wiki/Query_Mechanism
"""
timeout = 1.0
socket_cls = socket.socket
def __init__(self, address='127.0.0.1', port=7777, rcon_password=None):
super(SampClient, self).__init__()
assert isinstance(address, str)
self.address = address
self.port = int(port)
self.rcon_password = rcon_password
def connect(self):
try:
self.address = socket.gethostbyname(self.address)
self.header = MSG_PREFIX + encode_bytes(*map(int, self.address.split('.'))) + encode_bytes(self.port & 0xFF, self.port >> 8 & 0xFF)
self.socket = self.socket_cls(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.settimeout(self.timeout)
return self
except socket.error as e:
raise ConnectionError(e)
def disconnect(self):
self.socket.close()
del self.socket
def __enter__(self):
return self.connect()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, 'socket'):
self.disconnect()
def send_request(self, opcode, extras=b'', return_response=True):
body = self.header + opcode + extras
self.socket.sendto(body, (self.address, self.port))
if return_response:
return self.receive()
def receive(self, buffersize=4096, strip_header=True):
try:
response = self.socket.recv(buffersize)
# Strip header from the response
return response[11:] if strip_header else response
except socket.timeout as e:
pass
except socket.error as e:
raise ConnectionError(e)
def get_server_info(self):
response = self.send_request(OPCODE_INFO)
offset = 0
hostname = decode_string(response, 5, 4)
offset += len(hostname)
gamemode = decode_string(response, offset + 9, 4)
offset += len(gamemode)
language = decode_string(response, offset + 13, 4)
return ServerInfo(
password=bool(response[0]),
players=decode_int(response[1:3]),
max_players=decode_int(response[3:5]),
hostname=hostname,
gamemode=gamemode,
language=language,
)
def get_server_rules(self):
response = self.send_request(OPCODE_RULES)
num_rules = decode_int(response[:2])
offset = 2
result = []
for n in range(num_rules):
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
value = decode_string(response, offset, len_bytes=1)
offset += 1 + len(value)
rule = Rule(
name=str(name),
value=value,
)
result.append(rule)
return result
def get_server_rules_dict(self):
return {rule.name: rule.value for rule in self.get_server_rules()}
def get_server_clients(self):
response = self.send_request(OPCODE_CLIENTS)
result = []
if response is None:
# SA-MP server will return null if there's a lot of players
# We'll handle this by returning an empty list instead to avoid type error.
return result
num_clients = decode_int(response[:2])
offset = 2
for n in range(num_clients):
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
score = decode_int(response[offset:offset + 4])
offset += 4
client = Client(
name=name,
score=score,
)
result.append(client)
return result
def get_server_clients_detailed(self):
response = self.send_request(OPCODE_CLIENTS_DETAILED)
result = []
if response is None:
# SA-MP server will return null if there's a lot of players
# We'll handle this by returning an empty list instead to avoid type error.
return result
num_clients = decode_int(response[:2])
offset = 2
for n in range(num_clients):
player_id = decode_int(response[offset:offset])
offset += 1
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
score = decode_int(response[offset:offset + 4])
offset += 4
ping = decode_int(response[offset:offset + 4])
offset += 4
detail = ClientDetail(
id=player_id,
name=name,
score=score,
ping=ping,
)
result.append(detail)
return result
def probe_server(self, value='ping'):
if isinstance(value, str):
value = bytes(value, ENCODING)
assert len(value) == 4, 'Value must be exactly 4 characters'
response = self.send_request(OPCODE_PSEUDORANDOM, extras=value)
return response
def validate_server(self, value='ping'):
"""
Sends a query to server and validates that response matches the requested value
"""
response = self.probe_server(value)
if response != value:
raise SampError('Server returned {} instead of {}'.format(response, value))
def is_online(self):
"""
Checks whether server is online
:return: True if online, False if offline (connection error)
"""
value = b'test'
try:
return self.probe_server(value=value) == value
except ConnectionError:
return False
@property
def rcon_password_bytes(self):
"""
password prefixed with its encoded length
"""
if not self.rcon_password:
raise RconError('Rcon password was not provided')
pass_len = len(self.rcon_password)
return encode_bytes(pass_len & 0xFF, pass_len >> 8 & 0xFF) + bytes(self.rcon_password, ENCODING)
def send_rcon_command(self, command, args=tuple(), fetch_response=True):
"""
Send any command to the server
leading whitespace is stripped from the response
:param command: the comand to send
:param args: tuple or list of arguments to be appended to the command. Can be also a string or an int if only one argument is expected.
:param fetch_response: Whether to receive response from server. Set this to False if you're not expecting a response; WARNING: If there is a response and you don't fetch it, it may be output as a response of your next command.
:return list of lines responded from the server or None if fetch_response == False
"""
command = build_rcon_command(command, args)
command_length = encode_bytes(len(command) & 0xFF, len(command) >> 8 & 0xFF)
payload = self.rcon_password_bytes + command_length + command
self.send_request(OPCODE_RCON, extras=payload, return_response=False)
if fetch_response:
result = []
while True:
response = self.receive()
if response is None:
break
line = decode_string(response, 0, 2)
if line:
result.append(line.lstrip())
else:
break
if len(result) == 1 and result[0] == 'Invalid RCON password.':
raise InvalidRconPassword
return result
def rcon_cmdlist(self):
""" List of rcon commands """
return self.send_rcon_command(RCON_CMDLIST)[1:]
def rcon_varlist(self):
""" List of server variables """
vars = self.send_rcon_command(RCON_VARLIST)[1:]
return [parse_server_var(var) for var in vars]
def rcon_varlist_dict(self):
""" Server vars as a dictionary mapping variable name to its value """
return {var.name: var.value for var in self.rcon_varlist()}
def rcon_exit(self):
return self.send_rcon_command(RCON_EXIT, fetch_response=False)
def rcon_echo(self, text):
""" Print message to server console and send it back as a string"""
return self.send_rcon_command(RCON_ECHO, args=(text,))[0]
def rcon_set_hostname(self, name):
return self.send_rcon_command(RCON_HOSTNAME, args=(name,), fetch_response=False)
def rcon_get_hostname(self):
response = self.send_rcon_command(RCON_HOSTNAME)[0]
return parse_server_var(response)
def rcon_set_gamemodetext(self, name):
return self.send_rcon_command(RCON_GAMEMODETEXT, args=(name,), fetch_response=False)
def rcon_get_gamemodetext(self):
response = self.send_rcon_command(RCON_GAMEMODETEXT)[0]
return parse_server_var(response)
def rcon_set_mapname(self, name):
return self.send_rcon_command(RCON_MAPNAME, args=(name,), fetch_response=False)
def rcon_get_mapname(self):
response = self.send_rcon_command(RCON_MAPNAME)[0]
return parse_server_var(response)
def rcon_exec(self, filename):
response = self.send_rcon_command(RCON_EXEC, args=(filename,))
if len(response) == 1:
# Error response is returned as a single string
raise SampError(response[0])
else:
return response
def rcon_kick(self, player_id):
return self.send_rcon_command(RCON_KICK, args=(player_id,))
def rcon_ban(self, player_id):
return self.send_rcon_command(RCON_BAN, args=(player_id,))
def rcon_banip(self, ip_address):
return self.send_rcon_command(RCON_BANIP, args=(ip_address,))
def rcon_unbanip(self, ip_address):
return self.send_rcon_command(RCON_UNBANIP, args=(ip_address,))
def rcon_changemode(self, mode):
return self.send_rcon_command(RCON_CHANGEMODE, args=(mode,))
def rcon_gmx(self):
return self.send_rcon_command(RCON_GMX)
def rcon_reloadbans(self):
return self.send_rcon_command(RCON_RELOADBANS)
def rcon_reloadlog(self):
return self.send_rcon_command(RCON_RELOADBANS)
def rcon_say(self, message):
return self.send_rcon_command(RCON_SAY, args=(message,))
def rcon_players(self):
result = []
for line in self.send_rcon_command(RCON_PLAYERS)[1:]:
player_id, name, ping, ip = line.split('\t')
player = RConPlayer(
id=int(player_id),
name=str(name),
ping=int(ping),
ip=str(ip),
)
result.append(player)
return result
def rcon_gravity(self, gravity=0.008):
return self.send_rcon_command(RCON_GRAVITY, args=(gravity,))
def rcon_weather(self, weather):
return self.send_rcon_command(RCON_WEATHER, args=(weather,))
def rcon_loadfs(self, name):
response = self.send_rcon_command(RCON_LOADFS, args=(name,))[0]
if 'load failed' in response:
raise SampError(response)
else:
return response
def rcon_unloadfs(self, name):
response = self.send_rcon_command(RCON_UNLOADFS, args=(name,))[0]
if 'unload failed' in response:
raise SampError(response)
else:
return response
def rcon_reloadfs(self, name):
response = self.send_rcon_command(RCON_RELOADFS, args=(name,))
if 'load failed' in response[-1]:
raise SampError(response[-1])
else:
return response
def rcon_get_weburl(self):
response = self.send_rcon_command(RCON_WEBURL)[0]
return parse_server_var(response)
def rcon_set_weburl(self, url):
return self.send_rcon_command(RCON_WEBURL, args=(url,))
def rcon_set_rcon_password(self, password):
"""
Set server's rcon password
local password will be updated for future rcon commands
"""
result = self.send_rcon_command(RCON_RCON_PASSWORD, args=(password,))
self.rcon_password = password
def rcon_get_rcon_password(self):
response = self.send_rcon_command(RCON_RCON_PASSWORD)[0]
return parse_server_var(response)
def rcon_get_password(self):
response = self.send_rcon_command(RCON_PASSWORD)[0]
return parse_server_var(response)
def rcon_set_password(self, password):
return self.send_rcon_command(RCON_PASSWORD, args=(password,))[0]
def rcon_get_messageslimit(self):
response = self.send_rcon_command(RCON_MESSAGESLIMIT)[0]
return parse_server_var(response)
def rcon_set_messageslimit(self, limit):
return self.send_rcon_command(RCON_MESSAGESLIMIT, args=(limit,), fetch_response=False)
def rcon_get_ackslimit(self):
response = self.send_rcon_command(RCON_ACKSLIMIT)[0]
return parse_server_var(response)
def rcon_set_ackslimit(self, limit):
return self.send_rcon_command(RCON_ACKSLIMIT, args=(limit,), fetch_response=False)
def rcon_get_messageholelimit(self):
response = self.send_rcon_command(RCON_MESSAGEHOLELIMIT)[0]
return parse_server_var(response)
def rcon_set_messageholelimit(self, limit):
return self.send_rcon_command(RCON_MESSAGEHOLELIMIT, args=(limit,), fetch_response=False)
def rcon_get_playertimeout(self):
response = self.send_rcon_command(RCON_PLAYERTIMEOUT)[0]
return parse_server_var(response)
def rcon_set_playertimeout(self, limit):
return self.send_rcon_command(RCON_PLAYERTIMEOUT, args=(limit,), fetch_response=False)
def rcon_get_language(self):
response = self.send_rcon_command(RCON_LANGUAGE)[0]
return parse_server_var(response)
def rcon_set_language(self, limit):
return self.send_rcon_command(RCON_LANGUAGE, args=(limit,), fetch_response=False)
|
/samp-client-3.0.1.tar.gz/samp-client-3.0.1/samp_client/client.py
| 0.600423 | 0.168532 |
client.py
|
pypi
|
import re
from samp_client.constants import ENCODING
from samp_client.models import ServerVar
VAR_PATTERN = re.compile(r'\s*'.join((
r'(?P<name>\w+)',
r'=',
r'(?P<value>.+?)',
r'\((?P<type>string|int|bool|float)\)',
r'(?P<read_only>\(read-only\))?',
)))
VAR_TYPES = {
'int': int,
'bool': bool,
'string': str,
'float': float,
}
def encode_bytes(*args):
"""
Encodes values into a byte string
bytestrings are left as-is
integer values are encoded into their char values
:return: bytestring representing all arguments joined together
"""
result = b''
for arg in args:
if isinstance(arg, bytes):
result += arg
elif isinstance(arg, str):
result += bytes(arg, ENCODING)
elif isinstance(arg, int):
result += bytes([arg])
return result
def decode_int(data):
""" Decodes integer from byte string """
assert isinstance(data, bytes)
return sum(c << (8 * n) for n, c in enumerate(data))
def decode_string(string, len_pos, len_bytes=4):
"""
Decodes string from a string
:param string: bytestring with the response
:param len_pos: position of the integer expressing the length
:param len_bytes: number of bytes used for string length
:return: substring of string starting at len_pos + len_bytes and ending at position indicated in length value at position len_pos
"""
assert isinstance(len_pos, int)
len_end = len_pos + len_bytes
length = decode_int(string[len_pos:len_end])
return string[len_end:len_end + length].decode(ENCODING)
def build_rcon_command(command, args=None):
"""
Appends args to the command
:param command: the command string
:param args: list of arguments or a single argument (bool, string or int)
"""
if isinstance(command, str):
command = command.encode(ENCODING)
if args is not None:
# Bool check must come first because bool extends int
if isinstance(args, bool):
args = int(args),
elif isinstance(args, (str, bytes, int, float)):
args = args,
if len(args):
args = map(str, args)
command += b' ' + b' '.join(bytes(arg, ENCODING) for arg in args)
return command
def parse_server_var(variable):
""" Parses server variable string into a ServerVar named tuple"""
matches = VAR_PATTERN.match(variable)
if matches:
groups = matches.groupdict()
val_type = VAR_TYPES[groups['type']]
# Strip surrounding whitespace and quotations from value
value = groups['value']
if val_type == bool:
# pre-parse boolean to int as it will be returned as 0 or 1 string
value = int(value)
elif val_type == str:
# strip surrounding quotations from string value
value = value.strip('"')
return ServerVar(
name=groups['name'],
value=val_type(value),
read_only=bool(groups['read_only']),
)
else:
raise ValueError('Failed to parse {}'.format(variable))
|
/samp-client-3.0.1.tar.gz/samp-client-3.0.1/samp_client/utils.py
| 0.573081 | 0.457137 |
utils.py
|
pypi
|
import socket
from samp_py.constants import *
from samp_py.exceptions import SampError, RconError, InvalidRconPassword, ConnectionError
from samp_py.models import ServerInfo, Rule, Client, ClientDetail, RConPlayer
from samp_py.utils import encode_bytes, decode_int, decode_string, build_rcon_command, parse_server_var
class SampClient(object):
timeout = 1.0
socket_cls = socket.socket
def __init__(self, address='127.0.0.1', port=7777, rcon_password=None):
super(SampClient, self).__init__()
assert isinstance(address, str)
self.address = address
self.port = int(port)
self.rcon_password = rcon_password
def connect(self):
try:
self.address = socket.gethostbyname(self.address)
self.header = MSG_PREFIX + encode_bytes(*map(int, self.address.split('.'))) + encode_bytes(self.port & 0xFF, self.port >> 8 & 0xFF)
self.socket = self.socket_cls(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.settimeout(self.timeout)
return self
except socket.error as e:
raise ConnectionError(e)
def disconnect(self):
self.socket.close()
del self.socket
def __enter__(self):
return self.connect()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, 'socket'):
self.disconnect()
def send_request(self, opcode, extras=b'', return_response=True):
body = self.header + opcode + extras
self.socket.sendto(body, (self.address, self.port))
if return_response:
return self.receive()
def receive(self, buffersize=4096, strip_header=True):
try:
response = self.socket.recv(buffersize)
return response[11:] if strip_header else response
except socket.timeout as e:
pass
except socket.error as e:
raise ConnectionError(e)
def get_server_info(self):
response = self.send_request(OPCODE_INFO)
offset = 0
hostname = decode_string(response, 5, 4)
offset += len(hostname)
gamemode = decode_string(response, offset + 9, 4)
offset += len(gamemode)
language = decode_string(response, offset + 13, 4)
return ServerInfo(
password=bool(response[0]),
players=decode_int(response[1:3]),
max_players=decode_int(response[3:5]),
hostname=hostname,
gamemode=gamemode,
language=language,
)
def get_server_rules(self):
response = self.send_request(OPCODE_RULES)
num_rules = decode_int(response[:2])
offset = 2
result = []
for n in range(num_rules):
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
value = decode_string(response, offset, len_bytes=1)
offset += 1 + len(value)
rule = Rule(
name=str(name),
value=value,
)
result.append(rule)
return result
def get_server_rules_dict(self):
return {rule.name: rule.value for rule in self.get_server_rules()}
def get_server_clients(self):
response = self.send_request(OPCODE_CLIENTS)
result = []
if response is None:
return result
num_clients = decode_int(response[:2])
offset = 2
for n in range(num_clients):
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
score = decode_int(response[offset:offset + 4])
offset += 4
client = Client(
name=name,
score=score,
)
result.append(client)
return result
def get_server_clients_detailed(self):
response = self.send_request(OPCODE_CLIENTS_DETAILED)
result = []
if response is None:
return result
num_clients = decode_int(response[:2])
offset = 2
for n in range(num_clients):
player_id = decode_int(response[offset:offset])
offset += 1
name = decode_string(response, offset, len_bytes=1)
offset += 1 + len(name)
score = decode_int(response[offset:offset + 4])
offset += 4
ping = decode_int(response[offset:offset + 4])
offset += 4
detail = ClientDetail(
id=player_id,
name=name,
score=score,
ping=ping,
)
result.append(detail)
return result
def probe_server(self, value='ping'):
if isinstance(value, str):
value = bytes(value, ENCODING)
assert len(value) == 4, 'Value must be exactly 4 characters'
response = self.send_request(OPCODE_PSEUDORANDOM, extras=value)
return response
def validate_server(self, value='ping'):
response = self.probe_server(value)
if response != value:
raise SampError('Server returned {} instead of {}'.format(response, value))
def is_online(self):
value = b'test'
try:
return self.probe_server(value=value) == value
except ConnectionError:
return False
@property
def rcon_password_bytes(self):
if not self.rcon_password:
raise RconError('Rcon password was not provided')
pass_len = len(self.rcon_password)
return encode_bytes(pass_len & 0xFF, pass_len >> 8 & 0xFF) + bytes(self.rcon_password, ENCODING)
def send_rcon_command(self, command, args=tuple(), fetch_response=True):
command = build_rcon_command(command, args)
command_length = encode_bytes(len(command) & 0xFF, len(command) >> 8 & 0xFF)
payload = self.rcon_password_bytes + command_length + command
self.send_request(OPCODE_RCON, extras=payload, return_response=False)
if fetch_response:
result = []
while True:
response = self.receive()
if response is None:
break
line = decode_string(response, 0, 2)
if line:
result.append(line.lstrip())
else:
break
if len(result) == 1 and result[0] == 'Invalid RCON password.':
raise InvalidRconPassword
return result
def rcon_cmdlist(self):
return self.send_rcon_command(RCON_CMDLIST)[1:]
def rcon_varlist(self):
vars = self.send_rcon_command(RCON_VARLIST)[1:]
return [parse_server_var(var) for var in vars]
def rcon_varlist_dict(self):
return {var.name: var.value for var in self.rcon_varlist()}
def rcon_exit(self):
return self.send_rcon_command(RCON_EXIT, fetch_response=False)
def rcon_echo(self, text):
return self.send_rcon_command(RCON_ECHO, args=(text,))[0]
def rcon_set_hostname(self, name):
return self.send_rcon_command(RCON_HOSTNAME, args=(name,), fetch_response=False)
def rcon_get_hostname(self):
response = self.send_rcon_command(RCON_HOSTNAME)[0]
return parse_server_var(response)
def rcon_set_gamemodetext(self, name):
return self.send_rcon_command(RCON_GAMEMODETEXT, args=(name,), fetch_response=False)
def rcon_get_gamemodetext(self):
response = self.send_rcon_command(RCON_GAMEMODETEXT)[0]
return parse_server_var(response)
def rcon_set_mapname(self, name):
return self.send_rcon_command(RCON_MAPNAME, args=(name,), fetch_response=False)
def rcon_get_mapname(self):
response = self.send_rcon_command(RCON_MAPNAME)[0]
return parse_server_var(response)
def rcon_exec(self, filename):
response = self.send_rcon_command(RCON_EXEC, args=(filename,))
if len(response) == 1:
raise SampError(response[0])
else:
return response
def rcon_kick(self, player_id):
return self.send_rcon_command(RCON_KICK, args=(player_id,))
def rcon_ban(self, player_id):
return self.send_rcon_command(RCON_BAN, args=(player_id,))
def rcon_banip(self, ip_address):
return self.send_rcon_command(RCON_BANIP, args=(ip_address,))
def rcon_unbanip(self, ip_address):
return self.send_rcon_command(RCON_UNBANIP, args=(ip_address,))
def rcon_changemode(self, mode):
return self.send_rcon_command(RCON_CHANGEMODE, args=(mode,))
def rcon_gmx(self):
return self.send_rcon_command(RCON_GMX)
def rcon_reloadbans(self):
return self.send_rcon_command(RCON_RELOADBANS)
def rcon_reloadlog(self):
return self.send_rcon_command(RCON_RELOADBANS)
def rcon_say(self, message):
return self.send_rcon_command(RCON_SAY, args=(message,))
def rcon_players(self):
result = []
for line in self.send_rcon_command(RCON_PLAYERS)[1:]:
player_id, name, ping, ip = line.split('\t')
player = RConPlayer(
id=int(player_id),
name=str(name),
ping=int(ping),
ip=str(ip),
)
result.append(player)
return result
def rcon_gravity(self, gravity=0.008):
return self.send_rcon_command(RCON_GRAVITY, args=(gravity,))
def rcon_weather(self, weather):
return self.send_rcon_command(RCON_WEATHER, args=(weather,))
def rcon_loadfs(self, name):
response = self.send_rcon_command(RCON_LOADFS, args=(name,))[0]
if 'load failed' in response:
raise SampError(response)
else:
return response
def rcon_unloadfs(self, name):
response = self.send_rcon_command(RCON_UNLOADFS, args=(name,))[0]
if 'unload failed' in response:
raise SampError(response)
else:
return response
def rcon_reloadfs(self, name):
response = self.send_rcon_command(RCON_RELOADFS, args=(name,))
if 'load failed' in response[-1]:
raise SampError(response[-1])
else:
return response
def rcon_get_weburl(self):
response = self.send_rcon_command(RCON_WEBURL)[0]
return parse_server_var(response)
def rcon_set_weburl(self, url):
return self.send_rcon_command(RCON_WEBURL, args=(url,))
def rcon_set_rcon_password(self, password):
result = self.send_rcon_command(RCON_RCON_PASSWORD, args=(password,))
self.rcon_password = password
def rcon_get_rcon_password(self):
response = self.send_rcon_command(RCON_RCON_PASSWORD)[0]
return parse_server_var(response)
def rcon_get_password(self):
response = self.send_rcon_command(RCON_PASSWORD)[0]
return parse_server_var(response)
def rcon_set_password(self, password):
return self.send_rcon_command(RCON_PASSWORD, args=(password,))[0]
def rcon_get_messageslimit(self):
response = self.send_rcon_command(RCON_MESSAGESLIMIT)[0]
return parse_server_var(response)
def rcon_set_messageslimit(self, limit):
return self.send_rcon_command(RCON_MESSAGESLIMIT, args=(limit,), fetch_response=False)
def rcon_get_ackslimit(self):
response = self.send_rcon_command(RCON_ACKSLIMIT)[0]
return parse_server_var(response)
def rcon_set_ackslimit(self, limit):
return self.send_rcon_command(RCON_ACKSLIMIT, args=(limit,), fetch_response=False)
def rcon_get_messageholelimit(self):
response = self.send_rcon_command(RCON_MESSAGEHOLELIMIT)[0]
return parse_server_var(response)
def rcon_set_messageholelimit(self, limit):
return self.send_rcon_command(RCON_MESSAGEHOLELIMIT, args=(limit,), fetch_response=False)
def rcon_get_playertimeout(self):
response = self.send_rcon_command(RCON_PLAYERTIMEOUT)[0]
return parse_server_var(response)
def rcon_set_playertimeout(self, limit):
return self.send_rcon_command(RCON_PLAYERTIMEOUT, args=(limit,), fetch_response=False)
def rcon_get_language(self):
response = self.send_rcon_command(RCON_LANGUAGE)[0]
return parse_server_var(response)
def rcon_set_language(self, limit):
return self.send_rcon_command(RCON_LANGUAGE, args=(limit,), fetch_response=False)
|
/samp_python-1.0.0-py3-none-any.whl/samp_py/client.py
| 0.538012 | 0.162314 |
client.py
|
pypi
|
import re
from samp_py.constants import ENCODING
from samp_py.models import ServerVar
VAR_PATTERN = re.compile(r'\s*'.join((
r'(?P<name>\w+)',
r'=',
r'(?P<value>.+?)',
r'\((?P<type>string|int|bool|float)\)',
r'(?P<read_only>\(read-only\))?',
)))
VAR_TYPES = {
'int': int,
'bool': bool,
'string': str,
'float': float,
}
def encode_bytes(*args):
"""
Encodes values into a byte string
bytestrings are left as-is
integer values are encoded into their char values
:return: bytestring representing all arguments joined together
"""
result = b''
for arg in args:
if isinstance(arg, bytes):
result += arg
elif isinstance(arg, str):
result += bytes(arg, ENCODING)
elif isinstance(arg, int):
result += bytes([arg])
return result
def decode_int(data):
""" Decodes integer from byte string """
assert isinstance(data, bytes)
return sum(c << (8 * n) for n, c in enumerate(data))
def decode_string(string, len_pos, len_bytes=4):
"""
Decodes string from a string
:param string: bytestring with the response
:param len_pos: position of the integer expressing the length
:param len_bytes: number of bytes used for string length
:return: substring of string starting at len_pos + len_bytes and ending at position indicated in length value at position len_pos
"""
assert isinstance(len_pos, int)
len_end = len_pos + len_bytes
length = decode_int(string[len_pos:len_end])
return string[len_end:len_end + length].decode(ENCODING)
def build_rcon_command(command, args=None):
"""
Appends args to the command
:param command: the command string
:param args: list of arguments or a single argument (bool, string or int)
"""
if isinstance(command, str):
command = command.encode(ENCODING)
if args is not None:
# Bool check must come first because bool extends int
if isinstance(args, bool):
args = int(args),
elif isinstance(args, (str, bytes, int, float)):
args = args,
if len(args):
args = map(str, args)
command += b' ' + b' '.join(bytes(arg, ENCODING) for arg in args)
return command
def parse_server_var(variable):
""" Parses server variable string into a ServerVar named tuple"""
matches = VAR_PATTERN.match(variable)
if matches:
groups = matches.groupdict()
val_type = VAR_TYPES[groups['type']]
# Strip surrounding whitespace and quotations from value
value = groups['value']
if val_type == bool:
# pre-parse boolean to int as it will be returned as 0 or 1 string
value = int(value)
elif val_type == str:
# strip surrounding quotations from string value
value = value.strip('"')
return ServerVar(
name=groups['name'],
value=val_type(value),
read_only=bool(groups['read_only']),
)
else:
raise ValueError('Failed to parse {}'.format(variable))
|
/samp_python-1.0.0-py3-none-any.whl/samp_py/utils.py
| 0.584153 | 0.486636 |
utils.py
|
pypi
|
from __future__ import annotations
import struct
import random
from dataclasses import dataclass, field
import cchardet as chardet # type: ignore
import trio
# Assuming ratio between max and min ping can't be higher than this
MAX_LATENCY_VARIANCE = 5
def encode_codepage(string: str) -> bytes:
"""
Encode the given string into bytes using the first possible codepage.
:param str string: The string to encode.
:return: The encoded bytes.
:rtype: bytes
:raises UnicodeEncodeError: If no suitable codepage is found.
"""
for codepage in range(1250, 1259):
try:
return string.encode(f'cp{codepage}')
except UnicodeEncodeError:
continue
raise UnicodeEncodeError(
'cp1250-1258',
string,
0,
len(string),
'Unable to find a suitable codepage',
)
def pack_string(string: str, len_type: str) -> bytes:
"""
Pack a string into bytes with a length prefix.
:param str string: The string to pack.
:param str len_type: The format specifier for the length prefix.
:return: The packed bytes.
:rtype: bytes
"""
format = f'<{len_type}'
return struct.pack(format, len(string)) + encode_codepage(string)
def unpack_string(data: bytes, len_type: str) -> tuple[str, bytes]:
"""
Unpack a string from bytes with a length prefix.
:param bytes data: The data to unpack.
:param str len_type: The format specifier for the length prefix.
:return: The unpacked string and the remaining data.
:rtype: tuple[str, bytes]
"""
format = f'<{len_type}'
size = struct.calcsize(format)
str_len, data = *struct.unpack_from(format, data), data[size:]
string, data = data[:str_len], data[str_len:]
encoding = chardet.detect(string)['encoding'] or 'ascii'
return string.decode(encoding), data
class MissingRCONPassword(Exception):
"""Raised when no RCON password was provided."""
class InvalidRCONPassword(Exception):
"""Raised when an invalid RCON password is provided."""
class RCONDisabled(Exception):
"""Raised when RCON is disabled on the server or did not respond."""
@dataclass
class ServerInfo:
"""
Represents server information.
:param str name: The name of the server.
:param bool password: Indicates if the server requires a password to join.
:param int players: The number of players on the server.
:param int max_players:
The maximum number of players allowed on the server.
:param str gamemode: The current gamemode of the server.
:param str language: The language used by the server.
"""
name: str
password: bool
players: int
max_players: int
gamemode: str
language: str
@classmethod
def from_data(cls, data: bytes) -> ServerInfo:
"""
Create a ServerInfo object from the given raw data.
:param bytes data: The data to create the ServerInfo object from.
:return: The created ServerInfo object.
:rtype: ServerInfo
"""
password, players, max_players = struct.unpack_from('<?HH', data)
data = data[5:] # _Bool + short + short, see above
name, data = unpack_string(data, 'I')
gamemode, data = unpack_string(data, 'I')
language, data = unpack_string(data, 'I')
assert not data # We consumed all the buffer
return cls(
name=name,
password=password,
players=players,
max_players=max_players,
gamemode=gamemode,
language=language,
)
@dataclass
class PlayerInfo:
"""
Represents player information.
:param str name: The name of the player.
:param int score: The score of the player.
"""
name: str
score: int
@classmethod
def from_data(cls, data: bytes) -> tuple[PlayerInfo, bytes]:
"""
Create a PlayerInfo object from the given raw data.
:param bytes data: The data to create the PlayerInfo object from.
:return: The created PlayerInfo object and the remaining data.
:rtype: tuple[PlayerInfo, bytes]
"""
name, data = unpack_string(data, 'B')
score = struct.unpack_from('<i', data)[0]
data = data[4:] # int, see above
return cls(
name=name,
score=score,
), data
@dataclass
class PlayerList:
"""
Represents a list of players.
:param list[PlayerInfo] players: The list of players.
"""
players: list[PlayerInfo]
@classmethod
def from_data(cls, data: bytes) -> PlayerList:
"""
Create a PlayerList object from the given raw data.
:param bytes data: The data to create the PlayerList object from.
:return: The created PlayerList object.
:rtype: PlayerList
"""
player_count = struct.unpack_from('<H', data)[0]
data = data[2:] # short, see above
players = []
for _ in range(player_count):
player, data = PlayerInfo.from_data(data)
players.append(player)
assert not data # We consumed all the buffer
return cls(players=players)
@dataclass
class Rule:
"""
Represents a server rule.
:param str name: The name of the rule.
:param str value: The value of the rule.
"""
name: str
value: str
@classmethod
def from_data(cls, data: bytes) -> tuple[Rule, bytes]:
"""
Create a Rule object from the given raw data.
:param bytes data: The data to create the Rule object from.
:return: The created Rule object and the remaining data.
:rtype: tuple[Rule, bytes]
"""
name, data = unpack_string(data, 'B')
value, data = unpack_string(data, 'B')
return cls(
name=name,
value=value,
), data
@dataclass
class RuleList:
"""
Represents a list of server rules.
:param list[Rule] rules: The list of rules.
"""
rules: list[Rule]
@classmethod
def from_data(cls, data: bytes) -> RuleList:
"""
Create a RuleList object from the given raw data.
:param bytes data: The data to create the RuleList object from.
:return: The created RuleList object.
:rtype: RuleList
"""
rule_count = struct.unpack_from('<H', data)[0]
data = data[2:] # short, see above
rules = []
for _ in range(rule_count):
rule, data = Rule.from_data(data)
rules.append(rule)
assert not data # We consumed all the buffer
return cls(rules=rules)
@dataclass
class Client:
"""
Main query client class to interact with a given game server.
:param str ip: The IP address of the server.
:param int port: The port number of the server.
:param str | None rcon_password:
The RCON password for the server (optional).
"""
ip: str
port: int
rcon_password: str | None = field(default=None, repr=False)
prefix: bytes | None = field(default=None, repr=False)
_socket: trio.socket.SocketType | None = field(default=None, repr=False)
async def connect(self) -> None:
"""Connect to the server (called automatically)."""
family, type, proto, _, (ip, *_) = (await trio.socket.getaddrinfo(
self.ip,
self.port,
family=trio.socket.AF_INET,
proto=trio.socket.IPPROTO_UDP,
))[0]
self.ip = ip
self._socket = socket = trio.socket.socket(family, type, proto)
await socket.connect((self.ip, self.port))
self.prefix = (
b'SAMP'
+ trio.socket.inet_aton(self.ip)
+ self.port.to_bytes(2, 'little')
)
async def send(self, opcode: bytes, payload: bytes = b'') -> None:
"""
Send a query message to the server.
:param bytes opcode: The opcode of the message.
:param bytes payload: The payload of the message (optional).
"""
if not self._socket:
await self.connect()
assert self._socket and self.prefix
await self._socket.send(self.prefix + opcode + payload)
async def receive(self, header: bytes = b'') -> bytes:
"""
Receive a query response from the server.
:param bytes header: The expected header of the response (optional).
:return: The received response.
:rtype: bytes
"""
assert self._socket
while True:
data = await self._socket.recv(4096)
if data.startswith(header):
return data[len(header):]
async def ping(self) -> float:
"""
Send a ping request to the server and measure the round-trip time.
:return: The round-trip time in seconds.
:rtype: float
"""
payload = random.getrandbits(32).to_bytes(4, 'little')
start_time = trio.current_time()
await self.send(b'p', payload)
assert self.prefix
data = await self.receive(header=self.prefix + b'p' + payload)
assert not data # No data beyond expected header
return trio.current_time() - start_time
async def is_omp(self) -> bool:
"""
Check if the server uses open.mp.
:return: True if the server uses open.mp, False otherwise.
:rtype: bool
"""
ping = await self.ping()
payload = random.getrandbits(32).to_bytes(4, 'little')
with trio.move_on_after(MAX_LATENCY_VARIANCE * ping):
await self.send(b'o', payload)
assert self.prefix
data = await self.receive(header=self.prefix + b'o' + payload)
assert not data # No data beyond expected header
return True
return False
async def info(self) -> ServerInfo:
"""
Retrieve server information.
:return: The server information.
:rtype: ServerInfo
"""
await self.send(b'i')
assert self.prefix
data = await self.receive(header=self.prefix + b'i')
return ServerInfo.from_data(data)
async def players(self) -> PlayerList:
"""
Retrieve the list of players on the server.
:return: The list of players.
:rtype: PlayerList
"""
await self.send(b'c')
assert self.prefix
data = await self.receive(header=self.prefix + b'c')
return PlayerList.from_data(data)
async def rules(self) -> RuleList:
"""
Retrieve the list of server rules.
:return: The list of rules.
:rtype: RuleList
"""
await self.send(b'r')
assert self.prefix
data = await self.receive(header=self.prefix + b'r')
return RuleList.from_data(data)
async def rcon(self, command: str) -> str:
"""
Execute a RCON command on the server.
:param str command: The RCON command to execute.
:return: The response from the server.
:rtype: str
:raises MissingRCONPassword: If the RCON password is missing.
:raises InvalidRCONPassword: If an invalid RCON password is provided.
:raises RCONDisabled:
If RCON is disabled on the server or no response was received.
"""
if not self.rcon_password:
raise MissingRCONPassword()
ping = await self.ping()
payload = (
pack_string(self.rcon_password, 'H')
+ pack_string(command, 'H')
)
await self.send(b'x', payload)
assert self.prefix
response = ''
with trio.move_on_after(MAX_LATENCY_VARIANCE * ping) as cancel_scope:
while True:
start_time = trio.current_time()
data = await self.receive(header=self.prefix + b'x')
receive_duration = trio.current_time() - start_time
line, data = unpack_string(data, 'H')
assert not data
response += line + '\n'
cancel_scope.deadline += receive_duration
if not response:
raise RCONDisabled()
if response == 'Invalid RCON password.\n':
raise InvalidRCONPassword()
return response[:-1] # Strip trailing newline
|
/samp_query-0.5.0.tar.gz/samp_query-0.5.0/samp_query/__init__.py
| 0.887917 | 0.537952 |
__init__.py
|
pypi
|
Sample
======
``sample`` is a command-line tool for sampling data from a large,
newline-separated dataset (typically a CSV-like file).
Installation
------------
``sample`` is distributed with ``pip``. Once you've installed ``pip``,
simply run::
> pip install sample-cli
and sample will be installed into your Python environment.
Usage
-----
``sample`` requires one argument, the input file. If the input file
is ``-``, data will be read from standard input (in this case, only
the reservoir and approximate algorithms can be used).
Simple Example
**************
To take a sample of size 1000 from the file ``big_data.csv``,
run ``sample`` as follows::
> sample -n 1000 big_data.csv
This will print 1000 random lines from the file to the terminal.
File Redirection
****************
Usually we want to save the sample to another file instead.
``sample`` doesn't have file output built-in; instead it relies
on the output redirection features of your terminal. To save
to ``big_data_sample.csv``, run the following command::
> sample -n 1000 big_data.csv > big_data_sample.csv
Header Rows
***********
CSV files often have a header row with the column names. You can pass
the ``-r`` flag to ``sample`` to preserve the header row::
> sample -n 1000 big_data.csv -r > big_data_sample.csv
Rarely, you may need to sample from a file with a header spanning
multiple rows. The ``-r`` argument takes an optional number of
rows to preserve as a header::
> sample -n 1000 -r 3 data_with_header.csv > sample_with_header.csv
Note that if the ``-r`` argument is directly before the input filename,
it must have an argument or else it will try to interpret the input
filename as the number of header rows and fail. Putting the ``-r`` argument
after the input filename will avoid this.
Random Seed
***********
The output of ``sample`` is random and depend on the computer's random
state. Sometimes you may want to take a sample in a way that can be
reproduced. You can pass a random seed to ``sample`` with the ``-s`` flag
to accomplish this::
> sample -s 45906345 data_file.csv > reproducable_sample.csv
Sampling Algorithms
-------------------
Algorithm Comparison
********************
``sample`` implements three sampling algorithms, each with their own strengths
and weaknesses.
+------------------------+----------------+----------------+------------+
| | Reservoir | Approximate | Two-pass |
+========================+================+================+============+
| Flag | ``--res`` | ``--app`` | ``--tp`` |
+------------------------+----------------+----------------+------------+
| ``stdin``-compatible | yes | yes | no |
+------------------------+----------------+----------------+------------+
| space complexity | ``O(ss * rs)`` | ``O(1)`` | ``O(ss)`` |
+------------------------+----------------+----------------+------------+
| fixed sample size | compatible | not compatible | compatible |
+------------------------+----------------+----------------+------------+
| fractional sample size | not compatible | compatible | compatible |
+------------------------+----------------+----------------+------------+
For space complexity, `ss` is the number of records in the sample and `rs` is the maximum size of a record.
Reservoir Sampling
******************
Reservoir sampling (`Random Sampling with a Reservoir (Vitter 85) <http://www.mathcs.emory.edu/~cheung/papers/StreamDB/RandomSampling/1985-Vitter-Random-sampling-with-reservior.pdf>`__)
is a method of sampling from a stream of unknown size where the sample size is
fixed in advance. It is a one-pass algorithm and uses space proportional to the
amount of data in the sample.
Reservoir sampling is the default algorithm used by ``sample``. For consistency,
it can also be invoked with the argument ``--reservoir``.
If reservoir sampling, the sample size must be fixed rather than fractional.
Example::
> sample --reservoir -n 1000 big_data.csv > sample_data.csv
Approximate Sampling
********************
Approximate sampling simply includes each row in the sample with a probability
given as the sample proportion. It is a stateless algorithm with minimal space
requirements. Samples will have on average a size of ``fraction * population_size``,
but it will vary between each invocation. Because of this, approximate sampling
is only useful when the sample size does not have to be exact (hence the name).
Example::
> sample --approximate -f 0.15 my_data.csv > my_sample.csv
Equivalently, supply a percentage instead of a fraction by switching the
``-f`` to a ``-p``::
> sample --approximate -p 15 my_data.csv > my_sample.csv
Two-Pass Sampling
*****************
Two-pass sampling is allowed two passes, first to count the number of records
(ie. the population size) and second to emit the records which are part of the
sample. Because of this it is not compatible with ``stdin`` as an input.
As two-pass sampling knows the population size, it will accept the sample size
as either a fraction or a fixed number of elements.
Example::
> sample --two-pass -p 15 my_data.csv > my_sample.csv
Two-pass sampling uses memory proportional to the number of elements in the sample.
|
/sample-cli-0.0.1.tar.gz/sample-cli-0.0.1/README.rst
| 0.96859 | 0.705075 |
README.rst
|
pypi
|
import argparse
from sys import stderr
from itertools import chain
from algorithms import reservoir_sample, approximate_sample, two_pass_sample
from file_input import FileInput
DEFAULT_FRACTION = 0.01
DEFAULT_SAMPLE_SIZE = 100
PERCENT = 100
def main():
parser = argparse.ArgumentParser(prog='sample', description=__doc__)
parser.add_argument('input_file', default='-',
help='csv, tsv, or other newline-separated data file')
parser.add_argument('--seed', '-s', type=int, default=None,
help='random number generator seed, for reproducable results')
parser.add_argument('--header-rows', '-r', type=int, nargs='?', const=1, default=0,
help='number of header rows to preserve in sample')
parser.add_argument('--percent', '-p', type=float, default=None,
help='specify sample size as a percent of total')
parser.add_argument('--fraction', '-f', type=float, default=None,
help='specify sample size as a fraction of total')
parser.add_argument('--sample-size', '-n', type=int, default=None,
help='specify number of samples directy')
parser.add_argument('--approximate', '-app', action='store_true',
default=False,
help='Use approximate algorithm. Requires sample size to be specified '+
'as a percent or fraction. One-pass and constant space, but sample '+
'size is not guaranteed to be exact.')
parser.add_argument('--reservoir', '-res', action='store_true',
help='Use one-pass reservoir sampling algorithm. Sample size must be fixed. '+
'Sample must fit in memory. Used by default if no '+
'other algorithm is specified.')
parser.add_argument('--two-pass', '-tp', action='store_true',
default=False,
help='Use two-pass sampling algorithm. List of indices to sample must fit '+
'in memory.')
args = parser.parse_args()
if (not args.two_pass) and (not args.approximate):
args.reservoir = True
if args.two_pass and args.input_file == '-':
print >> stderr, ('The two-pass algorithm does not support standard input. '
'Use another algorithm or save to a file first.')
exit(1)
if args.percent is not None and args.fraction is not None:
print >> stderr, 'If percent is specified, fraction must not be'
exit(1)
if args.percent is not None:
args.fraction = args.percent / 100
if (args.fraction is not None) and (args.sample_size is not None):
print >> stderr, 'If sample size is specified, percent and fraction must not be.'
exit(1)
if (args.fraction is not None) and args.reservoir:
print >> stderr, ('percent and fraction cannot be used with reservoir algorithm; '
'use sample size instead.')
exit(1)
if (args.sample_size is not None) and args.approximate:
print >> stderr, ('sample size cannot be given with the approximate algorithm; '
'use fraction or percent instead.')
exit(1)
fi = FileInput(args.input_file, args.header_rows)
if args.seed is not None:
random.seed(args.seed)
if args.approximate:
if args.fraction is None:
args.fraction = DEFAULT_FRACTION
sample = approximate_sample(fi.get_input(), args.fraction)
elif args.two_pass:
if args.fraction:
sample = two_pass_sample(fi.get_input, fraction=args.fraction)
else:
if args.sample_size is None:
args.sample_size = DEFAULT_SAMPLE_SIZE
sample = two_pass_sample(fi.get_input, sample_size=args.sample_size)
else:
if args.sample_size is None:
args.sample_size = DEFAULT_SAMPLE_SIZE
sample = reservoir_sample(fi.get_input(), args.sample_size)
for line in chain(fi.header, sample):
print line,
if __name__ == '__main__':
main()
|
/sample-cli-0.0.1.tar.gz/sample-cli-0.0.1/sample/main.py
| 0.448426 | 0.157266 |
main.py
|
pypi
|
import os
import random
from typing import Annotated
import pandas as pd
import typer
from rich.console import Console
from rich.filesize import decimal as filesize_decimal
from rich.progress import Progress, SpinnerColumn, TextColumn, open
from rich.status import Status
from rich.table import Table
app = typer.Typer(add_completion=False)
console = Console()
input_help = "Path to the input CSV file."
pctg_help = "Percentage of data to sample. The value should be between 0.0 and 1.0."
@app.command()
def sample_csv(
input_path: Annotated[str, typer.Argument(help=input_help)],
percentage: Annotated[float, typer.Argument(help=pctg_help)] = 0.1,
):
"""
A minimal CLI tool for sampling data from large CSV files.
"""
console = Console()
# Check if the input file exists
if not os.path.exists(input_path):
raise typer.BadParameter(f"The file {input_path} does not exist.")
# Autogenerate output path
file_name = os.path.basename(input_path)
name, ext = os.path.splitext(file_name)
percentage_str = f"{percentage*100:.2g}"
sampled_file_name = f"{name}_sampled_{percentage_str}{ext}"
output_path = os.path.join(os.path.dirname(input_path), sampled_file_name)
console.print(
f"\nSampling [yellow]{percentage_str}%[/yellow] of rows into [magenta]{output_path}[/magenta]\n"
)
# Count the number of lines in the input file
with open(input_path, "rb", description="") as f:
num_lines = sum(1 for _ in f)
# Sample the lines
console.print()
with Status("Sampling", console=console):
skip = int(num_lines * (1 - percentage))
skip_ids = sorted(random.sample(range(1, num_lines + 1), skip)) # 0-indexed
df = pd.read_csv(input_path, skiprows=skip_ids)
# Save to a new file
if os.path.exists(output_path):
typer.confirm(
f"The file {output_path} already exists.\nDo you want to overwrite it?",
abort=True,
default=True,
)
console.print()
with Status("Writing to new file", console=console):
df.to_csv(output_path, index=False)
# Print the summary in a nice table
table = Table(title="")
table.add_column("Original")
table.add_column("Sampled")
table.add_row(input_path, output_path, style="magenta")
table.add_row(
filesize_decimal(os.path.getsize(input_path)),
filesize_decimal(os.path.getsize(output_path)),
style="green",
)
table.add_row(
f"{num_lines} rows",
f"{df.shape[0]} rows",
style="yellow",
)
console.print(table, end="\n\n")
|
/sample_csv-1.0.1-py3-none-any.whl/sample_csv/main.py
| 0.532425 | 0.206534 |
main.py
|
pypi
|
from random import randrange, choice, shuffle
from netaddr import *
from sample_data_utils.numeric import hexnum
NOT_NET = [10, 127, 172, 192, 169]
PRIVATE_NET = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']
DEDICATED_NET = ['100.64.0.0/10', ]
LOCAL_NET = ['169.254.0.0/16', '127.0.0.0/8']
NO_PUBLIC = PRIVATE_NET + DEDICATED_NET + LOCAL_NET + ['192.0.2.0/24', '192.88.99.0/24', '198.18.0.0/15']
def ipaddress(not_valid=None):
"""
returns a string representing a random ip address
:param not_valid: if passed must be a list of integers representing valid class A netoworks that must be ignored
"""
not_valid_class_A = not_valid or []
class_a = [r for r in range(1, 256) if r not in not_valid_class_A]
shuffle(class_a)
first = class_a.pop()
return ".".join([str(first), str(randrange(1, 256)),
str(randrange(1, 256)), str(randrange(1, 256))])
def ip(private=True, public=True, max_attempts=10000):
"""
returns a :class:`netaddr.IPAddress` instance with a random value
:param private: if False does not return private networks
:param public: if False does not return public networks
:param max_attempts:
"""
if not (private or public):
raise ValueError('Cannot disable both `private` and `public` network')
if private != public:
if private:
is_valid = lambda address: address.is_private()
not_valid = [n for n in range(1, 255) if n not in NOT_NET]
else:
is_valid = lambda address: not address.is_private()
not_valid = NOT_NET
attempt = 0
while attempt < max_attempts:
attempt += 1
ip = IPAddress(ipaddress(not_valid))
if is_valid(ip):
return ip
else:
return IPAddress(ipaddress())
def ipv4():
return IPAddress(ipaddress()).ipv4()
def ipv6():
return IPAddress(ipaddress()).ipv6()
def node(network):
return str(choice(list(IPNetwork(network))))
def mac_address(vendors=True):
manufactures = ('00:24:e8:', # Dell Inc.
'00:21:6a:', # intel
'08:00:27:', # cadmus computer systems
)
if vendors:
return choice(manufactures) + ":".join([hexnum(2) for i in range(3)])
else:
return ":".join([hexnum(2) for i in range(6)])
|
/sample-data-utils-0.5.tar.gz/sample-data-utils-0.5/sample_data_utils/net.py
| 0.737631 | 0.250569 |
net.py
|
pypi
|
import decimal
from random import choice, uniform
import sys
def amount(min=1, max=sys.maxsize, decimal_places=2):
"""
return a random floating number
:param min: minimum value
:param max: maximum value
:param decimal_places: decimal places
:return:
"""
q = '.%s1' % '0' * (decimal_places - 1)
return decimal.Decimal(uniform(min, max)).quantize(decimal.Decimal(q))
def currency():
"""
returns a random ISO 4217 currency code
"""
codes = ['AFN', 'ALL', 'DZD', 'USD', 'EUR', 'AOA', 'XCD', 'XCD', 'XCD', 'ARS', 'AMD', 'AWG', 'AUD', 'EUR', 'AZN',
'BSD', 'BHD', 'BDT', 'BBD', 'BYR', 'EUR', 'BZD', 'XOF', 'BMD', 'BTN', 'BOB', 'BAM', 'BWP', 'NOK', 'BRL',
'USD', 'BND', 'BGN', 'XOF', 'BIF', 'KHR', 'XAF', 'CAD', 'CVE', 'KYD', 'XAF', 'XAF', 'CLP', 'CNY', 'AUD',
'AUD', 'COP', 'KMF', 'XAF', 'CDF', 'NZD', 'CRC', 'HRK', 'CUP', 'EUR', 'CZK', 'DKK', 'DJF', 'XCD', 'DOP',
'ECS', 'EGP', 'SVC', 'XAF', 'ERN', 'EUR', 'ETB', 'EUR', 'FKP', 'DKK', 'FJD', 'EUR', 'EUR', 'EUR', 'EUR',
'XAF', 'GMD', 'GEL', 'EUR', 'GHS', 'GIP', 'GBP', 'EUR', 'DKK', 'XCD', 'EUR', 'USD', 'QTQ', 'GGP', 'GNF',
'GWP', 'GYD', 'HTG', 'AUD', 'HNL', 'HKD', 'HUF', 'ISK', 'INR', 'IDR', 'IRR', 'IQD', 'EUR', 'GBP', 'ILS',
'EUR', 'XOF', 'JMD', 'JPY', 'GBP', 'JOD', 'KZT', 'KES', 'AUD', 'KPW', 'KRW', 'KWD', 'KGS', 'LAK', 'LVL',
'LBP', 'LSL', 'LRD', 'LYD', 'CHF', 'LTL', 'EUR', 'MOP', 'MKD', 'MGF', 'MWK', 'MYR', 'MVR', 'XOF', 'EUR',
'USD', 'EUR', 'MRO', 'MUR', 'EUR', 'MXN', 'USD', 'MDL', 'EUR', 'MNT', 'EUR', 'XCD', 'MAD', 'MZN', 'MMK',
'NAD', 'AUD', 'NPR', 'EUR', 'ANG', 'XPF', 'NZD', 'NIO', 'XOF', 'NGN', 'NZD', 'AUD', 'USD', 'NOK', 'OMR',
'PKR', 'USD', 'PAB', 'PGK', 'PYG', 'PEN', 'PHP', 'NZD', 'PLN', 'XPF', 'EUR', 'USD', 'QAR', 'EUR', 'RON',
'RUB', 'RWF', 'SHP', 'XCD', 'XCD', 'EUR', 'XCD', 'WST', 'EUR', 'STD', 'SAR', 'XOF', 'RSD', 'SCR', 'SLL',
'SGD', 'EUR', 'EUR', 'SBD', 'SOS', 'ZAR', 'GBP', 'SSP', 'EUR', 'LKR', 'SDG', 'SRD', 'NOK', 'SZL', 'SEK',
'CHF', 'SYP', 'TWD', 'TJS', 'TZS', 'THB', 'XOF', 'NZD', 'TOP', 'TTD', 'TND', 'TRY', 'TMT', 'USD', 'AUD',
'GBP', 'UGX', 'UAH', 'AED', 'UYU', 'USD', 'USD', 'UZS', 'VUV', 'EUR', 'VEF', 'VND', 'USD', 'USD', 'XPF',
'MAD', 'YER', 'ZMW', 'ZWD']
return choice(codes)
|
/sample-data-utils-0.5.tar.gz/sample-data-utils-0.5/sample_data_utils/money.py
| 0.506347 | 0.382343 |
money.py
|
pypi
|
import random
from sample_data_utils.storage import load_data
from sample_data_utils.text import rtext
from sample_data_utils.utils import memoize
GENDER_MALE = 'm'
GENDER_FEMALE = 'f'
@memoize
def _get_firstnames(language, gender):
fn = load_data(language, "firstnames_%s.txt" % gender)
return fn
@memoize
def _get_lastnames(language):
fn = load_data(language, "lastnames.txt")
return fn
@memoize
def _get_titles(languages=None):
choices = []
languages = languages or ['en']
for lang in languages:
fn = load_data(lang, "title.txt")
choices.extend([line.split('/') for line in fn])
return choices
def title(languages=None, genders=None):
"""
returns a random title
.. code-block:: python
>>> d.title()
u'Mrs.'
>>> d.title(['es'])
u'El Sr.'
>>> d.title(None, [GENDER_FEMALE])
u'Mrs.'
:param languages: list of allowed languages. ['en'] if None
:param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None
"""
languages = languages or ['en']
genders = genders or (GENDER_FEMALE, GENDER_MALE)
choices = _get_titles(languages)
gender = {'m':0, 'f':1}[random.choice(genders)]
return random.choice(choices)[gender]
def gender():
"""
randomly returns 'm' or 'f'
"""
return random.choice((GENDER_FEMALE, GENDER_MALE))
def person(languages=None, genders=None):
"""
returns a random tuple representing person information
.. code-block:: python
>>> d.person()
(u'Derren', u'Powell', 'm')
>>> d.person(genders=['f'])
(u'Marge', u'Rodriguez', u'Mrs.', 'f')
>>> d.person(['es'],['m'])
(u'Jacinto', u'Delgado', u'El Sr.', 'm')
:param language:
:param genders:
"""
languages = languages or ['en']
genders = genders or (GENDER_FEMALE, GENDER_MALE)
lang = random.choice(languages)
g = random.choice(genders)
t = title([lang], [g])
return first_name([lang], [g]), last_name([lang]), t, g
def name():
return rtext(30, 10).capitalize()
def fullname():
return "%s %s" % (name(), name())
def first_name(languages=None, genders=None):
"""
return a random first name
:return:
>>> from mock import patch
>>> with patch('%s._get_firstnamess' % __name__, lambda *args: ['aaa']):
... first_name()
'Aaa'
"""
choices = []
languages = languages or ['en']
genders = genders or [GENDER_MALE, GENDER_FEMALE]
for lang in languages:
for gender in genders:
samples = _get_firstnames(lang, gender)
choices.extend(samples)
return random.choice(choices).title()
def last_name(languages=None):
"""
return a random last name
>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
... last_name()
'Aaa'
>>> with patch('%s.get_lastnames' % __name__, lambda lang: ['%s_lastname'% lang]):
... last_name(['it'])
'It_Lastname'
"""
choices = []
languages = languages or ['en']
for lang in languages:
samples = _get_lastnames(lang)
choices.extend(samples)
return random.choice(choices).title()
|
/sample-data-utils-0.5.tar.gz/sample-data-utils-0.5/sample_data_utils/people.py
| 0.577138 | 0.173796 |
people.py
|
pypi
|
from functools import wraps
from sample_data_utils.exception import MaxAttemptException
def infinite():
"""
auto inc generator
"""
i = 0
while 1:
yield i
i += 1
_sequence_counters = {}
def sequence(prefix, cache=None):
"""
generator that returns an unique string
:param prefix: prefix of string
:param cache: cache used to store the last used number
>>> next(sequence('abc'))
'abc-0'
>>> next(sequence('abc'))
'abc-1'
"""
if cache is None:
cache = _sequence_counters
if cache == -1:
cache = {}
if prefix not in cache:
cache[prefix] = infinite()
while cache[prefix]:
yield "{0}-{1}".format(prefix, next(cache[prefix]))
def _get_memoized_value(func, args, kwargs):
"""Used internally by memoize decorator to get/store function results"""
key = (repr(args), repr(kwargs))
if not key in func._cache_dict:
ret = func(*args, **kwargs)
func._cache_dict[key] = ret
return func._cache_dict[key]
def memoize(func):
"""Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed."""
func._cache_dict = {}
@wraps(func)
def _inner(*args, **kwargs):
return _get_memoized_value(func, args, kwargs)
return _inner
_cache_unique = {}
def unique(func, num_args=0, max_attempts=100, cache=None):
"""
wraps a function so that produce unique results
:param func:
:param num_args:
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
"""
if cache is None:
cache = _cache_unique
@wraps(func)
def wrapper(*args):
key = "%s_%s" % (str(func.__name__), str(args[:num_args]))
attempt = 0
while attempt < max_attempts:
attempt += 1
drawn = cache.get(key, [])
result = func(*args)
if result not in drawn:
drawn.append(result)
cache[key] = drawn
return result
raise MaxAttemptException()
return wrapper
|
/sample-data-utils-0.5.tar.gz/sample-data-utils-0.5/sample_data_utils/utils.py
| 0.877082 | 0.26479 |
utils.py
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sample_distributions-0.1.tar.gz/sample_distributions-0.1/sample_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
import os
import re
from collections import OrderedDict
from os.path import join
import numpy as np
from sample_factory.utils.utils import log
class ParamGenerator:
def __init__(self):
pass
def generate_params(self, randomize=True):
"""Supposed to be a generator (so should yield dicts of parameters)."""
pass
class ParamList(ParamGenerator):
"""The most simple kind of generator, represents just the list of parameter combinations."""
def __init__(self, combinations):
super(ParamList, self).__init__()
self.combinations = combinations
def generate_params(self, randomize=True):
if randomize:
combinations = np.random.permutation(self.combinations)
else:
combinations = self.combinations
for combination in combinations:
yield combination
class ParamGrid(ParamGenerator):
"""Parameter generator for grid search."""
def __init__(self, grid_tuples):
"""Uses OrderedDict, so must be initialized with the list of tuples if you want to preserve order."""
super(ParamGrid, self).__init__()
self.grid = OrderedDict(grid_tuples)
def _generate_combinations(self, param_idx, params):
"""Recursively generate all parameter combinations in a grid."""
if param_idx == len(self.grid) - 1:
# last parameter, just return list of values for this parameter
return [[value] for value in self.grid[params[param_idx]]]
else:
subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations
result = []
# iterate over all values of current parameter
for value in self.grid[params[param_idx]]:
for subcombination in subcombinations:
result.append([value] + subcombination)
return result
def generate_params(self, randomize=False):
if len(self.grid) == 0:
return dict()
# start with 0th value for every parameter
total_num_combinations = np.prod([len(p_values) for p_values in self.grid.values()])
param_names = tuple(self.grid.keys())
all_combinations = self._generate_combinations(0, param_names)
assert len(all_combinations) == total_num_combinations
if randomize:
all_combinations = np.random.permutation(all_combinations)
for combination in all_combinations:
combination_dict = dict()
for i, param_name in enumerate(param_names):
if isinstance(param_name, (list, tuple)):
for j, param in enumerate(param_name):
combination_dict[param] = combination[i][j]
else:
combination_dict[param_name] = combination[i]
yield combination_dict
class Experiment:
def __init__(self, name, cmd, param_generator=(), env_vars=None):
"""
:param cmd: base command to append the parameters to
:param param_generator: iterable of parameter dicts
"""
self.base_name = name
self.cmd = cmd
self.params = list(param_generator)
self.env_vars = env_vars
def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):
"""Yields tuples of (cmd, experiment_name)"""
num_experiments = 1 if len(self.params) == 0 else len(self.params)
for experiment_idx in range(num_experiments):
cmd_tokens = [self.cmd]
experiment_name_tokens = [self.base_name]
# abbreviations for parameter names that we've used
param_shorthands = []
if len(self.params) > 0:
params = self.params[experiment_idx]
for param, value in params.items():
param_str = f"{param_prefix}{param}={value}"
cmd_tokens.append(param_str)
param_tokens = re.split("[._-]", param)
shorthand_tokens = [t[0] for t in param_tokens[:-1]]
last_token_l = min(3, len(param_tokens[-1]))
shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]])
while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:
last_token_l += 1
shorthand = ".".join(shorthand_tokens + [param_tokens[-1][:last_token_l]])
param_shorthands.append(shorthand)
experiment_name_token = f"{shorthand}_{value}"
experiment_name_tokens.append(experiment_name_token)
if customize_experiment_name:
experiment_name = f"{experiment_idx:02d}_" + "_".join(experiment_name_tokens)
if len(experiment_name) > 100:
log.warning("Experiment name is extra long! (%d characters)", len(experiment_name))
else:
experiment_name = f"{experiment_idx:02d}_{self.base_name}"
cmd_tokens.append(f"{experiment_arg_name}={experiment_name}")
param_str = " ".join(cmd_tokens)
yield param_str, experiment_name
class RunDescription:
def __init__(
self,
run_name,
experiments,
experiment_arg_name="--experiment",
experiment_dir_arg_name="--train_dir",
customize_experiment_name=True,
param_prefix="--",
):
"""
:param run_name: overall name of the experiment and the name of the root folder
:param experiments: a list of Experiment objects to run
:param experiment_arg_name: CLI argument of the underlying experiment that determines it's unique name
to be generated by the launcher. Default: --experiment
:param experiment_dir_arg_name: CLI argument for the root train dir of your experiment. Default: --train_dir
:param customize_experiment_name: whether to add a hyperparameter combination to the experiment name
:param param_prefix: most experiments will use "--" prefix for each parameter, but some apps don't have this
prefix, i.e. with Hydra you should set it to empty string.
"""
self.run_name = run_name
self.experiments = experiments
self.experiment_suffix = ""
self.experiment_arg_name = experiment_arg_name
self.experiment_dir_arg_name = experiment_dir_arg_name
self.customize_experiment_name = customize_experiment_name
self.param_prefix = param_prefix
def generate_experiments(self, train_dir, makedirs=True):
"""Yields tuples (final cmd for experiment, experiment_name, root_dir)."""
for experiment in self.experiments:
root_dir = join(self.run_name, f"{experiment.base_name}_{self.experiment_suffix}")
experiment_cmds = experiment.generate_experiments(
self.experiment_arg_name, self.customize_experiment_name, self.param_prefix
)
for experiment_cmd, experiment_name in experiment_cmds:
experiment_dir = join(train_dir, root_dir)
if makedirs:
os.makedirs(experiment_dir, exist_ok=True)
experiment_cmd += f" {self.experiment_dir_arg_name}={experiment_dir}"
yield experiment_cmd, experiment_name, root_dir, experiment.env_vars
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/launcher/run_description.py
| 0.646572 | 0.369486 |
run_description.py
|
pypi
|
import multiprocessing
import os
from argparse import ArgumentParser
from os.path import join
from sample_factory.utils.utils import str2bool
def add_basic_cli_args(p: ArgumentParser):
p.add_argument("-h", "--help", action="store_true", help="Print the help message", required=False)
p.add_argument("--algo", type=str, default="APPO", help="Algorithm to use")
p.add_argument("--env", type=str, default=None, required=True, help="Name of the environment to use")
p.add_argument(
"--experiment",
type=str,
default="default_experiment",
help="Unique experiment name. This will also be the name for the experiment folder in the train dir."
"If the experiment folder with this name aleady exists the experiment will be RESUMED!"
"Any parameters passed from command line that do not match the parameters stored in the experiment config.json file will be overridden.",
)
p.add_argument("--train_dir", default=join(os.getcwd(), "train_dir"), type=str, help="Root for all experiments")
p.add_argument(
"--restart_behavior",
default="resume",
choices=["resume", "restart", "overwrite"],
type=str,
help='How to handle the experiment if the directory with the same name already exists. "resume" (default) will resume the experiment, '
'"restart" will preserve the existing experiment folder under a different name (with "old" suffix) and will start training from scratch, '
'"overwrite" will delete the existing experiment folder and start from scratch. '
"This parameter does not have any effect if the experiment directory does not exist.",
)
p.add_argument(
"--device",
default="gpu",
type=str,
choices=["gpu", "cpu"],
help="CPU training is only recommended for smaller e.g. MLP policies",
)
p.add_argument("--seed", default=None, type=int, help="Set a fixed seed value")
def add_rl_args(p: ArgumentParser):
"""Arguments not specific to any particular RL algorithm."""
# RL training system configuration (i.e. whether sync or async, etc.)
p.add_argument(
"--num_policies",
default=1,
type=int,
help="Number of policies to train jointly, i.e. for multi-agent environments",
)
p.add_argument(
"--async_rl",
default=True,
type=str2bool,
help="Collect experience asynchronously while learning on the previous batch. "
"This is significantly different from standard synchronous actor-critic (or PPO) because "
"not all of the experience will be collected by the latest policy thus increasing policy lag. "
"Negative effects of using async_rl can range from negligible (just grants you throughput boost) "
"to quite serious where you can consider switching it off. It all depends how sensitive your experiment is to policy lag. "
"Envs with complex action spaces and RNN policies tend to be particularly sensitive. ",
)
p.add_argument(
"--serial_mode",
default=False,
type=str2bool,
help="Enable serial mode: run everything completely synchronously in the same process",
)
p.add_argument(
"--batched_sampling",
default=False,
type=str2bool,
help="Batched sampling allows the data to be processed in big batches on the rollout worker."
"This is especially important for GPU-accelerated vectorized environments such as Megaverse or IsaacGym. "
"As a downside, in batched mode we do not support (for now) some of the features, such as population-based self-play "
"or inactive agents, plus each batched sampler (rollout worker) process only collects data for a single policy. "
"Another issue between batched/non-batched sampling is handling of infos. "
"In batched mode we assume that infos is a single dictionary of lists/tensors containing info for each environment in a vector. "
"If you need some complex info dictionary handling and your environment might return dicts with different keys, "
"on different rollout steps, you probably need non-batched mode.",
)
p.add_argument(
"--num_batches_to_accumulate",
default=2,
type=int,
help="This parameter governs the maximum number of training batches the learner can accumulate before further experience collection is stopped. "
"The default value will set this to 2, so if the experience collection is faster than the training, "
"the learner will accumulate enough minibatches for 2 iterations of training but no more. This is a good balance between policy-lag and throughput. "
"When the limit is reached, the learner will notify the actor workers that they ought to stop the experience collection until accumulated minibatches "
"are processed. Set this parameter to 1 to further reduce policy-lag. "
"If the experience collection is very non-uniform, increasing this parameter can increase overall throughput, at the cost of increased policy-lag.",
)
p.add_argument(
"--worker_num_splits",
default=2,
type=int,
help='Typically we split a vector of envs into two parts for "double buffered" experience collection '
"Set this to 1 to disable double buffering. Set this to 3 for triple buffering!",
)
p.add_argument(
"--policy_workers_per_policy",
default=1,
type=int,
help="Number of policy workers that compute forward pass (per policy)",
)
p.add_argument(
"--max_policy_lag",
default=1000,
type=int,
help="Max policy lag in policy versions. Discard all experience that is older than this.",
)
# RL algorithm data collection & learning regime (rollout length, batch size, etc.)
p.add_argument(
"--num_workers",
default=multiprocessing.cpu_count(),
type=int,
help="Number of parallel environment workers. Should be less than num_envs and should divide num_envs."
"Use this in async mode.",
)
p.add_argument(
"--num_envs_per_worker",
default=2,
type=int,
help="Number of envs on a single CPU actor, in high-throughput configurations this should be in 10-30 range for Atari/VizDoom"
"Must be even for double-buffered sampling!",
)
p.add_argument("--batch_size", default=1024, type=int, help="Minibatch size for SGD")
p.add_argument(
"--num_batches_per_epoch",
default=1,
type=int,
help="This determines the training dataset size for each iteration of training. We collect this many minibatches before performing any SGD. "
"Example: if batch_size=128 and num_batches_per_epoch=2, then learner will process 2*128=256 environment transitions in one training iteration.",
)
p.add_argument(
"--num_epochs",
default=1,
type=int,
help="Number of training epochs on a dataset of collected experiences of size batch_size x num_batches_per_epoch",
)
p.add_argument(
"--rollout",
default=32,
type=int,
help="Length of the rollout from each environment in timesteps."
"Once we collect this many timesteps on actor worker, we send this trajectory to the learner."
"The length of the rollout will determine how many timesteps are used to calculate bootstrapped"
"Monte-Carlo estimates of discounted rewards, advantages, GAE, or V-trace targets. Shorter rollouts"
"reduce variance, but the estimates are less precise (bias vs variance tradeoff)."
"For RNN policies, this should be a multiple of --recurrence, so every rollout will be split"
"into (n = rollout / recurrence) segments for backpropagation. V-trace algorithm currently requires that"
"rollout == recurrence, which what you want most of the time anyway."
"Rollout length is independent from the episode length. Episode length can be both shorter or longer than"
"rollout, although for PBT training it is currently recommended that rollout << episode_len"
"(see function finalize_trajectory in actor_worker.py)",
)
p.add_argument(
"--recurrence",
default=-1,
type=int,
help="Trajectory length for backpropagation through time. "
"Default value (-1) sets recurrence to rollout length for RNNs and to 1 (no recurrence) for feed-forward nets. "
"If you train with V-trace recurrence should be equal to rollout length.",
)
p.add_argument(
"--shuffle_minibatches",
default=False,
type=str2bool,
help="Whether to randomize and shuffle minibatches between iterations (this is a slow operation when batches are large, disabling this increases learner throughput when training with multiple epochs/minibatches per epoch)",
)
# basic RL parameters
p.add_argument("--gamma", default=0.99, type=float, help="Discount factor")
p.add_argument(
"--reward_scale",
default=1.0,
type=float,
help=(
"Multiply all rewards by this factor before feeding into RL algorithm."
"Sometimes the overall scale of rewards is too high which makes value estimation a harder regression task."
"Loss values become too high which requires a smaller learning rate, etc."
),
)
p.add_argument(
"--reward_clip",
default=1000.0,
type=float,
help="Clip rewards between [-c, c]. Default [-1000, 1000] should mean no clipping for most envs (unless rewards are very large/small)",
)
p.add_argument(
"--value_bootstrap",
default=False,
type=str2bool,
help="Bootstrap returns from value estimates if episode is terminated by timeout. More info here: https://github.com/Denys88/rl_games/issues/128",
)
p.add_argument(
"--normalize_returns",
default=True,
type=str2bool,
help="Whether to use running mean and standard deviation to normalize discounted returns",
)
# components of the loss function
p.add_argument(
"--exploration_loss_coeff",
default=0.003,
type=float,
help="Coefficient for the exploration component of the loss function.",
)
p.add_argument("--value_loss_coeff", default=0.5, type=float, help="Coefficient for the critic loss")
p.add_argument(
"--kl_loss_coeff",
default=0.0,
type=float,
help="Coefficient for fixed KL loss (as used by Schulman et al. in https://arxiv.org/pdf/1707.06347.pdf). "
"Highly recommended for environments with continuous action spaces.",
)
p.add_argument(
"--exploration_loss",
default="entropy",
type=str,
choices=["entropy", "symmetric_kl"],
help="Usually the exploration loss is based on maximizing the entropy of the probability"
" distribution. Note that mathematically maximizing entropy of the categorical probability "
"distribution is exactly the same as minimizing the (regular) KL-divergence between"
" this distribution and a uniform prior. The downside of using the entropy term "
"(or regular asymmetric KL-divergence) is the fact that penalty does not increase as "
"probabilities of some actions approach zero. I.e. numerically, there is almost "
"no difference between an action distribution with a probability epsilon > 0 for "
"some action and an action distribution with a probability = zero for this action."
" For many tasks the first (epsilon) distribution is preferrable because we keep some "
"(albeit small) amount of exploration, while the second distribution will never explore "
"this action ever again."
"Unlike the entropy term, symmetric KL divergence between the action distribution "
"and a uniform prior approaches infinity when entropy of the distribution approaches zero,"
" so it can prevent the pathological situations where the agent stops exploring. "
"Empirically, symmetric KL-divergence yielded slightly better results on some problems.",
)
# more specific to policy gradient algorithms or PPO
p.add_argument(
"--gae_lambda",
default=0.95,
type=float,
help="Generalized Advantage Estimation discounting (only used when V-trace is False)",
)
p.add_argument(
"--ppo_clip_ratio",
default=0.1,
type=float,
help="We use unbiased clip(x, 1+e, 1/(1+e)) instead of clip(x, 1+e, 1-e) in the paper",
)
p.add_argument(
"--ppo_clip_value",
default=1.0,
type=float,
help="Maximum absolute change in value estimate until it is clipped. Sensitive to value magnitude",
)
p.add_argument(
"--with_vtrace",
default=False,
type=str2bool,
help="Enables V-trace off-policy correction. If this is True, then GAE is not used",
)
p.add_argument(
"--vtrace_rho",
default=1.0,
type=float,
help="rho_hat clipping parameter of the V-trace algorithm (importance sampling truncation)",
)
p.add_argument(
"--vtrace_c",
default=1.0,
type=float,
help="c_hat clipping parameter of the V-trace algorithm. Low values for c_hat can reduce variance of the advantage estimates (similar to GAE lambda < 1)",
)
# optimization
p.add_argument("--optimizer", default="adam", type=str, choices=["adam", "lamb"], help="Type of optimizer to use")
p.add_argument(
"--adam_eps",
default=1e-6,
type=float,
help="Adam epsilon parameter (1e-8 to 1e-5 seem to reliably work okay, 1e-3 and up does not work)",
)
p.add_argument("--adam_beta1", default=0.9, type=float, help="Adam momentum decay coefficient")
p.add_argument("--adam_beta2", default=0.999, type=float, help="Adam second momentum decay coefficient")
p.add_argument(
"--max_grad_norm",
default=4.0,
type=float,
help="Max L2 norm of the gradient vector, set to 0 to disable gradient clipping",
)
# learning rate
p.add_argument("--learning_rate", default=1e-4, type=float, help="LR")
p.add_argument(
"--lr_schedule",
default="constant",
choices=["constant", "kl_adaptive_minibatch", "kl_adaptive_epoch"],
type=str,
help=(
"Learning rate schedule to use. Constant keeps constant learning rate throughout training."
"kl_adaptive* schedulers look at --lr_schedule_kl_threshold and if KL-divergence with behavior policy"
"after the last minibatch/epoch significantly deviates from this threshold, lr is apropriately"
"increased or decreased"
),
)
p.add_argument("--lr_schedule_kl_threshold", default=0.008, type=float, help="Used with kl_adaptive_* schedulers")
p.add_argument("--lr_adaptive_min", default=1e-6, type=float, help="Minimum learning rate")
p.add_argument(
"--lr_adaptive_max",
default=1e-2,
type=float,
help=(
"Maximum learning rate. This is the best value tuned for IsaacGymEnvs environments such as Ant/Humanoid, "
"but it can be too high for some other envs. Set this to 1e-3 if you see instabilities with adaptive LR, "
"especially if reported LR on Tensorboard reaches this max value before the instability happens."
),
)
# observation preprocessing
p.add_argument(
"--obs_subtract_mean",
default=0.0,
type=float,
help="Observation preprocessing, mean value to subtract from observation (e.g. 128.0 for 8-bit RGB)",
)
p.add_argument(
"--obs_scale",
default=1.0,
type=float,
help="Observation preprocessing, divide observation tensors by this scalar (e.g. 128.0 for 8-bit RGB)",
)
p.add_argument(
"--normalize_input",
default=True,
type=str2bool,
help="Whether to use running mean and standard deviation to normalize observations",
)
p.add_argument(
"--normalize_input_keys",
default=None,
type=str,
nargs="*",
help="Which observation keys to use for normalization. If None, all observation keys are used (be careful with this!)",
)
# decorrelating experience on startup (optional)
p.add_argument(
"--decorrelate_experience_max_seconds",
default=0,
type=int,
help='Decorrelating experience serves two benefits. First: this is better for learning because samples from workers come from random moments in the episode, becoming more "i.i.d".'
"Second, and more important one: this is good for environments with highly non-uniform one-step times, including long and expensive episode resets. If experience is not decorrelated"
"then training batches will come in bursts e.g. after a bunch of environments finished resets and many iterations on the learner might be required,"
"which will increase the policy-lag of the new experience collected. The performance of the Sample Factory is best when experience is generated as more-or-less"
"uniform stream. Try increasing this to 100-200 seconds to smoothen the experience distribution in time right from the beginning (it will eventually spread out and settle anyways)",
)
p.add_argument(
"--decorrelate_envs_on_one_worker",
default=True,
type=str2bool,
help="In addition to temporal decorrelation of worker processes, also decorrelate envs within one worker process. "
"For environments with a fixed episode length it can prevent the reset from happening in the same rollout for all envs simultaneously, which makes experience collection more uniform.",
)
# performance optimizations
p.add_argument(
"--actor_worker_gpus",
default=[],
type=int,
nargs="*",
help="By default, actor workers only use CPUs. Changes this if e.g. you need GPU-based rendering on the actors",
)
p.add_argument(
"--set_workers_cpu_affinity",
default=True,
type=str2bool,
help="Whether to assign workers to specific CPU cores or not. The logic is beneficial for most workloads because prevents a lot of context switching."
"However for some environments it can be better to disable it, to allow one worker to use all cores some of the time. This can be the case for some DMLab environments with very expensive episode reset"
"that can use parallel CPU cores for level generation.",
)
p.add_argument(
"--force_envs_single_thread",
default=False,
type=str2bool,
help="Some environments may themselves use parallel libraries such as OpenMP or MKL. Since we parallelize environments on the level of workers, there is no need to keep this parallel semantic."
"This flag uses threadpoolctl to force libraries such as OpenMP and MKL to use only a single thread within the environment."
"Enabling this is recommended unless you are running fewer workers than CPU cores. "
"threadpoolctl has caused a bunch of crashes in the past, so this feature is disabled by default at this moment.",
)
p.add_argument(
"--default_niceness",
default=0,
type=int,
help="Niceness of the highest priority process (the learner). Values below zero require elevated privileges.",
)
# logging and summaries
p.add_argument(
"--log_to_file",
default=True,
type=str2bool,
help="Whether to log to a file (sf_log.txt in the experiment folder) or not. If False, logs to stdout only. "
"It can make sense to disable this in a slow server filesystem environment like NFS.",
)
p.add_argument(
"--experiment_summaries_interval",
default=10,
type=int,
help="How often in seconds we write avg. statistics about the experiment (reward, episode length, extra stats...)",
)
p.add_argument(
"--flush_summaries_interval",
default=30,
type=int,
help="How often do we flush tensorboard summaries (set to higher value for slow NFS-based server filesystems)",
)
p.add_argument(
"--stats_avg",
default=100,
type=int,
help="How many episodes to average to measure performance (avg. reward etc)",
)
p.add_argument(
"--summaries_use_frameskip",
default=True,
type=str2bool,
help="Whether to multiply training steps by frameskip when recording summaries, FPS, etc. When this flag is set to True, x-axis for all summaries corresponds to the total number of simulated steps, i.e. with frameskip=4 the x-axis value of 4 million will correspond to 1 million frames observed by the policy.",
)
p.add_argument(
"--heartbeat_interval",
default=20,
type=int,
help="How often in seconds components send a heartbeat signal to the runner to verify they are not stuck",
)
p.add_argument(
"--heartbeat_reporting_interval",
default=180,
type=int,
help="How often in seconds the runner checks for heartbeats",
)
# experiment termination
p.add_argument(
"--train_for_env_steps",
default=int(1e10),
type=int,
help="Stop after all policies are trained for this many env steps",
)
p.add_argument("--train_for_seconds", default=int(1e10), type=int, help="Stop training after this many seconds")
# model saving
p.add_argument("--save_every_sec", default=120, type=int, help="Checkpointing rate")
p.add_argument("--keep_checkpoints", default=2, type=int, help="Number of model checkpoints to keep")
p.add_argument(
"--load_checkpoint_kind",
default="latest",
choices=["latest", "best"],
help="Whether to load from latest or best checkpoint",
)
p.add_argument(
"--save_milestones_sec",
default=-1,
type=int,
help="Save intermediate checkpoints in a separate folder for later evaluation (default=never)",
)
p.add_argument(
"--save_best_every_sec",
default=5,
type=int,
help="How often we check if we should save the policy with the best score ever",
)
p.add_argument(
"--save_best_metric",
default="reward",
help='Save "best" policies based on this metric (just env reward by default)',
)
p.add_argument(
"--save_best_after",
default=100000,
type=int,
help='Start saving "best" policies after this many env steps to filter lucky episodes that succeed and dominate the statistics early on',
)
# debugging options
p.add_argument("--benchmark", default=False, type=str2bool, help="Benchmark mode")
def add_model_args(p: ArgumentParser):
"""
Policy size, configuration, etc.
Model builder automatically detects whether we should use conv or MLP encoder, then we use parameters to spectify
settings for one or the other. If we're using MLP encoder, conv encoder settings will be ignored.
"""
# policy with vector observations - encoder options
p.add_argument(
"--encoder_mlp_layers",
default=[512, 512],
type=int,
nargs="*",
help="In case of MLP encoder, sizes of layers to use. This is ignored if observations are images. "
"To use this parameter from command line, omit the = sign and separate values with spaces, e.g. "
"--encoder_mlp_layers 256 128 64",
)
# policy with image observations - convolutional encoder options
p.add_argument(
"--encoder_conv_architecture",
default="convnet_simple",
choices=["convnet_simple", "convnet_impala", "convnet_atari", "resnet_impala"],
type=str,
help="Architecture of the convolutional encoder. See models.py for details. "
"VizDoom and DMLab examples demonstrate how to define custom architectures.",
)
p.add_argument(
"--encoder_conv_mlp_layers",
default=[512],
type=int,
nargs="*",
help="Optional fully connected layers after the convolutional encoder head.",
)
# model core settings (core is identity function if we're not using RNNs)
p.add_argument("--use_rnn", default=True, type=str2bool, help="Whether to use RNN core in a policy or not")
p.add_argument(
"--rnn_size",
default=512,
type=int,
help="Size of the RNN hidden state in recurrent model (e.g. GRU or LSTM)",
)
p.add_argument(
"--rnn_type",
default="gru",
choices=["gru", "lstm"],
type=str,
help="Type of RNN cell to use if use_rnn is True",
)
p.add_argument("--rnn_num_layers", default=1, type=int, help="Number of RNN layers to use if use_rnn is True")
# Decoder settings. Decoder appears between policy core (RNN) and action/critic heads.
p.add_argument(
"--decoder_mlp_layers",
default=[],
type=int,
nargs="*",
help="Optional decoder MLP layers after the policy core. If empty (default) decoder is identity function.",
)
p.add_argument(
"--nonlinearity", default="elu", choices=["elu", "relu", "tanh"], type=str, help="Type of nonlinearity to use."
)
p.add_argument(
"--policy_initialization",
default="orthogonal",
choices=["orthogonal", "xavier_uniform", "torch_default"],
type=str,
help="NN weight initialization",
)
p.add_argument(
"--policy_init_gain",
default=1.0,
type=float,
help="Gain parameter of PyTorch initialization schemas (i.e. Xavier)",
)
p.add_argument(
"--actor_critic_share_weights",
default=True,
type=str2bool,
help="Whether to share the weights between policy and value function",
)
p.add_argument(
"--adaptive_stddev",
default=True,
type=str2bool,
help="Only for continuous action distributions, whether stddev is state-dependent or just a single learned parameter",
)
p.add_argument(
"--continuous_tanh_scale",
default=0.0,
type=float,
help="Only for continuous action distributions, whether to use tanh squashing and what scale to use. "
"Applies tanh(mu / scale) * scale to distribution means. "
"Experimental. Currently only works with adaptive_stddev=False (TODO).",
)
p.add_argument(
"--initial_stddev",
default=1.0,
type=float,
help="Initial value for non-adaptive stddev. Only makes sense for continuous action spaces",
)
def add_default_env_args(p: ArgumentParser):
"""Configuration related to the environments, i.e. things that might be difficult to query from an environment instance."""
p.add_argument("--use_env_info_cache", default=False, type=str2bool, help="Whether to use cached env info")
p.add_argument(
"--env_gpu_actions",
default=False,
type=str2bool,
help="Set to true if environment expects actions on GPU (i.e. as a GPU-side PyTorch tensor)",
)
p.add_argument(
"--env_gpu_observations",
default=True,
type=str2bool,
help="Setting this to True together with non-empty --actor_worker_gpus will make observations GPU-side PyTorch tensors. "
"Otherwise data will be on CPU. For CPU-based envs just set --actor_worker_gpus to empty list then this parameter does not matter.",
)
p.add_argument(
"--env_frameskip",
default=1,
type=int,
help="Number of frames for action repeat (frame skipping). "
"Setting this to >1 will not add any wrappers that will do frame-skipping, although this can be used "
"in the environment factory function to add these wrappers or to tell the environment itself to skip a desired number of frames "
"i.e. as it is done in VizDoom. "
"FPS metrics will be multiplied by the frameskip value, i.e. 100000FPS with frameskip=4 actually corresponds to "
"100000/4=25000 samples per second observed by the policy. "
"Frameskip=1 (default) means no frameskip, we process every frame.",
)
p.add_argument(
"--env_framestack", default=1, type=int, help="Frame stacking (only used in Atari, and it is usually set to 4)"
) # <-- this probably should be moved to environment-specific scripts
p.add_argument(
"--pixel_format", default="CHW", type=str, help="PyTorch expects CHW by default, Ray & TensorFlow expect HWC"
)
p.add_argument(
"--use_record_episode_statistics",
default=False,
type=str2bool,
help="Whether to use gym RecordEpisodeStatistics wrapper to keep track of reward",
)
def add_eval_args(parser):
"""Evaluation-related arguments, i.e. only used when testing/visualizing policies rather than training them."""
parser.add_argument(
"--fps",
default=0,
type=int,
help="Enable rendering with adjustable FPS. Default (0) means default, e.g. for Doom its FPS (~35), or unlimited if not specified by env. Leave at 0 for Doom multiplayer evaluation",
)
parser.add_argument(
"--eval_env_frameskip",
default=None,
type=int,
help="Env frameskip to use during evaluation. "
"If not specified, we use the same frameskip as during training (env_frameskip). "
"For some envs (i.e. VizDoom) we can set this to 1 to get smoother env rendering during evaluation. "
"If eval_env_frameskip is different from env_frameskip, we will repeat actions during evaluation "
"env_frameskip / eval_env_frameskip times to match the training regime.",
)
parser.add_argument("--no_render", action="store_true", help="Do not render the environment during evaluation")
parser.add_argument("--save_video", action="store_true", help="Save video instead of rendering during evaluation")
parser.add_argument(
"--video_frames",
default=1e9,
type=int,
help="Number of frames to render for the video. Defaults to 1e9 which will be the same as having video_frames = max_num_frames. You can also set to -1 which only renders one episode",
)
parser.add_argument("--video_name", default=None, type=str, help="Name of video to save")
parser.add_argument("--max_num_frames", default=1e9, type=int, help="Maximum number of frames for evaluation")
parser.add_argument("--max_num_episodes", default=1e9, type=int, help="Maximum number of episodes for evaluation")
parser.add_argument("--push_to_hub", action="store_true", help="Push experiment folder to HuggingFace Hub")
parser.add_argument(
"--hf_repository",
default=None,
type=str,
help="The full repo_id to push to on the HuggingFace Hub. Must be of the form <username>/<repo_name>",
)
parser.add_argument(
"--policy_index", default=0, type=int, help="Policy to evaluate in case of multi-policy training"
)
parser.add_argument(
"--eval_deterministic",
default=False,
type=str2bool,
help="False to sample from action distributions at test time. True to just use the argmax.",
)
parser.add_argument(
"--train_script",
default=None,
type=str,
help="Module name used to run training script. Used to generate HF model card",
)
parser.add_argument(
"--enjoy_script",
default=None,
type=str,
help="Module name used to run training script. Used to generate HF model card",
)
def add_wandb_args(p: ArgumentParser):
"""Weights and Biases experiment monitoring."""
p.add_argument("--with_wandb", default=False, type=str2bool, help="Enables Weights and Biases integration")
p.add_argument(
"--wandb_user",
default=None,
type=str,
help="WandB username (entity). Must be specified from command line! Also see https://docs.wandb.ai/quickstart#1.-set-up-wandb",
)
p.add_argument("--wandb_project", default="sample_factory", type=str, help='WandB "Project"')
p.add_argument(
"--wandb_group",
default=None,
type=str,
help='WandB "Group" (to group your experiments). By default this is the name of the env.',
)
p.add_argument("--wandb_job_type", default="SF", type=str, help="WandB job type")
p.add_argument(
"--wandb_tags",
default=[],
type=str,
nargs="*",
help="Tags can help with finding experiments in WandB web console",
)
def add_pbt_args(p: ArgumentParser):
"""Population-based training (PBT) arguments."""
p.add_argument("--with_pbt", default=False, type=str2bool, help="Enables population-based training (PBT)")
p.add_argument(
"--pbt_mix_policies_in_one_env",
default=True,
type=str2bool,
help="For multi-agent envs, whether we mix different policies in one env.",
)
p.add_argument(
"--pbt_period_env_steps",
default=int(5e6),
type=int,
help="Periodically replace the worst policies with the best ones and perturb the hyperparameters",
)
p.add_argument(
"--pbt_start_mutation",
default=int(2e7),
type=int,
help="Allow initial diversification, start PBT after this many env steps",
)
p.add_argument(
"--pbt_replace_fraction",
default=0.3,
type=float,
help="A portion of policies performing worst to be replace by better policies (rounded up)",
)
p.add_argument("--pbt_mutation_rate", default=0.15, type=float, help="Probability that a parameter mutates")
p.add_argument(
"--pbt_replace_reward_gap",
default=0.1,
type=float,
help="Relative gap in true reward when replacing weights of the policy with a better performing one",
)
p.add_argument(
"--pbt_replace_reward_gap_absolute",
default=1e-6,
type=float,
help="Absolute gap in true reward when replacing weights of the policy with a better performing one",
)
p.add_argument(
"--pbt_optimize_gamma",
default=False,
type=str2bool,
help="Whether to optimize gamma, discount factor, or not (experimental)",
)
p.add_argument(
"--pbt_target_objective",
default="true_objective",
type=str,
help="Policy stat to optimize with PBT. true_objective (default) is equal to raw env reward if not specified, but can also be any other per-policy stat."
'For DMlab-30 use value "dmlab_target_objective" (which is capped human normalized score)',
)
p.add_argument(
"--pbt_perturb_min",
default=1.1,
type=float,
help="When PBT mutates a float hyperparam, it samples the change magnitude randomly from the uniform distribution [pbt_perturb_min, pbt_perturb_max]",
)
p.add_argument(
"--pbt_perturb_max",
default=1.5,
type=float,
help="When PBT mutates a float hyperparam, it samples the change magnitude randomly from the uniform distribution [pbt_perturb_min, pbt_perturb_max]",
)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/cfg/cfg.py
| 0.660172 | 0.337804 |
cfg.py
|
pypi
|
from typing import List
from signal_slot.signal_slot import EventLoop, EventLoopProcess
from sample_factory.algo.learning.learner_worker import init_learner_process
from sample_factory.algo.runners.runner import Runner
from sample_factory.algo.sampling.sampler import ParallelSampler
from sample_factory.algo.utils.context import sf_global_context
from sample_factory.algo.utils.misc import ExperimentStatus
from sample_factory.algo.utils.multiprocessing_utils import get_mp_ctx
from sample_factory.utils.typing import StatusCode
from sample_factory.utils.utils import log
class ParallelRunner(Runner):
def __init__(self, cfg):
super().__init__(cfg)
self.processes: List[EventLoopProcess] = []
def init(self) -> StatusCode:
status = super().init()
if status != ExperimentStatus.SUCCESS:
return status
mp_ctx = get_mp_ctx(self.cfg.serial_mode)
for policy_id in range(self.cfg.num_policies):
batcher_event_loop = EventLoop("batcher_evt_loop")
self.batchers[policy_id] = self._make_batcher(batcher_event_loop, policy_id)
batcher_event_loop.owner = self.batchers[policy_id]
learner_proc = EventLoopProcess(f"learner_proc{policy_id}", mp_ctx, init_func=init_learner_process)
self.processes.append(learner_proc)
self.learners[policy_id] = self._make_learner(
learner_proc.event_loop,
policy_id,
self.batchers[policy_id],
)
learner_proc.event_loop.owner = self.learners[policy_id]
learner_proc.set_init_func_args((sf_global_context(), self.learners[policy_id]))
self.sampler = self._make_sampler(ParallelSampler, self.event_loop)
self.connect_components()
return status
def _on_start(self):
self._start_processes()
super()._on_start()
def _start_processes(self):
log.debug("Starting all processes...")
for p in self.processes:
log.debug(f"Starting process {p.name}")
p.start()
self.event_loop.process_events()
def _on_everything_stopped(self):
for p in self.processes:
log.debug(f"Waiting for process {p.name} to stop...")
p.join()
self.sampler.join()
super()._on_everything_stopped()
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/runners/runner_parallel.py
| 0.746046 | 0.16043 |
runner_parallel.py
|
pypi
|
from __future__ import annotations
from typing import Dict, List, Optional, Tuple
import torch
from sample_factory.algo.utils.env_info import EnvInfo
from sample_factory.cfg.configurable import Configurable
from sample_factory.utils.attr_dict import AttrDict
from sample_factory.utils.gpu_utils import gpus_for_process
from sample_factory.utils.timing import Timing
from sample_factory.utils.typing import PolicyID
class VectorEnvRunner(Configurable):
def __init__(self, cfg: AttrDict, env_info: EnvInfo, worker_idx, split_idx, buffer_mgr, sampling_device: str):
super().__init__(cfg)
self.env_info: EnvInfo = env_info
self.worker_idx = worker_idx
self.split_idx = split_idx
self.rollout_step: int = 0 # current position in the rollout across all envs
self.env_step_ready = False
self.buffer_mgr = buffer_mgr
self.traj_buffer_queue = buffer_mgr.traj_buffer_queues[sampling_device]
self.traj_tensors = buffer_mgr.traj_tensors_torch[sampling_device]
self.policy_output_tensors = buffer_mgr.policy_output_tensors_torch[sampling_device][worker_idx, split_idx]
def init(self, timing: Timing):
raise NotImplementedError()
def advance_rollouts(self, policy_id: PolicyID, timing) -> Tuple[List[Dict], List[Dict]]:
raise NotImplementedError()
def update_trajectory_buffers(self, timing) -> bool:
raise NotImplementedError()
def generate_policy_request(self) -> Optional[Dict]:
raise NotImplementedError()
def synchronize_devices(self) -> None:
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def rollout_worker_device(worker_idx, cfg: AttrDict, env_info: EnvInfo) -> torch.device:
# TODO: this should correspond to whichever device we have observations on, not just whether we use this device at all
# TODO: test with Megaverse on a multi-GPU system
# TODO: actions on a GPU device? Convert to CPU for some envs?
if not env_info.gpu_observations:
return torch.device("cpu")
gpus_to_use = gpus_for_process(worker_idx, num_gpus_per_process=1, gpu_mask=cfg.actor_worker_gpus)
assert len(gpus_to_use) <= 1
sampling_device = torch.device("cuda", index=gpus_to_use[0]) if gpus_to_use else torch.device("cpu")
return sampling_device
def record_episode_statistics_wrapper_stats(info: Dict) -> Optional[Tuple[float, float]]:
"""
Some envs like Atari use a special wrapper gym.wrappers.RecordEpisodeStatistics to record episode stats.
This accounts for things like reward clipping in the wrappers or frameskip affecting length.
"""
if ep_info := info.get("episode", None):
return ep_info["r"], ep_info["l"]
return None
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/sampling/sampling_utils.py
| 0.81582 | 0.237267 |
sampling_utils.py
|
pypi
|
from abc import ABC
from typing import Callable, Dict, List
from signal_slot.queue_utils import get_queue
from signal_slot.signal_slot import BoundMethod, EventLoop, EventLoopObject, EventLoopProcess, signal
from sample_factory.algo.sampling.inference_worker import InferenceWorker, init_inference_process
from sample_factory.algo.sampling.rollout_worker import RolloutWorker, init_rollout_worker_process
from sample_factory.algo.utils.context import sf_global_context
from sample_factory.algo.utils.env_info import EnvInfo
from sample_factory.algo.utils.heartbeat import HeartbeatStoppableEventLoopObject
from sample_factory.algo.utils.misc import advance_rollouts_signal, new_trajectories_signal
from sample_factory.algo.utils.model_sharing import ParameterServer
from sample_factory.algo.utils.multiprocessing_utils import get_mp_ctx
from sample_factory.algo.utils.shared_buffers import BufferMgr
from sample_factory.cfg.configurable import Configurable
from sample_factory.utils.typing import Config, MpQueue, PolicyID
from sample_factory.utils.utils import log
class AbstractSampler(EventLoopObject, Configurable):
def __init__(
self,
event_loop: EventLoop,
buffer_mgr: BufferMgr,
param_servers: Dict[PolicyID, ParameterServer],
cfg: Config,
env_info: EnvInfo,
unique_name: str,
):
EventLoopObject.__init__(self, event_loop, object_id=unique_name)
Configurable.__init__(self, cfg)
self.buffer_mgr: BufferMgr = buffer_mgr
self.policy_param_server: Dict[PolicyID, ParameterServer] = param_servers
self.env_info: EnvInfo = env_info
@signal
def started(self):
...
@signal
def initialized(self):
...
def init(self) -> None:
raise NotImplementedError()
def connect_model_initialized(self, policy_id: PolicyID, model_initialized_signal: signal) -> None:
raise NotImplementedError()
def connect_on_new_trajectories(self, policy_id: PolicyID, on_new_trajectories_handler: BoundMethod) -> None:
raise NotImplementedError()
def connect_trajectory_buffers_available(self, buffers_available_signal: signal) -> None:
raise NotImplementedError()
def connect_stop_experience_collection(self, stop_experience_collection_signal: signal) -> None:
raise NotImplementedError()
def connect_resume_experience_collection(self, resume_experience_collection_signal: signal) -> None:
raise NotImplementedError()
def connect_report_msg(self, report_msg_handler: Callable) -> None:
raise NotImplementedError()
def connect_update_training_info(self, update_training_info_signal: signal) -> None:
raise NotImplementedError()
def stoppable_components(self) -> List[HeartbeatStoppableEventLoopObject]:
raise NotImplementedError()
def heartbeat_components(self) -> List[HeartbeatStoppableEventLoopObject]:
raise NotImplementedError()
def join(self) -> None:
"""This is where we could join processes or threads if sampler starts any."""
raise NotImplementedError()
class Sampler(AbstractSampler, ABC):
def __init__(
self,
event_loop: EventLoop,
buffer_mgr: BufferMgr,
param_servers: Dict[PolicyID, ParameterServer],
cfg: Config,
env_info: EnvInfo,
):
unique_name = Sampler.__name__
AbstractSampler.__init__(self, event_loop, buffer_mgr, param_servers, cfg, env_info, unique_name)
self.inference_queues: Dict[PolicyID, MpQueue] = {
p: get_queue(cfg.serial_mode) for p in range(self.cfg.num_policies)
}
self.inference_workers: Dict[PolicyID, List[InferenceWorker]] = dict()
self.rollout_workers: List[RolloutWorker] = []
# internal signals used for communication with the workers, these are not a part of the interface
@signal
def _init_inference_workers(self):
...
@signal
def _inference_workers_initialized(self):
...
def _make_inference_worker(self, event_loop, policy_id: PolicyID, worker_idx: int, param_server: ParameterServer):
return InferenceWorker(
event_loop,
policy_id,
worker_idx,
self.buffer_mgr,
param_server,
self.inference_queues[policy_id],
self.cfg,
self.env_info,
)
def _make_rollout_worker(self, event_loop, worker_idx: int):
return RolloutWorker(event_loop, worker_idx, self.buffer_mgr, self.inference_queues, self.cfg, self.env_info)
def _for_each_inference_worker(self, func: Callable[[InferenceWorker], None]) -> None:
for policy_id in range(self.cfg.num_policies):
for inference_worker in self.inference_workers[policy_id]:
func(inference_worker)
def _for_each_rollout_worker(self, func: Callable[[RolloutWorker], None]) -> None:
for rollout_worker in self.rollout_workers:
func(rollout_worker)
def _for_each_worker(self, func: Callable[[HeartbeatStoppableEventLoopObject], None]) -> None:
self._for_each_inference_worker(func)
self._for_each_rollout_worker(func)
def _connect_internal_components(self):
"""Setup basic signal-slot internal connections specfic for the sampler."""
self._for_each_inference_worker(lambda w: w.initialized.connect(self._inference_worker_ready))
for rollout_worker_idx in range(self.cfg.num_workers):
# once all learners and inference workers are initialized, we can initialize rollout workers
rollout_worker = self.rollout_workers[rollout_worker_idx]
self._inference_workers_initialized.connect(rollout_worker.init)
# inference worker signals to advance rollouts when actions are ready
for policy_id in range(self.cfg.num_policies):
for inference_worker_idx in range(self.cfg.policy_workers_per_policy):
self.inference_workers[policy_id][inference_worker_idx].connect(
advance_rollouts_signal(rollout_worker_idx),
rollout_worker.advance_rollouts,
)
# We also connect to our own advance_rollouts signal to avoid getting stuck when we have nothing
# to send to the inference worker. This can happen if we have an entire trajectory of inactive agents.
rollout_worker.connect(advance_rollouts_signal(rollout_worker_idx), rollout_worker.advance_rollouts)
def connect_model_initialized(self, policy_id: PolicyID, model_initialized_signal: signal) -> None:
for inference_worker in self.inference_workers[policy_id]:
model_initialized_signal.connect(inference_worker.init)
def connect_on_new_trajectories(self, policy_id: PolicyID, on_new_trajectories_handler: BoundMethod) -> None:
signal_name = new_trajectories_signal(policy_id)
self._for_each_rollout_worker(lambda w: w.connect(signal_name, on_new_trajectories_handler))
def connect_trajectory_buffers_available(self, buffers_available_signal: signal) -> None:
self._for_each_rollout_worker(lambda w: buffers_available_signal.connect(w.on_trajectory_buffers_available))
def connect_stop_experience_collection(self, stop_collect_signal: signal) -> None:
self._for_each_inference_worker(lambda w: stop_collect_signal.connect(w.should_stop_experience_collection))
def connect_resume_experience_collection(self, resume_collect_signal: signal) -> None:
self._for_each_inference_worker(lambda w: resume_collect_signal.connect(w.should_resume_experience_collection))
def connect_report_msg(self, report_msg_handler: BoundMethod) -> None:
self._for_each_inference_worker(lambda w: w.report_msg.connect(report_msg_handler))
self._for_each_rollout_worker(lambda w: w.report_msg.connect(report_msg_handler))
def connect_update_training_info(self, update_training_info: signal) -> None:
self._for_each_rollout_worker(lambda w: update_training_info.connect(w.on_update_training_info))
def _inference_worker_ready(self, policy_id: PolicyID, worker_idx: int):
assert not self.inference_workers[policy_id][worker_idx].is_ready
log.info(f"Inference worker {policy_id}-{worker_idx} is ready!")
self.inference_workers[policy_id][worker_idx].is_ready = True
# check if all workers for all policies are ready
all_ready = True
for policy_id in range(self.cfg.num_policies):
all_ready &= all(w.is_ready for w in self.inference_workers[policy_id])
if all_ready:
log.info("All inference workers are ready! Signal rollout workers to start!")
self._inference_workers_initialized.emit()
# during initialization it serves no purpose to wait for all rollout workers to finish initialization,
# instead we can just report that we are ready and rollout workers will start producing trajectories
# as soon as all env.reset() calls are done
self.initialized.emit()
def stoppable_components(self) -> List[HeartbeatStoppableEventLoopObject]:
stoppable = []
self._for_each_worker(lambda w: stoppable.append(w))
return stoppable
def heartbeat_components(self) -> List[HeartbeatStoppableEventLoopObject]:
heartbeat = []
self._for_each_worker(lambda w: heartbeat.append(w))
return heartbeat
class SerialSampler(Sampler):
def __init__(
self,
event_loop: EventLoop,
buffer_mgr,
param_servers: Dict[PolicyID, ParameterServer],
cfg: Config,
env_info: EnvInfo,
):
Sampler.__init__(self, event_loop, buffer_mgr, param_servers, cfg, env_info)
for policy_id in range(self.cfg.num_policies):
self.inference_workers[policy_id] = []
for i in range(self.cfg.policy_workers_per_policy):
param_server = self.policy_param_server[policy_id]
inference_worker = self._make_inference_worker(self.event_loop, policy_id, i, param_server)
self.inference_workers[policy_id].append(inference_worker)
for i in range(self.cfg.num_workers):
rollout_worker = self._make_rollout_worker(self.event_loop, i)
self.rollout_workers.append(rollout_worker)
self._connect_internal_components()
def init(self) -> None:
self.started.emit()
def join(self) -> None:
pass
class ParallelSampler(Sampler):
def __init__(
self,
event_loop: EventLoop,
buffer_mgr,
param_servers: Dict[PolicyID, ParameterServer],
cfg: Config,
env_info: EnvInfo,
):
Sampler.__init__(self, event_loop, buffer_mgr, param_servers, cfg, env_info)
self.processes: List[EventLoopProcess] = []
mp_ctx = get_mp_ctx(cfg.serial_mode)
for policy_id in range(self.cfg.num_policies):
self.inference_workers[policy_id] = []
for i in range(self.cfg.policy_workers_per_policy):
inference_proc = EventLoopProcess(
f"inference_proc{policy_id}-{i}", mp_ctx, init_func=init_inference_process
)
self.processes.append(inference_proc)
inference_worker = self._make_inference_worker(
inference_proc.event_loop,
policy_id,
i,
self.policy_param_server[policy_id],
)
inference_proc.event_loop.owner = inference_worker
inference_proc.set_init_func_args((sf_global_context(), inference_worker))
self.inference_workers[policy_id].append(inference_worker)
for i in range(self.cfg.num_workers):
rollout_proc = EventLoopProcess(f"rollout_proc{i}", mp_ctx, init_func=init_rollout_worker_process)
self.processes.append(rollout_proc)
rollout_worker = self._make_rollout_worker(rollout_proc.event_loop, i)
rollout_proc.event_loop.owner = rollout_worker
rollout_proc.set_init_func_args((sf_global_context(), rollout_worker))
self.rollout_workers.append(rollout_worker)
self._connect_internal_components()
def init(self) -> None:
log.debug("Starting all processes...")
def start_process(p):
log.debug(f"Starting process {p.name}")
p.start()
pool_size = min(16, len(self.processes))
from multiprocessing.pool import ThreadPool
with ThreadPool(pool_size) as pool:
pool.map(start_process, self.processes)
self.started.emit()
def join(self) -> None:
for p in self.processes:
log.debug(f"Waiting for process {p.name} to join...")
p.join()
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/sampling/sampler.py
| 0.874801 | 0.161883 |
sampler.py
|
pypi
|
import sys
from typing import Optional
import torch
from torch import Tensor
from sample_factory.algo.utils.multiprocessing_utils import get_lock, get_mp_ctx
from sample_factory.model.actor_critic import create_actor_critic
from sample_factory.utils.timing import Timing
from sample_factory.utils.utils import log
class ParameterServer:
def __init__(self, policy_id, policy_versions: Tensor, serial_mode: bool):
self.policy_id = policy_id
self.actor_critic = None
self.policy_versions = policy_versions
self.device: Optional[torch.device] = None
mp_ctx = get_mp_ctx(serial_mode)
self._policy_lock = get_lock(serial_mode, mp_ctx)
@property
def policy_lock(self):
return self._policy_lock
def init(self, actor_critic, policy_version, device: torch.device):
self.actor_critic = actor_critic
self.policy_versions[self.policy_id] = policy_version
self.device = device
log.debug("Initialized policy %d weights for model version %d", self.policy_id, policy_version)
def update_weights(self, policy_version):
"""
In async algorithms policy_versions tensor is in shared memory.
Therefore clients can just look at the location in shared memory once in a while to see if the
weights are updated.
"""
self.policy_versions[self.policy_id] = policy_version
class ParameterClient:
def __init__(self, param_server: ParameterServer, cfg, env_info, timing: Timing):
self.server = param_server
self.policy_id = param_server.policy_id
self.policy_versions = param_server.policy_versions
self.cfg = cfg
self.env_info = env_info
self.latest_policy_version = -1
self._actor_critic = None
self._policy_lock = param_server.policy_lock
self.timing = timing
@property
def actor_critic(self):
return self._actor_critic
@property
def policy_version(self):
return self.latest_policy_version
def _get_server_policy_version(self):
return self.policy_versions[self.policy_id].item()
def on_weights_initialized(self, state_dict, device: torch.device, policy_version: int) -> None:
self.latest_policy_version = policy_version
def ensure_weights_updated(self):
raise NotImplementedError()
def cleanup(self):
pass
class ParameterClientSerial(ParameterClient):
def on_weights_initialized(self, state_dict, device: torch.device, policy_version: int) -> None:
"""
Literally just save the reference to actor critic since we're in the same process.
Model should be fully initialized at this point.
"""
super().on_weights_initialized(state_dict, device, policy_version)
self._actor_critic = self.server.actor_critic
def ensure_weights_updated(self):
"""In serial case we don't need to do anything."""
self.latest_policy_version = self._get_server_policy_version()
class ParameterClientAsync(ParameterClient):
def __init__(self, param_server: ParameterServer, cfg, env_info, timing: Timing):
super().__init__(param_server, cfg, env_info, timing)
self._shared_model_weights = None
self.num_policy_updates = 0
@property
def actor_critic(self):
assert self.latest_policy_version >= 0, "Trying to access actor critic before it is initialized"
return self._actor_critic
def _init_local_copy(self, device, cfg, obs_space, action_space):
self._actor_critic = create_actor_critic(cfg, obs_space, action_space)
self._actor_critic.model_to_device(device)
for p in self._actor_critic.parameters():
p.requires_grad = False # we don't train anything here
self._actor_critic.eval()
def on_weights_initialized(self, state_dict, device: torch.device, policy_version: int) -> None:
super().on_weights_initialized(state_dict, device, policy_version)
self._init_local_copy(device, self.cfg, self.env_info.obs_space, self.env_info.action_space)
with self._policy_lock:
if state_dict is None:
log.warning(f"Parameter client {self.policy_id} received empty state dict, using random weights...")
else:
self._actor_critic.load_state_dict(state_dict)
self._shared_model_weights = state_dict
def ensure_weights_updated(self):
server_policy_version = self._get_server_policy_version()
if self.latest_policy_version < server_policy_version and self._shared_model_weights is not None:
with self.timing.time_avg("weight_update"), self._policy_lock:
self._actor_critic.load_state_dict(self._shared_model_weights)
self.latest_policy_version = server_policy_version
self.num_policy_updates += 1
if self.num_policy_updates % 10 == 0:
log.info(
"Updated weights for policy %d, policy_version %d (%s)",
self.policy_id,
self.latest_policy_version,
str(self.timing.weight_update),
)
def cleanup(self):
# TODO: fix termination problems related to shared CUDA tensors (they are harmless but annoying)
weights = self._shared_model_weights
del self._actor_critic
del self._shared_model_weights
del self.policy_versions
if weights is not None:
import gc
weights_referrers = gc.get_referrers(weights)
log.debug(f"Weights refcount: {sys.getrefcount(weights)} {len(weights_referrers)}")
def make_parameter_client(is_serial_mode, parameter_server, cfg, env_info, timing: Timing) -> ParameterClient:
"""Parameter client factory."""
cls = ParameterClientSerial if is_serial_mode else ParameterClientAsync
return cls(parameter_server, cfg, env_info, timing)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/model_sharing.py
| 0.84759 | 0.236693 |
model_sharing.py
|
pypi
|
import math
from typing import Callable, Optional, Tuple
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
def __init__(
self,
params,
bias_correction=True,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=1e-4,
min_trust=0.01,
use_look_ahead=False,
look_ahead_alpha=0.5,
look_ahead_k=10,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= min_trust <= 1.0:
raise ValueError("min_trust must be in [0, 1], got {}".format(min_trust))
defaults = dict(
lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
min_trust=min_trust,
use_look_ahead=use_look_ahead,
look_ahead_alpha=look_ahead_alpha,
look_ahead_k=look_ahead_k,
)
super().__init__(params, defaults)
def zero_grad(self, **kwargs):
for group in self.param_groups:
for p in group["params"]:
if p.grad is not None:
p.grad.zero_()
def _compute_adam_step(self, group, p, weight_decay, use_look_ahead):
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
state["step"] = 1
if use_look_ahead:
state["slow_param"] = p.data.clone()
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
# v_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
m = exp_avg.clone()
v = exp_avg_sq.sqrt()
if group["bias_correction"]:
m = m.mul_(1 / (1 - beta1 ** state["step"]))
v = v.mul_(1 / math.sqrt(1 - beta2 ** state["step"]))
adam_step = m.div_(v.add_(group["eps"]))
if weight_decay > 0:
adam_step.add_(p.data, alpha=weight_decay)
return adam_step
def _step_list_params(self, group):
min_trust = group["min_trust"]
weight_decay = group["weight_decay"]
step_size = group["lr"]
alpha = group["look_ahead_alpha"]
k = group["look_ahead_k"]
use_look_ahead = group["use_look_ahead"]
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Lamb does not support sparse gradients, consider SparseAdam instead.")
adam_step = self._compute_adam_step(group, p, weight_decay, use_look_ahead)
if min_trust != 1.0:
weight_norm = torch.norm(p.data.detach()).item()
step_norm = torch.norm(adam_step.detach()).item()
if weight_norm == 0 or step_norm == 0 or min_trust == 1.0:
trust_ratio = 1
else:
trust_ratio = min(weight_norm, 10.0) / step_norm
trust_ratio = min(max(trust_ratio, min_trust), 1.0 / min_trust)
else:
trust_ratio = 1.0
state = self.state[p]
p.data.add_(adam_step, alpha=-step_size * trust_ratio)
if use_look_ahead and (state["step"] % k) == 0:
state["slow_param"].mul_(1 - alpha).add_(p.data, alpha=alpha)
p.data.copy_(state["slow_param"])
state["step"] += 1
def _step_flat_params(self, group):
min_trust = group["min_trust"]
weight_decay = group["weight_decay"]
step_size = group["lr"]
alpha = group["look_ahead_alpha"]
k = group["look_ahead_k"]
use_look_ahead = group["use_look_ahead"]
adam_step = self._compute_adam_step(group, group["params"][0], weight_decay, use_look_ahead)
if min_trust != 1.0:
ptr = 0
for p in group["list_params"]:
weight_norm = torch.norm(p.data.detach()).item()
step_norm = torch.norm(adam_step[ptr : ptr + p.numel()].data.detach()).item()
if weight_norm == 0 or step_norm == 0 or min_trust == 1.0:
trust_ratio = 1
else:
trust_ratio = min(weight_norm, 10.0) / step_norm
trust_ratio = min(max(trust_ratio, min_trust), 1.0 / min_trust)
adam_step[ptr : ptr + p.numel()].mul_(trust_ratio)
ptr += p.numel()
p = group["params"][0]
state = self.state[p]
p.data.add_(adam_step, alpha=-step_size)
if use_look_ahead and (state["step"] % k) == 0:
state["slow_param"].mul_(1 - alpha).add_(p.data, alpha=alpha)
p.data.copy_(state["slow_param"])
state["step"] += 1
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
if "list_params" in group:
self._step_flat_params(group)
else:
self._step_list_params(group)
return loss
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/optimizers.py
| 0.845496 | 0.273344 |
optimizers.py
|
pypi
|
from __future__ import annotations
from typing import Dict, Optional, Sequence, Union
import numpy as np
import torch
from torch import Tensor
from torch.nn import Module
from sample_factory.algo.utils.env_info import EnvInfo
from sample_factory.algo.utils.tensor_dict import TensorDict
from sample_factory.algo.utils.tensor_utils import ensure_torch_tensor
from sample_factory.utils.typing import Config
def trajectories_per_minibatch(cfg: Config) -> int:
return cfg.batch_size // cfg.rollout
def trajectories_per_training_iteration(cfg: Config) -> int:
return cfg.num_batches_per_epoch * trajectories_per_minibatch(cfg)
def total_num_envs(cfg: Config) -> int:
return cfg.num_workers * cfg.num_envs_per_worker
def total_num_agents(cfg: Config, env_info: EnvInfo) -> int:
return total_num_envs(cfg) * env_info.num_agents
def num_agents_per_worker(cfg: Config, env_info: EnvInfo) -> int:
return cfg.num_envs_per_worker * env_info.num_agents
def prepare_and_normalize_obs(model: Module, obs: TensorDict | Dict[str, Tensor]) -> TensorDict | Dict[str, Tensor]:
for key, x in obs.items():
obs[key] = ensure_torch_tensor(x).to(model.device_for_input_tensor(key))
normalized_obs = model.normalize_obs(obs)
for key, x in normalized_obs.items():
normalized_obs[key] = x.type(model.type_for_input_tensor(key))
return normalized_obs
def samples_per_trajectory(trajectory: TensorDict) -> int:
shape = trajectory["rewards"].shape
batch, rollout = shape[0], shape[1]
return batch * rollout
@torch.jit.script
def calculate_discounted_sum_torch(
x: Tensor, dones: Tensor, valids: Tensor, discount: float, x_last: Optional[Tensor] = None
) -> Tensor:
"""
Computing cumulative sum (of something) for the trajectory, taking episode termination into consideration.
"""
if x_last is None:
x_last = x[-1].clone().fill_(0.0)
cumulative = x_last
discounted_sum = torch.zeros_like(x)
i = len(x) - 1
while i >= 0:
# do not discount invalid steps so we can entirely skip a part of the trajectory
# x should be already multiplied by valids
discount_valid = discount * valids[i] + (1 - valids[i])
cumulative = x[i] + discount_valid * cumulative * (1.0 - dones[i])
discounted_sum[i] = cumulative
i -= 1
return discounted_sum
# noinspection NonAsciiCharacters
@torch.jit.script
def gae_advantages(rewards: Tensor, dones: Tensor, values: Tensor, valids: Tensor, γ: float, λ: float) -> Tensor:
rewards = rewards.transpose(0, 1) # [E, T] -> [T, E]
dones = dones.transpose(0, 1).float() # [E, T] -> [T, E]
values = values.transpose(0, 1) # [E, T+1] -> [T+1, E]
valids = valids.transpose(0, 1).float() # [E, T+1] -> [T+1, E]
assert len(rewards) == len(dones)
assert len(rewards) + 1 == len(values)
# section 3 in GAE paper: calculating advantages
deltas = (rewards - values[:-1]) * valids[:-1] + (1 - dones) * (γ * values[1:] * valids[1:])
advantages = calculate_discounted_sum_torch(deltas, dones, valids[:-1], γ * λ)
# transpose advantages back to [E, T] before creating a single experience buffer
advantages.transpose_(0, 1)
return advantages
DonesType = Union[bool, np.ndarray, Tensor, Sequence[bool]]
def make_dones(terminated: DonesType, truncated: DonesType) -> DonesType:
"""
Make dones from terminated/truncated (gym 0.26.0 changes).
Assumes that terminated and truncated are the same type and shape.
"""
if isinstance(terminated, (bool, np.ndarray, Tensor)):
return terminated | truncated
elif isinstance(terminated, Sequence):
return [t | truncated[i] for i, t in enumerate(terminated)]
raise ValueError(f"Unknown {type(terminated)=}")
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/rl_utils.py
| 0.933294 | 0.682349 |
rl_utils.py
|
pypi
|
from typing import Dict, Final, List, Optional, Union
import gymnasium as gym
import torch
import torch.nn as nn
from torch import Tensor
from torch.jit import RecursiveScriptModule, ScriptModule
from sample_factory.utils.utils import log
_NORM_EPS = 1e-5
_DEFAULT_CLIP = 5.0
# noinspection PyAttributeOutsideInit,NonAsciiCharacters
class RunningMeanStdInPlace(nn.Module):
def __init__(self, input_shape, epsilon=_NORM_EPS, clip=_DEFAULT_CLIP, per_channel=False, norm_only=False):
super().__init__()
log.debug("RunningMeanStd input shape: %r", input_shape)
self.input_shape: Final = input_shape
self.eps: Final[float] = epsilon
self.clip: Final[float] = clip
self.norm_only: Final[bool] = norm_only
self.per_channel: Final[bool] = per_channel
if per_channel:
if len(self.input_shape) == 3:
self.axis = [0, 2, 3]
if len(self.input_shape) == 2:
self.axis = [0, 2]
if len(self.input_shape) == 1:
self.axis = [0]
shape = self.input_shape[0]
else:
self.axis = [0]
shape = input_shape
self.register_buffer("running_mean", torch.zeros(shape, dtype=torch.float64))
self.register_buffer("running_var", torch.ones(shape, dtype=torch.float64))
self.register_buffer("count", torch.ones([1], dtype=torch.float64))
@staticmethod
@torch.jit.script
def _update_mean_var_count_from_moments(
mean: Tensor, var: Tensor, count: Tensor, batch_mean: Tensor, batch_var: Tensor, batch_count: int
):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + (delta**2) * count * batch_count / tot_count
new_var = M2 / tot_count
return new_mean, new_var, tot_count
def forward(self, x: Tensor, denormalize: bool = False) -> None:
"""Normalizes in-place! This function modifies the input tensor and returns nothing."""
if self.training and not denormalize:
# check if the shape exactly matches or it's a scalar for which we use shape (1, )
assert x.shape[1:] == self.input_shape or (
x.shape[1:] == () and self.input_shape == (1,)
), f"RMS expected input shape {self.input_shape}, got {x.shape[1:]}"
batch_count = x.size()[0]
μ = x.mean(self.axis) # along channel axis
σ2 = x.var(self.axis)
self.running_mean[:], self.running_var[:], self.count[:] = self._update_mean_var_count_from_moments(
self.running_mean, self.running_var, self.count, μ, σ2, batch_count
)
# change shape
if self.per_channel:
if len(self.input_shape) == 3:
current_mean = self.running_mean.view([1, self.input_shape[0], 1, 1]).expand_as(x)
current_var = self.running_var.view([1, self.input_shape[0], 1, 1]).expand_as(x)
elif len(self.input_shape) == 2:
current_mean = self.running_mean.view([1, self.input_shape[0], 1]).expand_as(x)
current_var = self.running_var.view([1, self.input_shape[0], 1]).expand_as(x)
elif len(self.input_shape) == 1:
current_mean = self.running_mean.view([1, self.input_shape[0]]).expand_as(x)
current_var = self.running_var.view([1, self.input_shape[0]]).expand_as(x)
else:
raise RuntimeError(f"RunningMeanStd input shape {self.input_shape} not supported")
else:
current_mean = self.running_mean
current_var = self.running_var
μ = current_mean.float()
σ2 = current_var.float()
σ = torch.sqrt(σ2 + self.eps)
clip = self.clip
if self.norm_only:
if denormalize:
x.mul_(σ)
else:
x.mul_(1 / σ)
else:
if denormalize:
x.clamp_(-clip, clip).mul_(σ).add_(μ)
else:
x.sub_(μ).mul_(1 / σ).clamp_(-clip, clip)
class RunningMeanStdDictInPlace(nn.Module):
def __init__(
self,
obs_space: gym.spaces.Dict,
keys_to_normalize: Optional[List[str]] = None,
epsilon=_NORM_EPS,
clip=_DEFAULT_CLIP,
per_channel=False,
norm_only=False,
):
super(RunningMeanStdDictInPlace, self).__init__()
self.obs_space: Final = obs_space
self.running_mean_std = nn.ModuleDict(
{
k: RunningMeanStdInPlace(space.shape, epsilon, clip, per_channel, norm_only)
for k, space in obs_space.spaces.items()
if keys_to_normalize is None or k in keys_to_normalize
}
)
def forward(self, x: Dict[str, Tensor]) -> None:
"""Normalize in-place!"""
for k, module in self.running_mean_std.items():
module(x[k])
def running_mean_std_summaries(running_mean_std_module: Union[nn.Module, ScriptModule, RecursiveScriptModule]):
m = running_mean_std_module
res = dict()
for name, buf in m.named_buffers():
# converts MODULE_NAME.running_mean_std.obs.running_mean to obs.running_mean
name = "_".join(name.split(".")[-2:])
if name.endswith("running_mean"):
res[name] = buf.float().mean()
elif name.endswith("running_var"):
res[name.replace("_var", "_std")] = torch.sqrt(buf.float() + _NORM_EPS).mean()
return res
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/running_mean_std.py
| 0.932515 | 0.524821 |
running_mean_std.py
|
pypi
|
import math
import gymnasium as gym
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Independent, Normal
from torch.nn import functional
from sample_factory.utils.typing import ActionSpace
from sample_factory.utils.utils import log
def calc_num_actions(action_space):
if isinstance(action_space, gym.spaces.Discrete):
return 1
elif isinstance(action_space, gym.spaces.Tuple):
return sum([calc_num_actions(a) for a in action_space])
elif isinstance(action_space, gym.spaces.Box):
if len(action_space.shape) != 1:
raise Exception("Non-trivial shape Box action spaces not currently supported. Try to flatten the space.")
return action_space.shape[0]
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def calc_num_action_parameters(action_space: ActionSpace) -> int:
"""Returns the number of paramaters required to represent the given action space."""
if isinstance(action_space, gym.spaces.Discrete):
return action_space.n
elif isinstance(action_space, gym.spaces.Tuple):
return sum([calc_num_action_parameters(a) for a in action_space])
elif isinstance(action_space, gym.spaces.Box):
# one mean and one standard deviation for every action
return np.prod(action_space.shape) * 2
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def is_continuous_action_space(action_space: ActionSpace) -> bool:
return isinstance(action_space, gym.spaces.Box)
def get_action_distribution(action_space, raw_logits):
"""
Create the distribution object based on provided action space and unprocessed logits.
:param action_space: Gym action space object
:param raw_logits: this function expects unprocessed raw logits (not after log-softmax!)
:return: action distribution that you can sample from
"""
assert calc_num_action_parameters(action_space) == raw_logits.shape[-1]
if isinstance(action_space, gym.spaces.Discrete):
return CategoricalActionDistribution(raw_logits)
elif isinstance(action_space, gym.spaces.Tuple):
return TupleActionDistribution(action_space, logits_flat=raw_logits)
elif isinstance(action_space, gym.spaces.Box):
return ContinuousActionDistribution(params=raw_logits)
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def sample_actions_log_probs(distribution):
if isinstance(distribution, TupleActionDistribution):
return distribution.sample_actions_log_probs()
else:
actions = distribution.sample()
log_prob_actions = distribution.log_prob(actions)
return actions, log_prob_actions
def argmax_actions(distribution):
if isinstance(distribution, TupleActionDistribution):
return distribution.argmax()
elif hasattr(distribution, "probs"):
return torch.argmax(distribution.probs, dim=-1)
elif hasattr(distribution, "means"):
return distribution.means
else:
raise NotImplementedError(f"Action distribution type {type(distribution)} does not support argmax!")
# noinspection PyAbstractClass
class CategoricalActionDistribution:
def __init__(self, raw_logits):
"""
Ctor.
:param raw_logits: unprocessed logits, typically an output of a fully-connected layer
"""
self.raw_logits = raw_logits
self.log_p = self.p = None
@property
def probs(self):
if self.p is None:
self.p = functional.softmax(self.raw_logits, dim=-1)
return self.p
@property
def log_probs(self):
if self.log_p is None:
self.log_p = functional.log_softmax(self.raw_logits, dim=-1)
return self.log_p
def sample_gumbel(self):
sample = torch.argmax(self.raw_logits - torch.empty_like(self.raw_logits).exponential_().log_(), -1)
return sample
def sample(self):
samples = torch.multinomial(self.probs, 1, True)
return samples
def log_prob(self, value):
value = value.long()
log_probs = torch.gather(self.log_probs, -1, value).view(-1)
return log_probs
def entropy(self):
p_log_p = self.log_probs * self.probs
return -p_log_p.sum(-1)
def _kl(self, other_log_probs):
probs, log_probs = self.probs, self.log_probs
kl = probs * (log_probs - other_log_probs)
kl = kl.sum(dim=-1)
return kl
def _kl_inverse(self, other_log_probs):
kl = torch.exp(other_log_probs) * (other_log_probs - self.log_probs)
kl = kl.sum(dim=-1)
return kl
def _kl_symmetric(self, other_log_probs):
return 0.5 * (self._kl(other_log_probs) + self._kl_inverse(other_log_probs))
def symmetric_kl_with_uniform_prior(self):
probs, log_probs = self.probs, self.log_probs
num_categories = log_probs.shape[-1]
uniform_prob = 1 / num_categories
log_uniform_prob = math.log(uniform_prob)
return 0.5 * (
(probs * (log_probs - log_uniform_prob)).sum(dim=-1)
+ (uniform_prob * (log_uniform_prob - log_probs)).sum(dim=-1)
)
def kl_divergence(self, other):
return self._kl(other.log_probs)
def dbg_print(self):
dbg_info = dict(
entropy=self.entropy().mean(),
min_logit=self.raw_logits.min(),
max_logit=self.raw_logits.max(),
min_prob=self.probs.min(),
max_prob=self.probs.max(),
)
msg = ""
for key, value in dbg_info.items():
msg += f"{key}={value.cpu().item():.3f} "
log.debug(msg)
class TupleActionDistribution:
"""
Basically, a tuple of independent action distributions.
Useful when the environment requires multiple independent action heads, e.g.:
- moving in the environment
- selecting a weapon
- jumping
- strafing
Empirically, it seems to be better to represent such an action distribution as a tuple of independent action
distributions, rather than a one-hot over potentially big cartesian product of all action spaces, like it's
usually done in Atari.
Entropy of such a distribution is just a sum of entropies of individual distributions.
"""
def __init__(self, action_space, logits_flat):
self.logit_lengths = [calc_num_action_parameters(s) for s in action_space.spaces]
self.split_logits = torch.split(logits_flat, self.logit_lengths, dim=1)
self.action_lengths = [calc_num_actions(s) for s in action_space.spaces]
assert len(self.split_logits) == len(action_space.spaces)
self.distributions = []
for i, space in enumerate(action_space.spaces):
self.distributions.append(get_action_distribution(space, self.split_logits[i]))
@staticmethod
def _flatten_actions(list_of_action_batches):
batch_of_action_tuples = torch.cat(list_of_action_batches, 1)
return batch_of_action_tuples
def _calc_log_probs(self, list_of_action_batches):
# calculate batched log probs for every distribution
log_probs = [d.log_prob(a) for d, a in zip(self.distributions, list_of_action_batches)]
log_probs = [lp.unsqueeze(dim=1) for lp in log_probs]
# concatenate and calculate sum of individual log-probs
# this is valid under the assumption that action distributions are independent
log_probs = torch.cat(log_probs, dim=1)
log_probs = log_probs.sum(dim=1)
return log_probs
def sample_actions_log_probs(self):
list_of_action_batches = [d.sample() for d in self.distributions]
batch_of_action_tuples = self._flatten_actions(list_of_action_batches)
log_probs = self._calc_log_probs(list_of_action_batches)
return batch_of_action_tuples, log_probs
def sample(self):
list_of_action_batches = [d.sample() for d in self.distributions]
return self._flatten_actions(list_of_action_batches)
def argmax(self):
list_of_action_batches = [argmax_actions(d) for d in self.distributions]
return torch.cat(list_of_action_batches).unsqueeze(0)
def log_prob(self, actions):
# split into batches of actions from individual distributions
list_of_action_batches = torch.split(actions, self.action_lengths, dim=1)
log_probs = self._calc_log_probs(list_of_action_batches)
return log_probs
def entropy(self):
entropies = [d.entropy().unsqueeze(dim=1) for d in self.distributions]
entropies = torch.cat(entropies, dim=1)
entropy = entropies.sum(dim=1)
return entropy
def kl_divergence(self, other):
kls = [d.kl_divergence(other_d).unsqueeze(dim=1) for d, other_d in zip(self.distributions, other.distributions)]
kls = torch.cat(kls, dim=1)
kl = kls.sum(dim=1)
return kl
def symmetric_kl_with_uniform_prior(self):
sym_kls = [d.symmetric_kl_with_uniform_prior().unsqueeze(dim=1) for d in self.distributions]
sym_kls = torch.cat(sym_kls, dim=1)
sym_kl = sym_kls.sum(dim=1)
return sym_kl
def dbg_print(self):
for d in self.distributions:
d.dbg_print()
# noinspection PyAbstractClass
class ContinuousActionDistribution(Independent):
stddev_min: float = 1e-4
stddev_max: float = 1e4
def __init__(self, params):
self.means, self.log_std, self.stddevs = self._init_impl(params, self.stddev_min, self.stddev_max)
normal_dist = Normal(self.means, self.stddevs, validate_args=False)
super().__init__(normal_dist, 1, validate_args=False)
@staticmethod
@torch.jit.script
def _init_impl(params: Tensor, stddev_min: float, stddev_max: float):
# using torch.chunk here is slightly faster than plain indexing
means, log_std = torch.chunk(params, 2, dim=1)
stddevs = log_std.exp()
stddevs = torch.clamp(stddevs, stddev_min, stddev_max)
return means, log_std, stddevs
def kl_divergence(self, other):
kl = torch.distributions.kl.kl_divergence(self, other)
return kl
def summaries(self):
return dict(
action_mean=self.means.mean(),
action_mean_min=self.means.min(),
action_mean_max=self.means.max(),
action_log_std_mean=self.log_std.mean(),
action_log_std_min=self.log_std.min(),
action_log_std_max=self.log_std.max(),
action_stddev_mean=self.stddev.mean(),
action_stddev_min=self.stddev.min(),
action_stddev_max=self.stddev.max(),
)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/action_distributions.py
| 0.882713 | 0.545225 |
action_distributions.py
|
pypi
|
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import gymnasium as gym
import numpy as np
import torch
from gymnasium import Wrapper, spaces
from gymnasium.core import ActType, ObsType
from torch import Tensor
from sample_factory.algo.utils.tensor_utils import dict_of_lists_cat
from sample_factory.envs.create_env import create_env
from sample_factory.envs.env_utils import (
RewardShapingInterface,
TrainingInfoInterface,
find_training_info_interface,
find_wrapper_interface,
)
from sample_factory.utils.dicts import dict_of_lists_append, list_of_dicts_to_dict_of_lists
from sample_factory.utils.typing import Config
Actions = Any
ListActions = Sequence[Actions]
TensorActions = Tensor
SeqBools = Sequence[bool]
DictObservations = Dict[str, Any]
DictOfListsObservations = Dict[str, Sequence[Any]]
DictOfTensorObservations = Dict[str, Tensor]
ListObservations = Sequence[Any]
ListOfDictObservations = Sequence[DictObservations]
def get_multiagent_info(env: Any) -> Tuple[bool, int]:
num_agents = env.num_agents if hasattr(env, "num_agents") else 1
is_multiagent = env.is_multiagent if hasattr(env, "is_multiagent") else num_agents > 1
assert is_multiagent or num_agents == 1, f"Invalid configuration: {is_multiagent=} and {num_agents=}"
return is_multiagent, num_agents
def is_multiagent_env(env: Any) -> bool:
is_multiagent, num_agents = get_multiagent_info(env)
return is_multiagent
class _DictObservationsWrapper(Wrapper):
def __init__(self, env):
super().__init__(env)
is_multiagent, num_agents = get_multiagent_info(env)
self.is_multiagent: bool = is_multiagent
self.num_agents: int = num_agents
self.observation_space: gym.spaces.Dict = gym.spaces.Dict(dict(obs=self.observation_space))
class BatchedDictObservationsWrapper(_DictObservationsWrapper):
"""Guarantees that the environment returns observations as dictionaries of lists (batches)."""
def reset(self, **kwargs):
obs, info = self.env.reset(**kwargs)
return dict(obs=obs), info
def step(self, action):
obs, rew, terminated, truncated, info = self.env.step(action)
return dict(obs=obs), rew, terminated, truncated, info
class BatchedMultiAgentWrapper(Wrapper):
"""Assumes wrapped environment has dictionary obs space."""
def __init__(self, env):
super().__init__(env)
self.num_agents: int = 1
self.is_multiagent: bool = True
assert isinstance(env.observation_space, spaces.Dict), "Wrapped environment must have dictionary obs space."
self.obs_dict = {}
def _obs(self, obs: Dict) -> DictOfListsObservations:
for key, value in obs.items():
self.obs_dict[key] = [value]
return self.obs_dict
def reset(self, **kwargs) -> Tuple[DictOfListsObservations, Sequence[Dict]]:
obs, info = self.env.reset(**kwargs)
return self._obs(obs), [info]
def step(self, action) -> Tuple[DictOfListsObservations, Sequence, SeqBools, SeqBools, Sequence[Dict]]:
action = action[0]
obs, rew, terminated, truncated, info = self.env.step(action)
if terminated | truncated: # auto-resetting
obs, info["reset_info"] = self.env.reset()
return self._obs(obs), [rew], [terminated], [truncated], [info]
class NonBatchedMultiAgentWrapper(Wrapper):
"""
This wrapper allows us to treat a single-agent environment as multi-agent with 1 agent.
That is, the data (obs, rewards, etc.) is converted into lists of length 1
"""
def __init__(self, env):
super().__init__(env)
self.num_agents: int = 1
self.is_multiagent: bool = True
def reset(self, **kwargs) -> ListObservations:
obs, info = self.env.reset(**kwargs)
return [obs], [info]
def step(self, action: ListActions) -> Tuple[ListObservations, Sequence, SeqBools, SeqBools, Sequence[Dict]]:
action = action[0]
obs, rew, terminated, truncated, info = self.env.step(action)
if terminated or truncated: # auto-resetting
obs, info["reset_info"] = self.env.reset()
return [obs], [rew], [terminated], [truncated], [info]
class NonBatchedDictObservationsWrapper(_DictObservationsWrapper):
"""Guarantees that the environment returns observations as lists of dictionaries."""
def reset(self, **kwargs) -> ListOfDictObservations:
obs, info = self.env.reset(**kwargs)
return [dict(obs=o) for o in obs], info
def step(self, action: ListActions) -> Tuple[ListOfDictObservations, Any, Any, Any, Any]:
obs, rew, terminated, truncated, info = self.env.step(action)
return [dict(obs=o) for o in obs], rew, terminated, truncated, info
class BatchedListToDictWrapper(Wrapper):
def reset(self, **kwargs):
obs, info = self.env.reset(**kwargs)
if isinstance(obs, list):
return list_of_dicts_to_dict_of_lists(obs), info
return obs, info
def step(self, action):
obs, rew, terminated, truncated, info = self.env.step(action)
if isinstance(obs, list):
return list_of_dicts_to_dict_of_lists(obs), rew, terminated, truncated, info
return obs, rew, terminated, truncated, info
class BatchedVecEnv(Wrapper):
"""Ensures that the env returns a dictionary of tensors for observations, and tensors for rewards and dones."""
ConvertFunc = Callable[[Any], Tensor]
def __init__(self, env):
if not isinstance(env.observation_space, spaces.Dict):
env = BatchedDictObservationsWrapper(env)
if not is_multiagent_env(env):
env = BatchedMultiAgentWrapper(env)
else:
env = BatchedListToDictWrapper(env)
is_multiagent, num_agents = get_multiagent_info(env)
self.is_multiagent: bool = is_multiagent
self.num_agents: int = num_agents
self._convert_obs_func: Dict[str, BatchedVecEnv.ConvertFunc] = dict()
self._convert_rew_func = self._convert_terminated_func = self._convert_truncated_func = None
self._seed: Optional[int] = None
self._seeded: bool = False
super().__init__(env)
def _convert(self, obs: Dict[str, Any]) -> DictOfTensorObservations:
result = dict()
for key, value in obs.items():
result[key] = self._convert_obs_func[key](value)
return result
@staticmethod
def _get_convert_func(x: Union[Tensor, np.ndarray, List, Tuple]) -> ConvertFunc:
"""Depending on type of x, determines the conversion function from x to a tensor."""
if isinstance(x, Tensor):
return lambda x_: x_ # do nothing
elif isinstance(x, np.ndarray):
return lambda x_: torch.from_numpy(x_)
elif isinstance(x, (list, tuple)):
if isinstance(x[0], np.ndarray) or isinstance(x[0], (list, tuple)):
# creating a tensor from a list of numpy.ndarrays is extremely slow
# so we first create a numpy array which is then converted to a tensor
return lambda x_: torch.tensor(np.array(x_))
elif isinstance(x[0], Tensor):
return lambda x_: torch.tensor(x_)
else:
# just make a tensor and hope for the best
# leave it like this for now, we can add more cases later if we need to
return lambda x_: torch.tensor(x_)
else:
raise RuntimeError(f"Cannot convert data type {type(x)} to torch.Tensor")
def seed(self, seed: Optional[int] = None):
"""
Since Gym 0.26 seeding is done in reset().
Sample Factory uses its own wrappers around gym.Env so we just keep this function and forward the seed to
the first reset() if needed.
"""
self._seed = seed
def reset(self, **kwargs) -> Tuple[DictOfTensorObservations, Dict]:
if not self._seeded and self._seed is not None:
kwargs["seed"] = self._seed
self._seeded = True
obs, info = self.env.reset(**kwargs)
assert isinstance(obs, dict)
for key, value in obs.items():
if key not in self._convert_obs_func:
self._convert_obs_func[key] = self._get_convert_func(value)
return self._convert(obs), info
def step(self, action: TensorActions) -> Tuple[DictOfTensorObservations, Tensor, Tensor, Tensor, Dict]:
obs, rew, terminated, truncated, infos = self.env.step(action)
obs = self._convert(obs)
if not self._convert_rew_func:
# the only way to reliably find out the format of data is to actually look what the environment returns
# noinspection PyTypeChecker
self._convert_rew_func = self._get_convert_func(rew)
# noinspection PyTypeChecker
self._convert_terminated_func = self._get_convert_func(terminated)
# noinspection PyTypeChecker
self._convert_truncated_func = self._get_convert_func(truncated)
rew = self._convert_rew_func(rew)
terminated = self._convert_terminated_func(terminated)
truncated = self._convert_truncated_func(truncated)
return obs, rew, terminated, truncated, infos
class SequentialVectorizeWrapper(Wrapper, TrainingInfoInterface, RewardShapingInterface):
"""Vector interface for multiple environments simulated sequentially on one worker."""
def __init__(self, envs: Sequence):
Wrapper.__init__(self, envs[0])
TrainingInfoInterface.__init__(self)
self.single_env_agents = envs[0].num_agents
assert all(
e.num_agents == self.single_env_agents for e in envs
), f"Expect all envs to have the same number of agents {self.single_env_agents}"
self.envs = envs
self.num_agents = self.single_env_agents * len(envs)
self.obs = self.rew = self.terminated = self.truncated = self.infos = None
self.training_info_interfaces: Optional[List[TrainingInfoInterface]] = []
self.reward_shaping_interfaces: Optional[List[RewardShapingInterface]] = []
for env in envs:
env_train_info = find_training_info_interface(env)
if env_train_info is None:
self.training_info_interfaces = None
break
else:
self.training_info_interfaces.append(env_train_info)
env_rew_shaping = find_wrapper_interface(env, RewardShapingInterface)
if env_rew_shaping is None:
self.reward_shaping_interfaces = None
break
else:
self.reward_shaping_interfaces.append(env_rew_shaping)
def reset(self, **kwargs) -> Tuple[Dict, List[Dict]]:
infos = []
self.obs = dict()
for e in self.envs:
obs, info = e.reset(**kwargs)
dict_of_lists_append(self.obs, obs)
infos.extend(info)
dict_of_lists_cat(self.obs)
return self.obs, infos
def step(self, actions: Tensor):
infos = []
ofs = 0
next_ofs = self.single_env_agents
for i, e in enumerate(self.envs):
idx = slice(ofs, next_ofs)
env_actions = actions[idx]
obs, rew, terminated, truncated, info = e.step(env_actions)
# TODO: test if this works for multi-agent envs
for key, x in obs.items():
self.obs[key][idx] = x
if self.rew is None:
self.rew = rew.repeat(len(self.envs))
self.terminated = terminated.repeat(len(self.envs))
self.truncated = truncated.repeat(len(self.envs))
self.rew[idx] = rew
self.terminated[idx] = terminated
self.truncated[idx] = truncated
infos.extend(info)
ofs += self.single_env_agents
next_ofs += self.single_env_agents
return self.obs, self.rew, self.terminated, self.truncated, infos
def set_training_info(self, training_info: Dict) -> None:
if self.training_info_interfaces is None:
return
for env_train_info in self.training_info_interfaces:
env_train_info.set_training_info(training_info)
def get_default_reward_shaping(self) -> Optional[Dict[str, Any]]:
if self.reward_shaping_interfaces is not None:
return self.reward_shaping_interfaces[0].get_default_reward_shaping()
else:
return None
def set_reward_shaping(self, reward_shaping: Dict[str, Any], agent_indices: int | slice) -> None:
assert isinstance(agent_indices, slice)
for agent_idx in range(agent_indices.start, agent_indices.stop):
env_idx = agent_idx // self.single_env_agents
env_agent_idx = agent_idx % self.single_env_agents
self.reward_shaping_interfaces[env_idx].set_reward_shaping(reward_shaping, env_agent_idx)
def close(self):
for e in self.envs:
e.close()
def make_env_func_batched(cfg, env_config, render_mode: Optional[str] = None) -> BatchedVecEnv:
"""
This should yield an environment that always returns a dict of PyTorch tensors (CPU- or GPU-side) or
a dict of numpy arrays or a dict of lists (depending on what the environment returns in the first place).
"""
env = create_env(cfg.env, cfg=cfg, env_config=env_config, render_mode=render_mode)
# At this point we can be sure that our environment outputs a dictionary of lists (or numpy arrays or tensors)
# containing obs, rewards, etc. for each agent in the environment.
# Now we just want the environment to return a tensor dict for observations and tensors for rewards and dones.
# We leave infos intact for now, because format of infos isn't really specified and can be inconsistent between
# timesteps.
env = BatchedVecEnv(env)
return env
class NonBatchedVecEnv(Wrapper):
"""
reset() returns a list of dict observations.
step(action) returns a list of dict observations, list of rewards, list of dones, list of infos.
"""
def __init__(self, env):
if not is_multiagent_env(env):
env = NonBatchedMultiAgentWrapper(env)
if not isinstance(env.observation_space, spaces.Dict):
env = NonBatchedDictObservationsWrapper(env)
is_multiagent, num_agents = get_multiagent_info(env)
self.is_multiagent: bool = is_multiagent
self.num_agents: int = num_agents
super().__init__(env)
def make_env_func_non_batched(cfg: Config, env_config, render_mode: Optional[str] = None) -> NonBatchedVecEnv:
"""
This should yield an environment that always returns a list of {observations, rewards,
dones, etc.}
This is for the non-batched sampler which processes each agent's data independently without any vectorization
(and therefore enables more sophisticated configurations where agents in the same env can be controlled
by different policies and so on).
"""
env = create_env(cfg.env, cfg=cfg, env_config=env_config, render_mode=render_mode)
env = NonBatchedVecEnv(env)
return env
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/make_env.py
| 0.958421 | 0.581838 |
make_env.py
|
pypi
|
import random
import numpy as np
from sample_factory.algo.utils.env_info import EnvInfo
from sample_factory.algo.utils.rl_utils import total_num_envs
from sample_factory.utils.typing import Config
class AgentPolicyMapping:
"""
This class currently implements the most simple mapping between agents in the envs and their associated policies.
We just pick a random policy from the population for every agent at the beginning of the episode.
Methods of this class can potentially be overloaded to provide a more clever mapping, e.g. we can minimize the
number of different policies per rollout worker thus minimizing the amount of communication required.
TODO: would be nice to have a mechanism to provide custom mappings through API.
"""
def __init__(self, cfg: Config, env_info: EnvInfo):
self.rng = np.random.RandomState(seed=random.randint(0, 2**32 - 1))
self.num_agents = env_info.num_agents
self.num_policies = cfg.num_policies
self.mix_policies_in_one_env = (
cfg.pbt_mix_policies_in_one_env if hasattr(cfg, "pbt_mix_policies_in_one_env") else False
) # TODO
self.resample_env_policy_every = 10 # episodes
self.env_policies = dict()
self.env_policy_requests = dict()
total_envs = total_num_envs(cfg)
self.sync_mode = not cfg.async_rl
if self.sync_mode:
assert total_envs % self.num_policies == 0, f"{total_envs=} must be divisible by {self.num_policies=}"
def get_policy_for_agent(self, agent_idx: int, env_idx: int, global_env_idx: int) -> int:
if self.sync_mode:
# env_id here is a global index of the policy
# deterministic mapping ensures we always collect the same amount of experience per policy per iteration
# Sync mode is an experimental feature. This code can be further improved to allow more sophisticated
# agent-policy mapping.
return global_env_idx % self.num_policies
num_requests = self.env_policy_requests.get(env_idx, 0)
# extra cheeky flag to make sure this code executes early in the training so we spot any potential problems
early_in_the_training = num_requests < 5
if num_requests % (self.num_agents * self.resample_env_policy_every) == 0 or early_in_the_training:
if self.mix_policies_in_one_env:
self.env_policies[env_idx] = [self._sample_policy() for _ in range(self.num_agents)]
else:
policy = self._sample_policy()
self.env_policies[env_idx] = [policy] * self.num_agents
self.env_policy_requests[env_idx] = num_requests + 1
return self.env_policies[env_idx][agent_idx]
def _sample_policy(self):
return self.rng.randint(0, self.num_policies)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/algo/utils/agent_policy_mapping.py
| 0.41253 | 0.33406 |
agent_policy_mapping.py
|
pypi
|
import os
import cv2
import numpy as np
from huggingface_hub import HfApi, Repository, repocard, upload_folder
from sample_factory.utils.typing import Config
from sample_factory.utils.utils import log, project_tmp_dir
MIN_FRAME_SIZE = 180
def generate_replay_video(dir_path: str, frames: list, fps: int, cfg: Config):
video_fname = "replay.mp4" if cfg.video_name is None else cfg.video_name
if not video_fname.endswith(".mp4"):
video_fname += ".mp4"
tmp_name = os.path.join(project_tmp_dir(), video_fname)
video_name = os.path.join(dir_path, video_fname)
if frames[0].shape[0] == 3:
frame_size = (frames[0].shape[2], frames[0].shape[1])
else:
frame_size = (frames[0].shape[1], frames[0].shape[0])
resize = False
if min(frame_size) < MIN_FRAME_SIZE:
resize = True
scaling_factor = MIN_FRAME_SIZE / min(frame_size)
frame_size = (int(frame_size[0] * scaling_factor), int(frame_size[1] * scaling_factor))
video = cv2.VideoWriter(tmp_name, cv2.VideoWriter_fourcc(*"mp4v"), fps, frame_size)
for frame in frames:
if frame.shape[0] == 3:
frame = frame.transpose(1, 2, 0)
if resize:
frame = cv2.resize(frame, frame_size, interpolation=cv2.INTER_AREA)
video.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
video.release()
os.system(f"ffmpeg -y -i {tmp_name} -vcodec libx264 {video_name}")
log.debug(f"Replay video saved to {video_name}!")
def generate_model_card(
dir_path: str,
algo: str,
env: str,
repo_id: str,
rewards: list = None,
enjoy_name: str = None,
train_name: str = None,
):
readme_path = os.path.join(dir_path, "README.md")
repo_name = repo_id.split("/")[1]
readme = f"""
A(n) **{algo}** model trained on the **{env}** environment.\n
This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/\n\n
## Downloading the model\n
After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r {repo_id}
```\n
"""
if enjoy_name is None:
enjoy_name = "<path.to.enjoy.module>"
readme += f"""
## Using the model\n
To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m {enjoy_name} --algo={algo} --env={env} --train_dir=./train_dir --experiment={repo_name}
```
\n
You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details
"""
if train_name is None:
train_name = "<path.to.train.module>"
readme += f"""
## Training with this model\n
To continue training with this model, use the `train` script corresponding to this environment:
```
python -m {train_name} --algo={algo} --env={env} --train_dir=./train_dir --experiment={repo_name} --restart_behavior=resume --train_for_env_steps=10000000000
```\n
Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
"""
with open(readme_path, "w", encoding="utf-8") as f:
f.write(readme)
metadata = {}
metadata["library_name"] = "sample-factory"
metadata["tags"] = [
"deep-reinforcement-learning",
"reinforcement-learning",
"sample-factory",
]
if rewards is not None:
mean_reward = np.mean(rewards)
std_reward = np.std(rewards)
eval = repocard.metadata_eval_result(
model_pretty_name=algo,
task_pretty_name="reinforcement-learning",
task_id="reinforcement-learning",
metrics_pretty_name="mean_reward",
metrics_id="mean_reward",
metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}",
dataset_pretty_name=env,
dataset_id=env,
)
metadata = {**metadata, **eval}
repocard.metadata_save(readme_path, metadata)
def push_to_hf(dir_path: str, repo_name: str):
repo_url = HfApi().create_repo(
repo_id=repo_name,
private=False,
exist_ok=True,
)
upload_folder(
repo_id=repo_name,
folder_path=dir_path,
path_in_repo=".",
ignore_patterns=[".git/*"],
)
log.info(f"The model has been pushed to {repo_url}")
def load_from_hf(dir_path: str, repo_id: str):
temp = repo_id.split("/")
repo_name = temp[1]
local_dir = os.path.join(dir_path, repo_name)
Repository(local_dir, repo_id)
log.info(f"The repository {repo_id} has been cloned to {local_dir}")
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/huggingface/huggingface_utils.py
| 0.423816 | 0.494751 |
huggingface_utils.py
|
pypi
|
import collections
import dataclasses
import time
import typing
from collections import deque
from dataclasses import dataclass
from typing import Any, Optional, Union
import psutil
from sample_factory.algo.utils.misc import EPS
from sample_factory.utils.attr_dict import AttrDict
from sample_factory.utils.utils import log
class AvgTime:
def __init__(self, num_values_to_avg):
self.values = deque([], maxlen=num_values_to_avg)
def __str__(self):
avg_time = sum(self.values) / max(1, len(self.values))
return f"{avg_time:.4f}"
@dataclass
class TimingTreeNode:
self_time: Union[float, AvgTime] = 0
timing: typing.OrderedDict[str, Any] = dataclasses.field(default_factory=collections.OrderedDict)
# noinspection PyProtectedMember
class TimingContext:
def __init__(self, timing, key: str, additive=False, average=None):
super().__init__()
self.timing_tree_node: Optional[TimingTreeNode] = None
self._timing = timing
self._key = key
self._additive = additive
self._average = average
self._time_enter = None
self._time = 0
def set_tree_node(self, node):
self.timing_tree_node = node
def initial_value(self):
if self._average is not None:
return AvgTime(num_values_to_avg=self._average)
return 0.0
def _record_measurement(self, key, value):
if self._additive:
self._timing[key] += value
self.timing_tree_node.self_time += value
elif self._average is not None:
self._timing[key].values.append(value)
self.timing_tree_node.self_time.values.append(value)
else:
self._timing[key] = value
self.timing_tree_node.self_time = value
def __enter__(self):
self._time_enter = time.time()
self._timing._open_contexts_stack.append(self)
def __exit__(self, type_, value, traceback):
time_passed = max(time.time() - self._time_enter, EPS) # EPS to prevent div by zero
self._record_measurement(self._key, time_passed)
self._timing._open_contexts_stack.pop()
class Timing(AttrDict):
def __init__(self, name="Profile", *args, **kwargs):
super().__init__(*args, **kwargs)
self._name = name
self._root_context = TimingContext(self, "~")
self._root_context.set_tree_node(TimingTreeNode())
self._open_contexts_stack = [self._root_context]
def _push_context(self, ctx):
self._open_contexts_stack.append(ctx)
return ctx
def _init_context(self, key, *args, **kwargs):
ctx = TimingContext(self, key, *args, **kwargs)
if key not in self:
self[key] = ctx.initial_value()
parent_ctx = self._open_contexts_stack[-1]
parent_tree_node = parent_ctx.timing_tree_node
if key not in parent_tree_node.timing:
parent_tree_node.timing[key] = TimingTreeNode(ctx.initial_value())
ctx.set_tree_node(parent_tree_node.timing[key])
return ctx
def timeit(self, key):
return self._init_context(key)
def add_time(self, key):
return self._init_context(key, additive=True)
def time_avg(self, key, average=10):
return self._init_context(key, average=average)
@staticmethod
def _time_str(value):
return f"{value:.4f}" if isinstance(value, float) else str(value)
def flat_str(self):
# skip data members of Timing
skip_names = ["_root_context", "_open_contexts_stack"]
s = []
for key, value in self.items():
if key not in skip_names:
s.append(f"{key}: {self._time_str(value)}")
return ", ".join(s)
@classmethod
def _tree_str_func(cls, node: TimingTreeNode, depth: int):
indent = " " * 2 * depth
leaf_nodes = ((k, v) for k, v in node.timing.items() if not v.timing)
nonleaf_nodes = ((k, v) for k, v in node.timing.items() if v.timing)
def node_str(k, node_):
return f"{k}: {cls._time_str(node_.self_time)}"
tokens = []
for key, child_node in leaf_nodes:
tokens.append(node_str(key, child_node))
lines = []
if tokens:
lines.append(f'{indent}{", ".join(tokens)}')
for key, child_node in nonleaf_nodes:
lines.append(f"{indent}{node_str(key, child_node)}")
lines.extend(cls._tree_str_func(child_node, depth + 1))
return lines
def tree_str(self):
lines = [f"{self._name} tree view:"]
lines.extend(self._tree_str_func(self._root_context.timing_tree_node, 0))
return "\n".join(lines)
def __str__(self):
return self.tree_str()
def init_global_profiler(t):
"""This is for debugging purposes. Normally prefer to pass it around."""
global TIMING
log.info("Setting global profiler in process %r", psutil.Process())
TIMING = t
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/utils/timing.py
| 0.826397 | 0.284626 |
timing.py
|
pypi
|
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, OrderedDict
def dict_of_lists_append(d: Dict[Any, List], new_data):
for key, x in new_data.items():
if key in d:
d[key].append(x)
else:
d[key] = [x]
def dict_of_lists_append_idx(d: Dict[Any, List], new_data, index):
for key, x in new_data.items():
if key in d:
d[key].append(x[index])
else:
d[key] = [x[index]]
def iterate_recursively(d):
"""
Generator for a dictionary that can potentially include other dictionaries.
Yields tuples of (dict, key, value), where key, value are "leaf" elements of the "dict".
"""
for k, v in d.items():
if isinstance(v, (dict, OrderedDict)):
yield from iterate_recursively(v)
else:
yield d, k, v
def iterate_recursively_with_prefix(d: Dict, prefix=None):
"""
Generator for a dictionary that can potentially include other dictionaries.
Yields tuples of (dict, key, value, prefix), where key, value are "leaf" elements of the "dict" and prefix is a
list of keys that lead to the current element (exluding the current key).
"""
if prefix is None:
prefix = []
for k, v in d.items():
if isinstance(v, (dict, OrderedDict)):
yield from iterate_recursively_with_prefix(v, prefix + [k])
else:
yield d, k, v, prefix
def copy_dict_structure(d):
"""Copy dictionary layout without copying the actual values (populated with Nones)."""
d_copy = type(d)()
_copy_dict_structure_func(d, d_copy)
return d_copy
def _copy_dict_structure_func(d, d_copy):
for key, value in d.items():
if isinstance(value, (dict, OrderedDict)):
d_copy[key] = type(value)()
_copy_dict_structure_func(value, d_copy[key])
else:
d_copy[key] = None
def iter_dicts_recursively(d1, d2):
"""
Assuming structure of d1 is strictly included into d2.
I.e. each key at each recursion level is also present in d2. This is also true when d1 and d2 have the same
structure.
"""
for k, v in d1.items():
assert k in d2
if isinstance(v, (dict, OrderedDict)):
yield from iter_dicts_recursively(d1[k], d2[k])
else:
yield d1, d2, k, d1[k], d2[k]
def list_of_dicts_to_dict_of_lists(list_of_dicts: List[Dict]) -> Dict[Any, List]:
if not list_of_dicts:
return dict()
res = copy_dict_structure(list_of_dicts[0])
for d in list_of_dicts:
for d1, d2, key, v1, v2 in iter_dicts_recursively(d, res):
if v2 is None:
d2[key] = [v1]
else:
d2[key].append(v1)
return res
def get_first_present(d: Dict, keys: Iterable, default: Optional[Any] = None) -> Optional[Any]:
for key in keys:
if key in d:
return d[key]
return default
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/utils/dicts.py
| 0.742515 | 0.430088 |
dicts.py
|
pypi
|
from typing import Dict
import torch
from torch import nn
from sample_factory.algo.utils.misc import EPS
from sample_factory.algo.utils.running_mean_std import RunningMeanStdDictInPlace, running_mean_std_summaries
from sample_factory.utils.dicts import copy_dict_structure, iter_dicts_recursively
class ObservationNormalizer(nn.Module):
def __init__(self, obs_space, cfg):
super().__init__()
self.sub_mean = cfg.obs_subtract_mean
self.scale = cfg.obs_scale
self.running_mean_std = None
if cfg.normalize_input:
self.running_mean_std = RunningMeanStdDictInPlace(obs_space, cfg.normalize_input_keys)
self.should_sub_mean = abs(self.sub_mean) > EPS
self.should_scale = abs(self.scale - 1.0) > EPS
self.should_normalize = self.should_sub_mean or self.should_scale or self.running_mean_std is not None
@staticmethod
def _clone_tensordict(obs_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
obs_clone = copy_dict_structure(obs_dict) # creates an identical dict but with None values
for d, d_clone, k, x, _ in iter_dicts_recursively(obs_dict, obs_clone):
if x.dtype != torch.float:
# type conversion requires a copy, do this check to make sure we don't do it twice
d_clone[k] = x.float() # this will create a copy of a tensor
else:
d_clone[k] = x.clone() # otherwise, we explicitly clone it since normalization is in-place
return obs_clone
def forward(self, obs_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
if not self.should_normalize:
return obs_dict
with torch.no_grad():
# since we are creating a clone, it is safe to use in-place operations
obs_clone = self._clone_tensordict(obs_dict)
# subtraction of mean and scaling is only applied to default "obs"
# this should be modified for custom obs dicts
if self.should_sub_mean:
obs_clone["obs"].sub_(self.sub_mean)
if self.should_scale:
obs_clone["obs"].mul_(1.0 / self.scale)
if self.running_mean_std:
self.running_mean_std(obs_clone) # in-place normalization
return obs_clone
def summaries(self) -> Dict:
res = dict()
if self.running_mean_std:
res.update(running_mean_std_summaries(self.running_mean_std))
return res
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/utils/normalize.py
| 0.934924 | 0.385028 |
normalize.py
|
pypi
|
from __future__ import annotations
from functools import wraps
from time import sleep
from typing import Any, Dict, Optional
from sample_factory.algo.utils.context import global_env_registry
from sample_factory.utils.typing import CreateEnvFunc
from sample_factory.utils.utils import is_module_available, log
def register_env(env_name: str, make_env_func: CreateEnvFunc) -> None:
"""
Register a callable that creates an environment.
This callable is called like:
make_env_func(full_env_name, cfg, env_config)
Where full_env_name is the name of the environment to be created, cfg is a namespace or AttrDict containing
necessary configuration parameters and env_config is an auxiliary dictionary containing information such as worker index on which the environment lives
(some envs may require this information)
env_name: name of the environment
make_env_func: callable that creates an environment
"""
env_registry = global_env_registry()
if env_name in env_registry:
log.warning(f"Environment {env_name} already registered, overwriting...")
assert callable(make_env_func), f"{make_env_func=} must be callable"
env_registry[env_name] = make_env_func
class EnvCriticalError(Exception):
pass
# TODO: move to their respective folders
def vizdoom_available():
return is_module_available("vizdoom")
def retry(exception_class=Exception, num_attempts=3, sleep_time=1):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs)
except exception_class as e:
if i == num_attempts - 1:
raise
else:
log.error("Failed with error %r, trying again", e)
sleep(sleep_time)
return wrapper
return decorator
def find_wrapper_interface(env, interface_type):
"""Unwrap the env until we find the wrapper that implements interface_type."""
unwrapped = env.unwrapped
while True:
if isinstance(env, interface_type):
return env
elif env == unwrapped:
return None # unwrapped all the way and didn't find the interface
else:
env = env.env # unwrap by one layer
class RewardShapingInterface:
def get_default_reward_shaping(self) -> Optional[Dict[str, Any]]:
"""Should return a dictionary of string:float key-value pairs defining the current reward shaping scheme."""
raise NotImplementedError
def set_reward_shaping(self, reward_shaping: Dict[str, Any], agent_idx: int | slice) -> None:
"""
Sets the new reward shaping scheme.
:param reward_shaping dictionary of string-float key-value pairs
:param agent_idx: integer agent index (for multi-agent envs). Can be a slice if we're training in batched mode
(set a single reward shaping scheme for a range of agents)
"""
raise NotImplementedError
def get_default_reward_shaping(env) -> Optional[Dict[str, Any]]:
"""
The current convention is that when the environment supports reward shaping, the env.unwrapped should contain
a reference to the object implementing RewardShapingInterface.
We use this object to get/set reward shaping schemes generated by PBT.
"""
reward_shaping_interface = find_wrapper_interface(env, RewardShapingInterface)
if reward_shaping_interface:
return reward_shaping_interface.get_default_reward_shaping()
return None
def set_reward_shaping(env, reward_shaping: Optional[Dict], agent_idx: int | slice) -> None:
if reward_shaping is None:
return
reward_shaping_interface = find_wrapper_interface(env, RewardShapingInterface)
if reward_shaping_interface:
reward_shaping_interface.set_reward_shaping(reward_shaping, agent_idx)
class TrainingInfoInterface:
def __init__(self):
self.training_info: Dict[str, Any] = dict()
def set_training_info(self, training_info):
"""
Send the training information to the environment, i.e. number of training steps so far.
Some environments rely on that i.e. to implement curricula.
:param training_info: dictionary containing information about the current training session. Guaranteed to
contain 'approx_total_training_steps' (approx because it lags a bit behind due to multiprocess synchronization)
"""
self.training_info = training_info
def find_training_info_interface(env):
"""Unwrap the env until we find the wrapper that implements TrainingInfoInterface."""
return find_wrapper_interface(env, TrainingInfoInterface)
def set_training_info(training_info_interface: Optional[TrainingInfoInterface], training_info: Dict[str, Any]) -> None:
if training_info_interface:
training_info_interface.set_training_info(training_info)
def num_env_steps(infos):
"""Calculate number of environment frames in a batch of experience."""
total_num_frames = 0
for info in infos:
total_num_frames += info.get("num_frames", 1)
return total_num_frames
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/envs/env_utils.py
| 0.82573 | 0.428652 |
env_utils.py
|
pypi
|
from typing import Dict, List, Optional
import torch
from gymnasium import spaces
from torch import Tensor, nn
from sample_factory.algo.utils.torch_utils import calc_num_elements
from sample_factory.model.model_utils import ModelModule, create_mlp, model_device, nonlinearity
from sample_factory.utils.attr_dict import AttrDict
from sample_factory.utils.typing import Config, ObsSpace
from sample_factory.utils.utils import log
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class Encoder(ModelModule):
def __init__(self, cfg: Config):
super().__init__(cfg)
def get_out_size(self) -> int:
raise NotImplementedError()
def model_to_device(self, device):
"""Default implementation, can be overridden in derived classes."""
self.to(device)
def device_for_input_tensor(self, input_tensor_name: str) -> Optional[torch.device]:
return model_device(self)
def type_for_input_tensor(self, input_tensor_name: str) -> torch.dtype:
return torch.float32
class MultiInputEncoder(Encoder):
def __init__(self, cfg: Config, obs_space: ObsSpace):
super().__init__(cfg)
self.obs_keys = list(sorted(obs_space.keys())) # always the same order
self.encoders = nn.ModuleDict()
out_size = 0
for obs_key in self.obs_keys:
shape = obs_space[obs_key].shape
if len(shape) == 1:
encoder_fn = MlpEncoder
elif len(shape) > 1:
encoder_fn = make_img_encoder
else:
raise NotImplementedError(f"Unsupported observation space {obs_space}")
self.encoders[obs_key] = encoder_fn(cfg, obs_space[obs_key])
out_size += self.encoders[obs_key].get_out_size()
self.encoder_out_size = out_size
def forward(self, obs_dict):
if len(self.obs_keys) == 1:
key = self.obs_keys[0]
return self.encoders[key](obs_dict[key])
encodings = []
for key in self.obs_keys:
x = self.encoders[key](obs_dict[key])
encodings.append(x)
return torch.cat(encodings, 1)
def get_out_size(self) -> int:
return self.encoder_out_size
class MlpEncoder(Encoder):
def __init__(self, cfg: Config, obs_space: ObsSpace):
super().__init__(cfg)
mlp_layers: List[int] = cfg.encoder_mlp_layers
self.mlp_head = create_mlp(mlp_layers, obs_space.shape[0], nonlinearity(cfg))
if len(mlp_layers) > 0:
self.mlp_head = torch.jit.script(self.mlp_head)
self.encoder_out_size = calc_num_elements(self.mlp_head, obs_space.shape)
def forward(self, obs: Tensor):
x = self.mlp_head(obs)
return x
def get_out_size(self) -> int:
return self.encoder_out_size
class ConvEncoderImpl(nn.Module):
"""
After we parse all the configuration and figure out the exact architecture of the model,
we devote a separate module to it to be able to use torch.jit.script (hopefully benefit from some layer
fusion).
"""
def __init__(self, obs_shape: AttrDict, conv_filters: List, extra_mlp_layers: List[int], activation: nn.Module):
super().__init__()
conv_layers = []
for layer in conv_filters:
if layer == "maxpool_2x2":
conv_layers.append(nn.MaxPool2d((2, 2)))
elif isinstance(layer, (list, tuple)):
inp_ch, out_ch, filter_size, stride = layer
conv_layers.append(nn.Conv2d(inp_ch, out_ch, filter_size, stride=stride))
conv_layers.append(activation)
else:
raise NotImplementedError(f"Layer {layer} not supported!")
self.conv_head = nn.Sequential(*conv_layers)
self.conv_head_out_size = calc_num_elements(self.conv_head, obs_shape)
self.mlp_layers = create_mlp(extra_mlp_layers, self.conv_head_out_size, activation)
def forward(self, obs: Tensor) -> Tensor:
x = self.conv_head(obs)
x = x.contiguous().view(-1, self.conv_head_out_size)
x = self.mlp_layers(x)
return x
class ConvEncoder(Encoder):
def __init__(self, cfg: Config, obs_space: ObsSpace):
super().__init__(cfg)
input_channels = obs_space.shape[0]
log.debug(f"{ConvEncoder.__name__}: {input_channels=}")
if cfg.encoder_conv_architecture == "convnet_simple":
conv_filters = [[input_channels, 32, 8, 4], [32, 64, 4, 2], [64, 128, 3, 2]]
elif cfg.encoder_conv_architecture == "convnet_impala":
conv_filters = [[input_channels, 16, 8, 4], [16, 32, 4, 2]]
elif cfg.encoder_conv_architecture == "convnet_atari":
conv_filters = [[input_channels, 32, 8, 4], [32, 64, 4, 2], [64, 64, 3, 1]]
else:
raise NotImplementedError(f"Unknown encoder architecture {cfg.encoder_conv_architecture}")
activation = nonlinearity(self.cfg)
extra_mlp_layers: List[int] = cfg.encoder_conv_mlp_layers
enc = ConvEncoderImpl(obs_space.shape, conv_filters, extra_mlp_layers, activation)
self.enc = torch.jit.script(enc)
self.encoder_out_size = calc_num_elements(self.enc, obs_space.shape)
log.debug(f"Conv encoder output size: {self.encoder_out_size}")
def get_out_size(self) -> int:
return self.encoder_out_size
def forward(self, obs: Tensor) -> Tensor:
return self.enc(obs)
class ResBlock(nn.Module):
def __init__(self, cfg, input_ch, output_ch):
super().__init__()
layers = [
nonlinearity(cfg),
nn.Conv2d(input_ch, output_ch, kernel_size=3, stride=1, padding=1), # padding SAME
nonlinearity(cfg),
nn.Conv2d(output_ch, output_ch, kernel_size=3, stride=1, padding=1), # padding SAME
]
self.res_block_core = nn.Sequential(*layers)
def forward(self, x: Tensor):
identity = x
out = self.res_block_core(x)
out = out + identity
return out
class ResnetEncoder(Encoder):
def __init__(self, cfg, obs_space):
super().__init__(cfg)
input_ch = obs_space.shape[0]
log.debug("Num input channels: %d", input_ch)
if cfg.encoder_conv_architecture == "resnet_impala":
# configuration from the IMPALA paper
resnet_conf = [[16, 2], [32, 2], [32, 2]]
else:
raise NotImplementedError(f"Unknown resnet architecture {cfg.encode_conv_architecture}")
curr_input_channels = input_ch
layers = []
for i, (out_channels, res_blocks) in enumerate(resnet_conf):
layers.extend(
[
nn.Conv2d(curr_input_channels, out_channels, kernel_size=3, stride=1, padding=1), # padding SAME
nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # padding SAME
]
)
for j in range(res_blocks):
layers.append(ResBlock(cfg, out_channels, out_channels))
curr_input_channels = out_channels
activation = nonlinearity(cfg)
layers.append(activation)
self.conv_head = nn.Sequential(*layers)
self.conv_head_out_size = calc_num_elements(self.conv_head, obs_space.shape)
log.debug(f"Convolutional layer output size: {self.conv_head_out_size}")
self.mlp_layers = create_mlp(cfg.encoder_conv_mlp_layers, self.conv_head_out_size, activation)
# should we do torch.jit here?
self.encoder_out_size = calc_num_elements(self.mlp_layers, (self.conv_head_out_size,))
def forward(self, obs: Tensor):
x = self.conv_head(obs)
x = x.contiguous().view(-1, self.conv_head_out_size)
x = self.mlp_layers(x)
return x
def get_out_size(self) -> int:
return self.encoder_out_size
def make_img_encoder(cfg: Config, obs_space: ObsSpace) -> Encoder:
"""Make (most likely convolutional) encoder for image-based observations."""
if cfg.encoder_conv_architecture.startswith("convnet"):
return ConvEncoder(cfg, obs_space)
elif cfg.encoder_conv_architecture.startswith("resnet"):
return ResnetEncoder(cfg, obs_space)
else:
raise NotImplementedError(f"Unknown convolutional architecture {cfg.encoder_conv_architecture}")
def default_make_encoder_func(cfg: Config, obs_space: ObsSpace) -> Encoder:
"""
Analyze the observation space and create either a convolutional or an MLP encoder depending on
whether this is an image-based environment or environment with vector observations.
"""
# we only support dict observation spaces - envs with non-dict obs spaces use a wrapper
# main subspace used to determine the encoder type is called "obs". For envs with multiple subspaces,
# this function needs to be overridden (see vizdoom or dmlab encoders for example)
return MultiInputEncoder(cfg, obs_space)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/model/encoder.py
| 0.959269 | 0.512754 |
encoder.py
|
pypi
|
from abc import ABC
import torch
from torch import nn
from sample_factory.model.model_utils import ModelModule
from sample_factory.utils.typing import Config
class ModelCore(ModelModule, ABC):
def __init__(self, cfg: Config):
super().__init__(cfg)
self.core_output_size = -1 # to be overridden in derived classes
def get_out_size(self) -> int:
return self.core_output_size
class ModelCoreRNN(ModelCore):
def __init__(self, cfg, input_size):
super().__init__(cfg)
self.cfg = cfg
self.is_gru = False
if cfg.rnn_type == "gru":
self.core = nn.GRU(input_size, cfg.rnn_size, cfg.rnn_num_layers)
self.is_gru = True
elif cfg.rnn_type == "lstm":
self.core = nn.LSTM(input_size, cfg.rnn_size, cfg.rnn_num_layers)
else:
raise RuntimeError(f"Unknown RNN type {cfg.rnn_type}")
self.core_output_size = cfg.rnn_size
self.rnn_num_layers = cfg.rnn_num_layers
def forward(self, head_output, rnn_states):
is_seq = not torch.is_tensor(head_output)
if not is_seq:
head_output = head_output.unsqueeze(0)
if self.rnn_num_layers > 1:
rnn_states = rnn_states.view(rnn_states.size(0), self.cfg.rnn_num_layers, -1)
rnn_states = rnn_states.permute(1, 0, 2)
else:
rnn_states = rnn_states.unsqueeze(0)
if self.is_gru:
x, new_rnn_states = self.core(head_output, rnn_states.contiguous())
else:
h, c = torch.split(rnn_states, self.cfg.rnn_size, dim=2)
x, (h, c) = self.core(head_output, (h.contiguous(), c.contiguous()))
new_rnn_states = torch.cat((h, c), dim=2)
if not is_seq:
x = x.squeeze(0)
if self.rnn_num_layers > 1:
new_rnn_states = new_rnn_states.permute(1, 0, 2)
new_rnn_states = new_rnn_states.reshape(new_rnn_states.size(0), -1)
else:
new_rnn_states = new_rnn_states.squeeze(0)
return x, new_rnn_states
class ModelCoreIdentity(ModelCore):
"""A noop core (no recurrency)."""
def __init__(self, cfg, input_size):
super().__init__(cfg)
self.cfg = cfg
self.core_output_size = input_size
# noinspection PyMethodMayBeStatic
def forward(self, head_output, fake_rnn_states):
return head_output, fake_rnn_states
def default_make_core_func(cfg: Config, core_input_size: int) -> ModelCore:
if cfg.use_rnn:
core = ModelCoreRNN(cfg, core_input_size)
else:
core = ModelCoreIdentity(cfg, core_input_size)
return core
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/model/core.py
| 0.919295 | 0.220259 |
core.py
|
pypi
|
from typing import Callable
from sample_factory.model.actor_critic import ActorCritic, default_make_actor_critic_func
from sample_factory.model.core import ModelCore, default_make_core_func
from sample_factory.model.decoder import Decoder, default_make_decoder_func
from sample_factory.model.encoder import Encoder, default_make_encoder_func
from sample_factory.utils.typing import ActionSpace, Config, ObsSpace
from sample_factory.utils.utils import log
MakeActorCriticFunc = Callable[[Config, ObsSpace, ActionSpace], ActorCritic]
MakeEncoderFunc = Callable[[Config, ObsSpace], Encoder]
MakeCoreFunc = Callable[[Config, int], ModelCore]
MakeDecoderFunc = Callable[[Config, int], Decoder]
class ModelFactory:
def __init__(self):
"""
Optional custom functions for creating parts of the model (encoders, decoders, etc.), or
even overriding the entire actor-critic with a custom model.
"""
self.make_actor_critic_func: MakeActorCriticFunc = default_make_actor_critic_func
# callables user can specify to generate parts of the policy
# the computational graph structure is:
# observations -> encoder -> core -> decoder -> actions
self.make_model_encoder_func: MakeEncoderFunc = default_make_encoder_func
self.make_model_core_func: MakeCoreFunc = default_make_core_func
self.make_model_decoder_func: MakeDecoderFunc = default_make_decoder_func
def register_actor_critic_factory(self, make_actor_critic_func: MakeActorCriticFunc):
"""
Override the default actor-critic with a custom model.
"""
log.debug(f"register_actor_critic_factory: {make_actor_critic_func}")
self.make_actor_critic_func = make_actor_critic_func
def register_encoder_factory(self, make_model_encoder_func: MakeEncoderFunc):
"""
Override the default encoder with a custom model.
The computational graph structure is: observations -> encoder -> core -> decoder -> actions
"""
log.debug(f"register_encoder_factory: {make_model_encoder_func}")
self.make_model_encoder_func = make_model_encoder_func
def register_model_core_factory(self, make_model_core_func: MakeCoreFunc):
"""
Override the default core with a custom model.
The computational graph structure is: observations -> encoder -> core -> decoder -> actions
"""
log.debug(f"register_model_core_factory: {make_model_core_func}")
self.make_model_core_func = make_model_core_func
def register_decoder_factory(self, make_model_decoder_func: MakeDecoderFunc):
"""
Override the default decoder with a custom model.
The computational graph structure is: observations -> encoder -> core -> decoder -> actions
"""
log.debug(f"register_decoder_factory: {make_model_decoder_func}")
self.make_model_decoder_func = make_model_decoder_func
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/model/model_factory.py
| 0.899817 | 0.191252 |
model_factory.py
|
pypi
|
[](https://github.com/alex-petrenko/sample-factory/actions/workflows/test-ci.yml)
[](https://codecov.io/gh/alex-petrenko/sample-factory)
[](https://github.com/alex-petrenko/sample-factory/actions/workflows/pre-commit.yml)
[](https://samplefactory.dev)
[](https://github.com/psf/black)
[](https://pycqa.github.io/isort/)
[](https://github.com/alex-petrenko/sample-factory/blob/master/LICENSE)
[](https://pepy.tech/project/sample-factory)
[<img src="https://img.shields.io/discord/987232982798598164?label=discord">](https://discord.gg/BCfHWaSMkr)
<!-- [](https://results.pre-commit.ci/latest/github/wmFrank/sample-factory/master)-->
<!-- [](https://wakatime.com/badge/github/alex-petrenko/sample-factory)-->
# Sample Factory
High-throughput reinforcement learning codebase. Resources:
* **Paper:** https://arxiv.org/abs/2006.11751
* **Discord:** [https://discord.gg/BCfHWaSMkr](https://discord.gg/BCfHWaSMkr)
* **Twitter (for updates):** [@petrenko_ai](https://twitter.com/petrenko_ai)
[//]: # (* **Talk (circa 2021):** https://youtu.be/lLG17LKKSZc)
## What is Sample Factory?
Sample Factory is one of the fastest RL libraries
focused on very efficient synchronous and asynchronous implementations of policy gradients (PPO).
Sample Factory is thoroughly tested, used by many researchers and practitioners, and is actively maintained.
Our implementation is known to reach SOTA performance in a variety of domains while minimizing RL experiment training time and hardware requirements.
Clips below demonstrate ViZDoom, IsaacGym, DMLab-30, Megaverse, Mujoco, and Atari agents trained with Sample Factory:
<p align="middle">
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/vizdoom.gif?raw=true" width="360" alt="VizDoom agents traned using Sample Factory 2.0">
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/isaac.gif?raw=true" width="360" alt="IsaacGym agents traned using Sample Factory 2.0">
<br/>
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/dmlab.gif?raw=true" width="380" alt="DMLab-30 agents traned using Sample Factory 2.0">
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/megaverse.gif?raw=true" width="340" alt="Megaverse agents traned using Sample Factory 2.0">
<br/>
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/mujoco.gif?raw=true" width="390" alt="Mujoco agents traned using Sample Factory 2.0">
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/gifs/atari.gif?raw=true" width="330" alt="Atari agents traned using Sample Factory 2.0">
</p>
## Key features
* Highly optimized algorithm [architecture](06-architecture/overview.md) for maximum learning throughput
* [Synchronous and asynchronous](07-advanced-topics/sync-async.md) training regimes
* [Serial (single-process) mode](07-advanced-topics/serial-mode.md) for easy debugging
* Optimal performance in both CPU-based and [GPU-accelerated environments](09-environment-integrations/isaacgym.md)
* Single- & multi-agent training, self-play, supports [training multiple policies](07-advanced-topics/multi-policy-training.md) at once on one or many GPUs
* Population-Based Training ([PBT](07-advanced-topics/pbt.md))
* Discrete, continuous, hybrid action spaces
* Vector-based, image-based, dictionary observation spaces
* Automatically creates a model architecture by parsing action/observation space specification. Supports [custom model architectures](03-customization/custom-models.md)
* Library is designed to be imported into other projects, [custom environments](03-customization/custom-environments.md) are first-class citizens
* Detailed [WandB and Tensorboard summaries](05-monitoring/metrics-reference.md), [custom metrics](05-monitoring/custom-metrics.md)
* [HuggingFace 🤗 integration](10-huggingface/huggingface.md) (upload trained models and metrics to the Hub)
* [Multiple](09-environment-integrations/mujoco.md) [example](09-environment-integrations/atari.md) [environment](09-environment-integrations/vizdoom.md) [integrations](09-environment-integrations/dmlab.md) with tuned parameters and trained models
## Next steps
Check out the following guides to get started:
* [Installation](01-get-started/installation.md)
* [Basic Usage](01-get-started/basic-usage.md)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/index.md
| 0.900456 | 0.792424 |
index.md
|
pypi
|
# MuJoCo
<video width="800" controls autoplay><source src="https://huggingface.co/datasets/edbeeching/sample_factory_videos/resolve/main/mujoco_grid_9.mp4" type="video/mp4"></video>
### Installation
Install Sample Factory with MuJoCo dependencies with PyPI:
```
pip install sample-factory[mujoco]
```
### Running Experiments
Run MuJoCo experiments with the scripts in `sf_examples.mujoco`.
The default parameters have been chosen to match CleanRL's results in the report below (please note
that we can achieve even faster training on a multi-core machine with more optimal parameters).
To train a model in the `Ant-v4` environment:
```
python -m sf_examples.mujoco.train_mujoco --algo=APPO --env=mujoco_ant --experiment=<experiment_name>
```
To visualize the training results, use the `enjoy_mujoco` script:
```
python -m sf_examples.mujoco.enjoy_mujoco --algo=APPO --env=mujoco_ant --experiment=<experiment_name>
```
Multiple experiments can be run in parallel with the launcher module. `mujoco_all_envs` is an example launcher script that runs all mujoco envs with 10 seeds.
```
python -m sample_factory.launcher.run --run=sf_examples.mujoco.experiments.mujoco_all_envs --backend=processes --max_parallel=4 --pause_between=1 --experiments_per_gpu=10000 --num_gpus=1 --experiment_suffix=0
```
#### List of Supported Environments
Specify the environment to run with the `--env` command line parameter. The following MuJoCo v4 environments are supported out of the box, and more environments can be added as needed in `sf_examples.mujoco.mujoco.mujoco_utils`
| MuJoCo Environment Name | Sample Factory Command Line Parameter |
| ------------------------- |---------------------------------------|
| Ant-v4 | mujoco_ant |
| HalfCheetah-v4 | mujoco_halfcheetah |
| Hopper-v4 | mujoco_hopper |
| Humanoid-v4 | mujoco_humanoid |
| Walker2d-v4 | mujoco_walker |
| InvertedDoublePendulum-v4 | mujoco_doublependulum |
| InvertedPendulum-v4 | mujoco_pendulum |
| Reacher-v4 | mujoco_reacher |
| Swimmer-v4 | mujoco_swimmer |
### Results
#### Reports
1. Sample Factory was benchmarked on MuJoCo against CleanRL. Sample-Factory was able to achieve similar sample efficiency as CleanRL using the same parameters.
- https://wandb.ai/andrewzhang505/sample_factory/reports/MuJoCo-Sample-Factory-vs-CleanRL-w-o-EnvPool--VmlldzoyMjMyMTQ0
2. Sample Factory can run experiments synchronously or asynchronously, with asynchronous execution usually having worse sample efficiency but runs faster. MuJoCo's environments were compared using the two modes in Sample-Factory
- https://wandb.ai/andrewzhang505/sample_factory/reports/MuJoCo-Synchronous-vs-Asynchronous--VmlldzoyMzEzNDUz
3. Sample Factory comparison with CleanRL in terms of wall time. Both experiments are run on a 16 core machine with 1 GPU. Sample-Factory was able to complete 10M samples 5 times as fast as CleanRL
- https://wandb.ai/andrewzhang505/sample_factory/reports/MuJoCo-Sample-Factory-vs-CleanRL-Wall-Time--VmlldzoyMzg2MDA3
#### Models
Various APPO models trained on MuJoCo environments are uploaded to the HuggingFace Hub. The models have all been trained for 10M steps. Videos of the agents after training can be found on the HuggingFace Hub.
The models below are the best models from the experiment against CleanRL above. The evaluation metrics here are obtained by running the model 10 times.
| Environment | HuggingFace Hub Models | Evaluation Metrics |
| ------------------------- | ---------------------------------------------------------------------------- | ------------------- |
| Ant-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-ant | 5876.09 +/- 166.99 |
| HalfCheetah-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-halfcheetah | 6262.56 +/- 67.29 |
| Humanoid-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-humanoid | 5439.48 +/- 1314.24 |
| Walker2d-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-walker | 5487.74 +/- 48.96 |
| Hopper-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-hopper | 2793.44 +/- 642.58 |
| InvertedDoublePendulum-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-doublependulum | 9350.13 +/- 1.31 |
| InvertedPendulum-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-pendulum | 1000.00 +/- 0.00 |
| Reacher-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-reacher | -4.53 +/- 1.79 |
| Swimmer-v4 | https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-swimmer | 117.28 +/- 2.91 |
#### Videos
Below are some video examples of agents in various MuJoCo envioronments. Videos for all environments can be found in the HuggingFace Hub pages linked above.
##### HalfCheetah-v4
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-halfcheetah/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### Ant-v4
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-ant/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### InvertedDoublePendulum-v4
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/sample-factory-2-mujoco-doublependulum/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/09-environment-integrations/mujoco.md
| 0.488283 | 0.876158 |
mujoco.md
|
pypi
|
# Atari
<video width="800" controls autoplay><source src="https://huggingface.co/datasets/edbeeching/sample_factory_videos/resolve/main/atari_grid_57_60s.mp4" type="video/mp4"></video>
### Installation
Install Sample Factory with Atari dependencies with PyPI:
```
pip install sample-factory[atari]
```
### Running Experiments
Run Atari experiments with the scripts in `sf_examples.atari`.
The default parameters have been chosen to match CleanRL's configuration (see reports below) and are not tuned for throughput.
(see some better parameters at the end of the document).
To train a model in the `BreakoutNoFrameskip-v4` environment:
```
python -m sf_examples.atari.train_atari --algo=APPO --env=atari_breakout --experiment="Experiment Name"
```
To visualize the training results, use the `enjoy_atari` script:
```
python -m sf_examples.atari.enjoy_atari --algo=APPO --env=atari_breakout --experiment="Experiment Name"
```
Multiple experiments can be run in parallel with the launcher module. `atari_envs` is an example launcher script that runs atari envs with 4 seeds.
```
python -m sample_factory.launcher.run --run=sf_examples.atari.experiments.atari_envs --backend=processes --max_parallel=8 --pause_between=1 --experiments_per_gpu=10000 --num_gpus=1
```
### List of Supported Environments
Specify the environment to run with the `--env` command line parameter. The following Atari v4 environments are supported out of the box.
Various APPO models trained on Atari environments are uploaded to the HuggingFace Hub. The models have all been trained for 2 billion steps with 3 seeds per experiment. Videos of the agents after training can be found on the HuggingFace Hub.
| Atari Command Line Parameter | Atari Environment name | Model Checkpooints |
| ---------------------------- | ------------------------------ | -------------------------------------------------------------------------------------------------- |
| atari_alien | AlienNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_alien_1111) |
| atari_amidar | AmidarNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_amidar_1111) |
| atari_assault | AssaultNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_assault_1111) |
| atari_asterix | AsterixNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_asterix_1111) |
| atari_asteroid | AsteroidsNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_asteroid_1111) |
| atari_atlantis | AtlantisNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_atlantis_1111) |
| atari_bankheist | BankHeistNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_bankheist_1111) |
| atari_battlezone | BattleZoneNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_battlezone_1111) |
| atari_beamrider | BeamRiderNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_beamrider_1111) |
| atari_berzerk | BerzerkNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_berzerk_1111) |
| atari_bowling | BowlingNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_bowling_1111) |
| atari_boxing | BoxingNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_boxing_1111) |
| atari_breakout | BreakoutNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_breakout_1111) |
| atari_centipede | CentipedeNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_centipede_1111) |
| atari_choppercommand | ChopperCommandNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_choppercommand_1111) |
| atari_crazyclimber | CrazyClimberNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_crazyclimber_1111) |
| atari_defender | DefenderNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_defender_1111) |
| atari_demonattack | DemonAttackNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_demonattack_1111) |
| atari_doubledunk | DoubleDunkNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_doubledunk_1111) |
| atari_enduro | EnduroNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_enduro_1111) |
| atari_fishingderby | FishingDerbyNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_fishingderby_1111) |
| atari_freeway | FreewayNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_freeway_1111) |
| atari_frostbite | FrostbiteNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_frostbite_1111) |
| atari_gopher | GopherNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_gopher_1111) |
| atari_gravitar | GravitarNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_gravitar_1111) |
| atari_hero | HeroNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_hero_1111) |
| atari_icehockey | IceHockeyNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_icehockey_1111) |
| atari_jamesbond | JamesbondNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_jamesbond_1111) |
| atari_kangaroo | KangarooNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_kangaroo_1111) |
| atari_krull | KrullNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_krull_1111) |
| atari_kongfumaster | KungFuMasterNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_kongfumaster_1111) |
| atari_montezuma | MontezumaRevengeNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_montezuma_1111) |
| atari_mspacman | MsPacmanNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_mspacman_1111) |
| atari_namethisgame | NameThisGameNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_namethisgame_1111) |
| atari_phoenix | PhoenixNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_phoenix_1111) |
| atari_pitfall | PitfallNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_pitfall_1111) |
| atari_pong | PongNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_pong_1111) |
| atari_privateye | PrivateEyeNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_privateye_1111) |
| atari_qbert | QbertNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_qbert_1111) |
| atari_riverraid | RiverraidNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_riverraid_1111) |
| atari_roadrunner | RoadRunnerNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_roadrunner_1111) |
| atari_robotank | RobotankNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_robotank_1111) |
| atari_seaquest | SeaquestNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_seaquest_1111) |
| atari_skiing | SkiingNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_skiing_1111) |
| atari_solaris | SolarisNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_solaris_1111) |
| atari_spaceinvaders | SpaceInvadersNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_spaceinvaders_1111) |
| atari_stargunner | StarGunnerNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_stargunner_1111) |
| atari_surround | SurroundNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_surround_1111) |
| atari_tennis | TennisNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_tennis_1111) |
| atari_timepilot | TimePilotNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_timepilot_1111) |
| atari_tutankham | TutankhamNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_tutankham_1111) |
| atari_upndown | UpNDownNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_upndown_1111) |
| atari_venture | VentureNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_venture_1111) |
| atari_videopinball | VideoPinballNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_videopinball_1111) |
| atari_wizardofwor | WizardOfWorNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_wizardofwor_1111) |
| atari_yarsrevenge | YarsRevengeNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_yarsrevenge_1111) |
| atari_zaxxon | ZaxxonNoFrameskip-v4 | [🤗 Hub Atari-2B checkpoints](https://huggingface.co/edbeeching/atari_2B_atari_zaxxon_1111) |
### Reports
- Sample Factory was benchmarked on Atari against CleanRL and Baselines. Sample Factory was able to achieve similar sample efficiency as CleanRL and Baselines using the same parameters.
- https://wandb.ai/wmfrank/atari-benchmark/reports/Atari-Sample-Factory2-Baselines-CleanRL--VmlldzoyMzEyNjIw
#### Better parameters (more envs, double buffering, async learning)
```
--experiment=breakout_faster
--env=atari_breakout
--summaries_use_frameskip=False
--num_workers=16
--num_envs_per_worker=8
--worker_num_splits=2
--train_for_env_steps=100000000
--rollout=32
--normalize_input=True
--normalize_returns=True
--serial_mode=False
--async_rl=True
--batch_size=1024
--wandb_user=<user>
--wandb_project=sf2_atari_breakout
--wandb_group=breakout_w16v8r32
--with_wandb=True
```
Report: https://wandb.ai/apetrenko/sf2_atari_breakout/reports/sf2-breakout-w16v8r32--Vmlldzo0MjM1MTQ4
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/09-environment-integrations/atari.md
| 0.437703 | 0.95635 |
atari.md
|
pypi
|
# VizDoom
<video width="800" controls autoplay><source src="https://huggingface.co/datasets/edbeeching/sample_factory_videos/resolve/main/vizdoom_grid_12_30s.mp4" type="video/mp4"></video>
### Installation
To install VizDoom just follow system setup instructions from the original repository ([VizDoom linux_deps](https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#linux_deps)),
after which the latest VizDoom can be installed from PyPI:
```pip install vizdoom```
### Running Experiments
Run experiments with the scripts in `sf_examples.vizdoom`.
Train for 4B env steps (also can be stopped at any time with Ctrl+C and resumed by using the same cmd).
This is more or less optimal training setup for a 10-core machine.
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_battle --train_for_env_steps=4000000000 --algo=APPO --env_frameskip=4 --use_rnn=True --batch_size=2048 --wide_aspect_ratio=False --num_workers=20 --num_envs_per_worker=20 --num_policies=1 --experiment=doom_battle_w20_v20
```
Run at any point to visualize the experiment:
```
python -m sf_examples.vizdoom.enjoy_vizdoom --env=doom_battle --algo=APPO --experiment=doom_battle_w20_v20
```
Launcher scripts are also provided in `sf_examples.vizdoom.experiments` to run experiments in parallel or on slurm.
#### Reproducing Paper Results
Train on one of the 6 "basic" VizDoom environments:
```
python -m sf_examples.vizdoom.train_vizdoom --train_for_env_steps=500000000 --algo=APPO --env=doom_my_way_home --env_frameskip=4 --use_rnn=True --num_workers=36 --num_envs_per_worker=8 --num_policies=1 --batch_size=2048 --wide_aspect_ratio=False --experiment=doom_basic_envs
```
Doom "battle" and "battle2" environments, 36-core server (72 logical cores) with 4 GPUs:
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_battle --train_for_env_steps=4000000000 --algo=APPO --env_frameskip=4 --use_rnn=True --num_workers=72 --num_envs_per_worker=8 --num_policies=1 --batch_size=2048 --wide_aspect_ratio=False --max_grad_norm=0.0 --experiment=doom_battle
python -m sf_examples.vizdoom.train_vizdoom --env=doom_battle2 --train_for_env_steps=4000000000 --algo=APPO --env_frameskip=4 --use_rnn=True --num_workers=72 --num_envs_per_worker=8 --num_policies=1 --batch_size=2048 --wide_aspect_ratio=False --max_grad_norm=0.0 --experiment=doom_battle_2
```
Duel and deathmatch versus bots, population-based training, 36-core server:
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_duel_bots --train_for_seconds=360000 --algo=APPO --gamma=0.995 --env_frameskip=2 --use_rnn=True --reward_scale=0.5 --num_workers=72 --num_envs_per_worker=32 --num_policies=8 --batch_size=2048 --benchmark=False --res_w=128 --res_h=72 --wide_aspect_ratio=False --pbt_replace_reward_gap=0.2 --pbt_replace_reward_gap_absolute=3.0 --pbt_period_env_steps=5000000 --save_milestones_sec=1800 --with_pbt=True --experiment=doom_duel_bots
python -m sf_examples.vizdoom.train_vizdoom --env=doom_deathmatch_bots --train_for_seconds=3600000 --algo=APPO --use_rnn=True --gamma=0.995 --env_frameskip=2 --num_workers=80 --num_envs_per_worker=24 --num_policies=8 --batch_size=2048 --res_w=128 --res_h=72 --wide_aspect_ratio=False --with_pbt=True --pbt_period_env_steps=5000000 --experiment=doom_deathmatch_bots
```
Duel and deathmatch self-play, PBT, 36-core server:
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_duel --train_for_seconds=360000 --algo=APPO --gamma=0.995 --env_frameskip=2 --use_rnn=True --num_workers=72 --num_envs_per_worker=16 --num_policies=8 --batch_size=2048 --res_w=128 --res_h=72 --wide_aspect_ratio=False --benchmark=False --pbt_replace_reward_gap=0.5 --pbt_replace_reward_gap_absolute=0.35 --pbt_period_env_steps=5000000 --with_pbt=True --pbt_start_mutation=100000000 --experiment=doom_duel_full
python -m sf_examples.vizdoom.train_vizdoom --env=doom_deathmatch_full --train_for_seconds=360000 --algo=APPO --gamma=0.995 --env_frameskip=2 --use_rnn=True --num_workers=72 --num_envs_per_worker=16 --num_policies=8 --batch_size=2048 --res_w=128 --res_h=72 --wide_aspect_ratio=False --benchmark=False --pbt_replace_reward_gap=0.1 --pbt_replace_reward_gap_absolute=0.1 --pbt_period_env_steps=5000000 --with_pbt=True --pbt_start_mutation=100000000 --experiment=doom_deathmatch_full
```
Reproducing benchmarking results:
This achieves 50K+ framerate on a 10-core machine (Intel Core i9-7900X):
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_benchmark --algo=APPO --env_frameskip=4 --use_rnn=True --num_workers=20 --num_envs_per_worker=32 --num_policies=1 --batch_size=4096 --experiment=doom_battle_appo_fps_20_32 --res_w=128 --res_h=72 --wide_aspect_ratio=False --policy_workers_per_policy=2 --worker_num_splits=2
```
This achieves 100K+ framerate on a 36-core machine:
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_benchmark --algo=APPO --env_frameskip=4 --use_rnn=True --num_workers=72 --num_envs_per_worker=24 --num_policies=1 --batch_size=8192 --wide_aspect_ratio=False --experiment=doom_battle_appo_w72_v24 --policy_workers_per_policy=2
```
### Results
#### Reports
1. We reproduced the paper results in SF2 in the Battle and Battle2 and compared the results using input normalization. Input normalization has improved results in the Battle environment. This experiment with input normalization was run with `sf_examples.vizdoom.experiments.sf2_doom_battle_envs`. Note that `normalize_input=True` is set compared to the results from the paper
- https://wandb.ai/andrewzhang505/sample_factory/reports/VizDoom-Battle-Environments--VmlldzoyMzcyODQx
2. In SF2's bot environments (deathmatch_bots and duel_bots), we trained the agents against randomly generated bots as opposed to a curriculum of increasing bot difficulty.
This is because the latest version of ViZDoom environment no longer provides the bots used in the curriculum, and SF2 no longer requires the curriculum to train properly. However, due to the differences in bot difficulty, the current training results are no longer comparable to the paper. An example training curve on deathmatch_bots with the same parameters as in the paper is shown below. Additionally, the report includes 8 agents trained using PBT against duel-bots with normalization and we were able to get better results than the Sample Factory paper:
- https://wandb.ai/andrewzhang505/sample_factory/reports/ViZDoom-Bots--VmlldzoyNzY2NDI1
3. We also trained in the `doom_duel` multi-agent environment using self play. The training metrics of the experiment can be found on the Hugging Face Hub: https://huggingface.co/andrewzhang505/doom-duel-selfplay/tensorboard
- The reward scaling done by PBT can be found under `zz_pbt`. For example in this experiment, the reward scaling related to damage dealt `rew_DAMAGECOUNT_0`
increase more than 10x from 0.01 to around 0.15 at max.
- The `true_objective` reported corresponds to the fraction of matches won. In this experiment, the agents performed fairly equally as seen under `policy_stats/avg_true_objective` as agents rarely win over 60% of matches.
#### Models
The models below are the best models from the input normalization experiment above. The evaluation metrics here are obtained by running the model 10 times.
| Environment | HuggingFace Hub Models | Evaluation Metrics |
| ----------- | ------------------------------------------------------------------- | ------------------ |
| Battle | https://huggingface.co/andrewzhang505/sample-factory-2-doom-battle | 59.37 +/- 3.93 |
| Battle2 | https://huggingface.co/andrewzhang505/sample-factory-2-doom-battle2 | 36.40 +/- 4.20 |
| Deathmatch-Bots | https://huggingface.co/andrewzhang505/doom_deathmatch_bots | 85.66 +/- 28.53 |
| Duel-Bots | https://huggingface.co/andrewzhang505/doom_duel_bots_pbt | 55.39 +/- 17.13 |
| Duel | https://huggingface.co/andrewzhang505/doom-duel-selfplay | |
#### Videos
##### Doom Battle
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/sample-factory-2-doom-battle/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### Doom Battle2
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/sample-factory-2-doom-battle2/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### Doom Deathmatch Bots
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/doom_deathmatch_bots/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### Doom Duel Bots PBT
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/doom_duel_bots_pbt/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
##### Doom Duel Multi-Agent
<p align="center">
<video class="w-full" src="https://huggingface.co/andrewzhang505/doom-duel-selfplay/resolve/main/replay.mp4" controls="" autoplay="" loop=""></video></p>
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/09-environment-integrations/vizdoom.md
| 0.636466 | 0.759382 |
vizdoom.md
|
pypi
|
# Custom environments
Training agents in your own environment with Sample Factory is straightforward,
but if you get stuck feel free to raise an issue on our [GitHub Page](https://github.com/alex-petrenko/sample-factory/issues).
We recommend looking at our example environment integrations such as [Atari](../09-environment-integrations/atari.md)
or [MuJoCo](../09-environment-integrations/mujoco.md) before using your own environment.
## Custom environment template
In order to integrate your own environment with Sample Factory, the following steps are required:
* Define entry points for training and evaluation scripts, such as `train_custom_env.py` and `enjoy_custom_env.py`.
* Define a method that creates an instance of your environment, such as `make_custom_env()`.
* Override any default parameters that are specific to your environment, this way you can avoid passing them from the command line (optional).
* Add any custom parameters that will be parsed by Sample Factory alongside the default parameters (optional).
We provide the following template, which you can modify to intergrate your environment.
We assume your environment conforms to the [gym](https://github.com/openai/gym) 0.26 API (5-tuple).
```python3
from typing import Optional
import argparse
import sys
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import register_env
from sample_factory.train import run_rl
def make_custom_env(full_env_name: str, cfg=None, env_config=None, render_mode: Optional[str] = None):
# see the section below explaining arguments
return CustomEnv(full_env_name, cfg, env_config, render_mode=render_mode)
def register_custom_env_envs():
# register the env in sample-factory's global env registry
# after this, you can use the env in the command line using --env=custom_env_name
register_env("custom_env_name", make_custom_env)
def add_custom_env_args(_env, p: argparse.ArgumentParser, evaluation=False):
# You can extend the command line arguments here
p.add_argument("--custom_argument", default="value", type=str, help="")
def custom_env_override_defaults(_env, parser):
# Modify the default arguments when using this env.
# These can still be changed from the command line. See configuration guide for more details.
parser.set_defaults(
encoder_conv_architecture="convnet_atari",
obs_scale=255.0,
gamma=0.99,
learning_rate=0.00025,
lr_schedule="linear_decay",
adam_eps=1e-5,
)
def parse_args(argv=None, evaluation=False):
# parse the command line arguments to build
parser, partial_cfg = parse_sf_args(argv=argv, evaluation=evaluation)
add_custom_env_args(partial_cfg.env, parser, evaluation=evaluation)
custom_env_override_defaults(partial_cfg.env, parser)
final_cfg = parse_full_cfg(parser, argv)
return final_cfg
def main():
"""Script entry point."""
register_custom_env_envs()
cfg = parse_args()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
```
Training can now be started with `python train_custom_env.py --env=custom_env_name --experiment=CustomEnv`. Note that this train script
can be defined in your own codebase, or in the Sample Factory codebase (in case you forked the repo).
### Environment factory function parameters
`register_env("custom_env_name", make_custom_env)` expects `make_custom_env` to be a Callable with the following signature:
```python3
def make_custom_env_func(full_env_name: str, cfg: Optional[Config] = None, env_config: Optional[AttrDict] = None, render_mode: Optional[str] = None) -> Env
```
Arguments:
* `full_env_name`: complete name of the environment as passed in the command line with `--env`
* `cfg`: full system configuration, output of argparser. Normally this is an `AttrDict` (dictionary where keys can be accessed as attributes)
* `env_config`: AttrDict with additional system information, for example: `env_config = AttrDict(worker_index=worker_idx, vector_index=vector_idx, env_id=env_id)`
Some custom environments will require this information, i.e. `env_id` is a unique identifier for each environment instance in 0..num_envs-1 range.
* `render_mode`: if not None, environment will be rendered in this mode (e.g. 'human', 'rgb_array'). New parameter required after Gym 0.26.
See `sample_factory/envs/create_env.py` for more details.
## Evaluation script template
The evaluation script template is even more straightforward. Note that we just reuse functions already defined in the training script.
```python3
import sys
from sample_factory.enjoy import enjoy
from train_custom_env import parse_args, register_custom_env_envs
def main():
"""Script entry point."""
register_custom_env_envs()
cfg = parse_args(evaluation=True)
status = enjoy(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
```
You can now run evaluation with `python enjoy_custom_env.py --env=custom_env_name --experiment=CustomEnv` to
measure the performance of the trained model, visualize agent's performance, or record a video file.
## Examples
* `sf_examples/train_custom_env_custom_model.py` - integrates an entirely custom toy environment.
* `sf_examples/train_gym_env.py` - trains an agent in a Gym environment. Environments registered in `gym` do not
get any special treatment, as it is just another way to define an environment. In this case the environment creation
function reduces to `gym.make(env_name)`.
* See environment integrations in `sf_examples/<env_name>` for additional examples.
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/03-customization/custom-environments.md
| 0.778355 | 0.865508 |
custom-environments.md
|
pypi
|
# Configuration
Sample Factory experiments are configured via command line parameters. The following command will print the help message
for the algorithm-environment combination containing the list of all parameters, their descriptions, and their default values:
```bash
python -m sf_examples.train_gym_env --env=CartPole-v1 --help
```
(replace `train_gym_env` with your own training script name and `CartPole-v1` with a different environment name to
get information about parameters specific to this particular environment).
Default parameter values and their help strings are defined in `sample_factory/cfg/cfg.py`.
Besides that, additional parameters can be defined in specific environment integrations, for example in
`sf_examples/envpool/mujoco/envpool_mujoco_params.py`.
## config.json
Once the new experiment is started, a directory containing experiment-related files is created in `--train_dir`
location (or `./train_dir` in `cwd` if `--train_dir` is not passed from command line). This directory contains a file
`config.json` where all the experiment parameters are saved (including those instantiated from their default values).
In addition to that, selected parameter values are printed to the console and thus are saved to `sf_log.txt` file in the experiment directory.
Running an experiment and then stopping it to check the parameter values is a good practice to make sure
that the experiment is configured as expected.
## Key parameters
- `--env` (required) full name that uniquely identifies the environment as it is registered in the environment registry
(see `register_env()` function).
- `--experiment` a name that uniquely identifies the experiment and the experiment folder. E.g. `--experiment=my_experiment`.
If the experiment folder with the name already exists the experiment (by default) will be _resumed_!
Resuming experiments after a stop is the default behavior in Sample Factory.
When the experiment is resumed from command line are taken into account, unspecified parameters will be loaded from the existing experiment
`config.json` file. If you want to start a new experiment, delete the old experiment folder or change the experiment name.
You can also use `--restart_behavior=[resume|restart|overwrite]` to control this behavior.
- `--train_dir` location for all experiments folders, defaults to `./train_dir`.
- `--num_workers` defaults to number of logical cores in the system, which will give the best throughput in
most scenarios.
- `--num_envs_per_worker` will greatly affect the performance. Large values (15-30) improve hardware utilization but
increase memory usage and policy lag. _Must be even_ for the double-buffered sampling to work. Disable double-buffered sampling by setting `--worker_num_splits=1`
to use odd number of envs per worker (e.g. 1 env per worker). (Default: 2)
A good rule of thumb is to set this to relatively low value (e.g. 4 or 8 for common envs) and then increase it until you see
no more performance improvements or you start losing sample efficiency due to the [policy lag](../07-advanced-topics/policy-lag.md).
- `--rollout` is the length of trajectory collected by each agent.
- `--batch_size` is the minibatch size for SGD.
- `--num_batches_per_epoch` is the number of minibatches the training batch (dataset) is split into.
- `--num_epochs` is the number of epochs on the learner over one training batch (dataset).
The above six parameters (`batch_size, num_batches_per_epoch, rollout, num_epochs, num_workers, num_envs_per_worker`) have the
biggest influence on the data regime of the RL algorithm and thus on the sample efficiency and the training speed.
`num_workers`, `num_envs_per_worker`, and `rollout` define how many samples are collected per iteration (one rollout for all envs), which is
`sampling_size = num_workers * num_envs_per_worker * rollout` (note that this is further multiplied by env's `num_agents` for multi-agent envs).
`batch_size` and `num_batches_per_epoch` define how many samples are used for training per iteration.
If `sampling_size >> batch_size` then we will need many iterations of training to go through the data, which
will make some experience stale by the time it is used for training (**policy lag**). See [Policy Lag](../07-advanced-topics/policy-lag.md)
for additional information.
## Evaluation script parameters
Evaluation scripts (i.e. `sf_examples/atari/enjoy_atari.py`) use the same configuration parameters as training scripts
for simplicity, although of course many of them are ignored as they don't affect evaluation.
In addition to that, evaluation scripts provide additional parameters, see `add_eval_args()` in `sample_factory/cfg/cfg.py`.
[HuggingFace Hub integration guide](../10-huggingface/huggingface.md) provides a good overview of the important parameters
such as `--save_video`, check it out!
## Full list of parameters
Please see the [Full Parameter Reference](cfg-params.md) auto-generated using the `--help`
flag for the full list of available command line arguments.
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/02-configuration/configuration.md
| 0.404272 | 0.908456 |
configuration.md
|
pypi
|
# Architecture Overview
While a basic implementation of an RL algorithm can fit in a [single file](https://cleanrl.dev/), a high-throughput
RL system requires a rather sophisticated architecture. This document describes the high-level design of Sample Factory.
The following diagram shows the main components of the system and the data flow between them. Please see sections below for more details.
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/docs/arch/arch_diag.png?raw=true" alt="Architecture Diagram" width="1280">
## High-level design
At the core of Sample Factory structure is the idea that RL training can be split into multiple largely independent components,
each one of them focusing on a specific task. This enables a modular design where these components can be
accelerated/parallelized independently, allowing us to achieve the maximum performance on any RL task.
Components interact asynchronously by sending and receving messages (aka signals, see a [dedicated section on message passing](message-passing.md)).
Typically separate components live on different event loops in different processes, although the system is agnostic of
whether this is true and it is thus possible to run multiple (or even all components) on a single event loop in a single process.
Instead of explicitly sending the data between components (i.e. by serializing observations and sending them across processes),
we choose to send the data through shared memory buffers.
Each time a component needs to send data to another component, it writes the data to a shared memory buffer and sends a signal
containing the buffer ID (essentially a pointer to data). This massively reduces the overhead of message passing.
## Components
Each component is dedicated to a specific task and can be seen as a data processing engine (i.e. each component
gets some input by receiving signals, executes a computation, and broadcasts the results by emitting its own signals).
These are the main components of Sample Factory:
* **Rollout Workers** are responsible for environment simulation. Rollout workers receive actions from the policy,
do environment `step()` and produce observations after each step and full trajectories after `--rollout` steps.
* **Inference Workers** receive observations and hidden states and produce actions. The policy on each inference worker
is updated after each SGD step on the learner.
* **Batcher** receives trajectories from rollout workers, puts them together and produces datasets of data for the learner.
* **Learner** gets batches of data from the batcher, splits them into minibatches and does `--num_epochs` of
stochastic gradient descent. After each SGD step the updated weights are written to shared memory buffers and the corresponding
signal is broadcasted.
* **Runner** is a component that bootstraps the whole system, receives all sorts of statistics from other components and
takes care of logging and summary writing.
* **Sampler**, although technically its own component that can send and receive signals, in the typical configuration
is nothing more than a thin wrapper around Rollout/Inference workers and serves as an interface to the rest of the system.
(Although this interface allows us to create alternative samplers i.e. single-process synchronous JAX-optimized sampler is an idea)
### Rollout Workers
The number of rollout workers is controlled by `--num_workers`. Each rollout worker can simulate one or multiple environments
serially in the same process. The number of environments per worker is controlled by `--num_envs_per_worker`.
Each rollout worker contains >= 1 of `VectorEnvRunner` objects, the number of which is controlled by `--worker_num_splits`.
The default value of this parameter is 2, which enables [double-buffered sampling](../07-advanced-topics/double-buffered.md). The number of envs on each
`VectorEnvRunner` is thus `num_envs_per_worker // worker_num_splits` and therefore `--num_envs_per_worker` must be divisible by `--worker_num_splits`.
### Inference Workers
Each policy (see [multi-policy training](../07-advanced-topics/multi-policy-training.md)) has >= 1 corresponding inference workers
which generate actions for the agents controlled by this policy.
The number of inference workers is controlled by `--policy_workers_per_policy`.
### Batcher
There's typically a single batcher per policy in the system.
The batcher receives trajectories from rollout workers and puts them together into a dataset available for training.
In [batched sampling mode](../07-advanced-topics/batched-non-batched.md) this is pretty much a no-op, the batcher just passes the data through.
In non-batched sampling mode this is a non-trivial process, since rollouts from different workers finish
asynchronously and need to be put in the contiguous tensor for minibatch SGD.
Although batcher is it's own component, in the default configuration we run it in the same process as the learner (but in a separate thread)
in order to minimize the number of CUDA contexts and thus VRAM usage.
### Learner
There's typically a single learner per policy in the system.
Trajectory datasets flow in and updated parameters flow out.
## Terminology
Some terminology used in the codebase and in the further documentation:
* **rollout** or **trajectory** is a sequence of observations, actions, rewards, etc. produced by a single agent.
* **dataset** (or **training batch** or sometimes just **batch**) is a collection of trajectories produced by >=1 agents.
* Datasets are split into **minibatches** and >=1 **epochs** of SGD are performed.
Minibatch size is determined by `--batch_size` and number of epochs is determined by `--num_epochs`. Dataset size is
`batch_size * num_batches_per_epoch`, and in total `batch_size * num_batches_per_epoch * num_epochs` SGD steps are performed
on each dataset (sorry for the obvious confusion between "batch" and "minibatch" terms, the parameter names are kept largely for legacy reasons).
* **signals** are messages sent between components. Signals are connected to **slots**, which are functions that are called
when a signal is received. This mechanism is inspired by Qt's signals and slots (see the [dedicated section on message passing](06-architecture/message-passing.md)).
* **shared memory buffers** are PyTorch tensors shared between processes, created with `share_memory_()` method.
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/06-architecture/overview.md
| 0.954446 | 0.981257 |
overview.md
|
pypi
|
# Experiment Launcher
The simplest way to run experiments is just through command line, see [Basic Usage](../01-get-started/basic-usage.md) for example.
For more complex workflows Sample Factory provides an interface that allows users to run experiments with multiple seeds or hyperparameter combinations
with automatic distribution of work across GPUs on a single machine or multiple machines on the cluster.
The configuration of such experiments is done through in Python code, i.e. instead of yaml or json files we
directly use Python scripts for ultimate flexibility.
## Launcher scripts
Take a look at `sf_examples/mujoco/experiments/mujoco_all_envs.py`:
```python
from sample_factory.launcher.run_description import Experiment, ParamGrid, RunDescription
_params = ParamGrid(
[
("seed", [0, 1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888, 9999]),
("env", ["mujoco_ant", "mujoco_halfcheetah", "mujoco_hopper", "mujoco_humanoid", "mujoco_doublependulum", "mujoco_pendulum", "mujoco_reacher", "mujoco_swimmer", "mujoco_walker"]),
]
)
_experiments = [
Experiment(
"mujoco_all_envs",
"python -m sf_examples.mujoco.train_mujoco --algo=APPO --with_wandb=True --wandb_tags mujoco",
_params.generate_params(randomize=False),
),
]
RUN_DESCRIPTION = RunDescription("mujoco_all_envs", experiments=_experiments)
```
This script defines a list of experiments to run. Here we have 10 seeds and 9 environments, so we will run 90 experiments in total with 90 different seed/env combinations.
This can be extended in a straightforward way to run hyperparameter searches and so on.
The only requirement for such a script is that it defines a `RUN_DESCRIPTION` variable that references a `RunDescription` object.
This object contains a list of `Experiment` objects, each of which potentially defines a gridsearch to run.
Each experiment object defines a name, a "base" command line to run, and a `ParamGrid` that will generate parameter combinations to be added to the base command line.
Take a look at other experiment scripts in `sf_examples` to see how to define more complex experiments.
Note that there's no requirement to use Launcher API to run experiments. You can just run individual experiments from the command line,
use WandB hyperparam search features, use Ray Tune or any other tool you like. Launcher API is just a convenient feature for simple workflows available out of the box.
### Complex hyperparameter configurations
The `ParamGrid` object above can define a cartesian product of parameter lists.
In some cases we want searches over pairs (or tuples) of parameters at the same time.
For example:
```python
_params = ParamGrid(
[
("seed", [1111, 2222, 3333, 4444]),
(("serial_mode", "async_rl"), ([True, False], [False, True])),
(("use_rnn", "recurrence"), ([False, 1], [True, 16])),
]
)
```
Here we consider parameter pairs `("serial_mode", "async_rl")` and `("use_rnn", "recurrence")` at the same time.
If we used a simple grid, we would have to execute useless combinations of parameters such as `use_rnn=True, recurrence=1` or `use_rnn=False, recurrence=16`
(it makes sense to use recurrence > 1 only when using RNNs).
### RunDescription arguments
Launcher script should expose a RunDescription object named `RUN_DESCRIPTION` that contains a list of experiments to run and some auxiliary parameters.
`RunDescription` parameter reference:
```python
class RunDescription:
def __init__(
self,
run_name,
experiments,
experiment_arg_name="--experiment",
experiment_dir_arg_name="--train_dir",
customize_experiment_name=True,
param_prefix="--",
):
"""
:param run_name: overall name of the experiment and the name of the root folder
:param experiments: a list of Experiment objects to run
:param experiment_arg_name: CLI argument of the underlying experiment that determines it's unique name
to be generated by the launcher. Default: --experiment
:param experiment_dir_arg_name: CLI argument for the root train dir of your experiment. Default: --train_dir
:param customize_experiment_name: whether to add a hyperparameter combination to the experiment name
:param param_prefix: most experiments will use "--" prefix for each parameter, but some apps don't have this
prefix, i.e. with Hydra you should set it to empty string.
"""
```
## Using a launcher script
The script above can be executed using one of several backends.
Additional backends are a welcome contribution! Please submit PRs :)
### "Local" backend (multiprocessing)
Command line below will run all experiments on a single 4-GPU machine, scheduling 2 experiments per GPU, so running 8 experiments in parallel until all 90 are done.
Note how we pass the full path to the launcher script using `--run` argument.
The script should be in your Python path in a way that you should be able to import the module using the path you pass to `--run` (because this is what the Launcher internally does).
```bash
python -m sample_factory.launcher.run --run=sf_examples.mujoco.experiments.mujoco_all_envs --backend=processes --max_parallel=8 --pause_between=1 --experiments_per_gpu=2 --num_gpus=4
```
### Slurm backend
The following command will run experiments on a Slurm cluster, creating a separate job for each experiment.
```bash
python -m sample_factory.launcher.run --run=sf_examples.mujoco.experiments.mujoco_all_envs --backend=slurm --slurm_workdir=./slurm_isaacgym --experiment_suffix=slurm --slurm_gpus_per_job=1 --slurm_cpus_per_gpu=16 --slurm_sbatch_template=./sample_factory/launcher/slurm/sbatch_timeout.sh --pause_between=1 --slurm_print_only=False
```
Here we will use 1 GPU and 16 CPUs per job (adjust according to your cluster configuration and experiment config).
Note how we also pass `--slurm_sbatch_template` argument which contains a bash script that will bootstrap a job.
In this particular example we use a template that will kill the job if it runs longer than a certain amount of time and then restarts itself
(controlled by `--slurm_timeout` which defaults to 0, i.e. no timeout).
Feel free to use your custom template if your job has certain pre-requisites (i.e. installing some packages or activating a Python environment).
Please find additional Slurm considerations in [How to use Sample Factory on Slurm](slurm-details.md) guide.
### NGC backend
We additionally provide a backend for NGC clusters (https://ngc.nvidia.com/).
```bash
python -m sample_factory.launcher.run --run=sf_examples.mujoco.experiments.mujoco_all_envs --backend=ngc --ngc_job_template=run_scripts/ngc_job_16g_1gpu.template --ngc_print_only=False --train_dir=/workspace/train_dir
```
Here `--ngc_job_template` contains information about which Docker image to run plus any additional job bootstrapping.
The command will essentially spin a separate VM on the cloud for each job.
Point `--train_dir` to a mounted workspace folder so that you can access results of your experiments (trained models, logs, etc.)
### Additional CLI examples
```
Local multiprocessing backend:
$ python -m sample_factory.launcher.run --run=sf_examples.vizdoom.experiments.paper_doom_battle2_appo --backend=processes --max_parallel=4 --pause_between=10 --experiments_per_gpu=1 --num_gpus=4
Parallelize with Slurm:
$ python -m sample_factory.launcher.run --run=megaverse_rl.runs.single_agent --backend=slurm --slurm_workdir=./megaverse_single_agent --experiment_suffix=slurm --pause_between=1 --slurm_gpus_per_job=1 --slurm_cpus_per_gpu=12 --slurm_sbatch_template=./megaverse_rl/slurm/sbatch_template.sh --slurm_print_only=False
Parallelize with NGC (https://ngc.nvidia.com/):
$ python -m sample_factory.launcher.run --run=rlgpu.run_scripts.dexterous_manipulation --backend=ngc --ngc_job_template=run_scripts/ngc_job_16g_1gpu.template --ngc_print_only=False --train_dir=/workspace/train_dir
```
## Command-line reference
```
usage: run.py [-h] [--train_dir TRAIN_DIR] [--run RUN]
[--backend {processes,slurm,ngc}]
[--pause_between PAUSE_BETWEEN]
[--experiment_suffix EXPERIMENT_SUFFIX]
# Multiprocessing backend:
[--num_gpus NUM_GPUS]
[--experiments_per_gpu EXPERIMENTS_PER_GPU]
[--max_parallel MAX_PARALLEL]
# Slurm-related:
[--slurm_gpus_per_job SLURM_GPUS_PER_JOB]
[--slurm_cpus_per_gpu SLURM_CPUS_PER_GPU]
[--slurm_print_only SLURM_PRINT_ONLY]
[--slurm_workdir SLURM_WORKDIR]
[--slurm_partition SLURM_PARTITION]
[--slurm_sbatch_template SLURM_SBATCH_TEMPLATE]
# NGC-related
[--ngc_job_template NGC_JOB_TEMPLATE]
[--ngc_print_only NGC_PRINT_ONLY]
```
```
Arguments:
-h, --help show this help message and exit
--train_dir TRAIN_DIR
Directory for sub-experiments
--run RUN Name of the python module that describes the run, e.g.
sf_examples.vizdoom.experiments.doom_basic
--backend {processes,slurm,ngc}
--pause_between PAUSE_BETWEEN
Pause in seconds between processes
--experiment_suffix EXPERIMENT_SUFFIX
Append this to the name of the experiment dir
Multiprocessing backend:
--num_gpus NUM_GPUS How many GPUs to use (only for local multiprocessing)
--experiments_per_gpu EXPERIMENTS_PER_GPU
How many experiments can we squeeze on a single GPU
(-1 for not altering CUDA_VISIBLE_DEVICES at all)
--max_parallel MAX_PARALLEL
Maximum simultaneous experiments (only for local multiprocessing)
Slurm-related:
--slurm_gpus_per_job SLURM_GPUS_PER_JOB
GPUs in a single SLURM process
--slurm_cpus_per_gpu SLURM_CPUS_PER_GPU
Max allowed number of CPU cores per allocated GPU
--slurm_print_only SLURM_PRINT_ONLY
Just print commands to the console without executing
--slurm_workdir SLURM_WORKDIR
Optional workdir. Used by slurm launcher to store
logfiles etc.
--slurm_partition SLURM_PARTITION
Adds slurm partition, i.e. for "gpu" it will add "-p
gpu" to sbatch command line
--slurm_sbatch_template SLURM_SBATCH_TEMPLATE
Commands to run before the actual experiment (i.e.
activate conda env, etc.) Example: https://github.com/alex-petrenko/megaverse/blob/master/megaverse_rl/slurm/sbatch_template.sh
(typically a shell script)
--slurm_timeout SLURM_TIMEOUT
Time to run jobs before timing out job and requeuing the job. Defaults to 0, which does not time out the job
NGC-related:
--ngc_job_template NGC_JOB_TEMPLATE
NGC command line template, specifying instance type, docker container, etc.
--ngc_print_only NGC_PRINT_ONLY
Just print commands to the console without executing
```
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/04-experiments/experiment-launcher.md
| 0.68941 | 0.904144 |
experiment-launcher.md
|
pypi
|
# Profiling
It is virtually impossible to optimize any system without measuring its performance and identifying the bottlenecks.
This guide will show you how to profile your RL workload in different regimes.
## Profiling with the built-in "Timing" tool
Sample Factory provides a simple class called `Timing` (see `sample_factory/utils/timing.py`) that can be used
for high-level profiling to get a rough idea of where the compute cycles are spent.
Core hotspots are already instrumented, but if you'd like to see a more elaborate picture, you can use the `Timing` class in
your own code like this:
```python
import time
timing = Timing(name="MyProfile")
# add_time() will accumulate time spent in the block
# this is the most commonly used method
with timing.add_time("hotspot"):
# do something
...
# measure time spent in a subsection of code
# when we build the timing report, we'll generate a tree corresponding to the nesting
with timing.add_time("subsection1"):
# do something
...
with timing.add_time("subsection2"):
# do something
...
# instead of accumulating time, this will measure the last time the block was executed
with timing.timeit("hotspot2"):
# do something
...
# this will measure the average time spent in the block
with timing.time_avg("hotspot3"):
# do something
...
# this will print the timing report
print(timing)
```
### Example: profiling an asynchronous workload
Let's take a look at a typical RL workload: training an agent in a VizDoom pixel-based environment.
We use the following command line and run it on a 6-core laptop with hyperthreading:
```bash
python -m sf_examples.vizdoom.train_vizdoom --env=doom_benchmark --env_frameskip=4 --train_for_env_steps=4000000 \\
--use_rnn=True --num_workers=12 --num_envs_per_worker=16 --num_policies=1 --num_epochs=1 --rollout=32 --recurrence=32 \\
--batch_size=2048 --experiment=profiling --benchmark=True --decorrelate_envs_on_one_worker=False --res_w=128 --res_h=72 \\
--wide_aspect_ratio=True --policy_workers_per_policy=1 --worker_num_splits=2 --batched_sampling=False \\
--serial_mode=False --async_rl=True --policy_workers_per_policy=1
```
If we wait for this experiment to finish (in this case, after training for 4M env steps), we'll get the following timing report:
```
[2022-11-25 01:36:52,563][15762] Batcher 0 profile tree view:
batching: 10.1365, releasing_batches: 0.0136
[2022-11-25 01:36:52,564][15762] InferenceWorker_p0-w0 profile tree view:
wait_policy: 0.0022
wait_policy_total: 93.7697
update_model: 2.3025
weight_update: 0.0015
one_step: 0.0034
handle_policy_step: 105.2299
deserialize: 7.4926, stack: 0.6621, obs_to_device_normalize: 29.3540, forward: 38.4143, send_messages: 5.9522
prepare_outputs: 18.2651
to_cpu: 11.2702
[2022-11-25 01:36:52,564][15762] Learner 0 profile tree view:
misc: 0.0024, prepare_batch: 8.0517
train: 28.5942
epoch_init: 0.0037, minibatch_init: 0.0038, losses_postprocess: 0.1654, kl_divergence: 0.2093, after_optimizer: 12.5617
calculate_losses: 10.2242
losses_init: 0.0021, forward_head: 0.4746, bptt_initial: 7.5225, tail: 0.3432, advantages_returns: 0.0976, losses: 0.7113
bptt: 0.9616
bptt_forward_core: 0.9263
update: 5.0903
clip: 0.8172
[2022-11-25 01:36:52,564][15762] RolloutWorker_w0 profile tree view:
wait_for_trajectories: 0.0767, enqueue_policy_requests: 5.3569, env_step: 170.3642, overhead: 10.1567, complete_rollouts: 0.3764
save_policy_outputs: 6.6260
split_output_tensors: 3.0167
[2022-11-25 01:36:52,564][15762] RolloutWorker_w11 profile tree view:
wait_for_trajectories: 0.0816, enqueue_policy_requests: 5.5298, env_step: 169.3195, overhead: 10.2944, complete_rollouts: 0.3914
save_policy_outputs: 6.7380
split_output_tensors: 3.1037
[2022-11-25 01:36:52,564][15762] Loop Runner_EvtLoop terminating...
[2022-11-25 01:36:52,565][15762] Runner profile tree view:
main_loop: 217.4041
[2022-11-25 01:36:52,565][15762] Collected {0: 4014080}, FPS: 18463.7
```
First thing to notice here: instead of a single report we have reports from all
different types of components of our system: Batcher, InferenceWorker, Learner, RolloutWorker, Runner (main loop).
There are 12 rollout workers but we see only 0th (first) and 11th (last) workers in the report - that's just
to save space, reports from all other workers will be very similar.
Total training time took 217 seconds at ~18400FPS (actual FPS reported during training was ~21000FPS, but
this final number takes initialization time into account).
Each individual report is a tree view of the time spent in different hotspots.
For example, learner profile looks like this:
```
train: 28.5942
epoch_init: 0.0037, minibatch_init: 0.0038, losses_postprocess: 0.1654, kl_divergence: 0.2093, after_optimizer: 12.5617
calculate_losses: 10.2242
losses_init: 0.0021, forward_head: 0.4746, bptt_initial: 7.5225, tail: 0.3432, advantages_returns: 0.0976, losses: 0.7113
bptt: 0.9616
bptt_forward_core: 0.9263
update: 5.0903
clip: 0.8172
```
`train` is the highest-level profiler context. On the next line we print all sub-profiles that don't have any
sub-profiles of their own. In this case, `epoch_init`, `minibatch_init`, etc.
After that, one by one, we print all sub-profiles that have sub-profiles of their own.
Let's take a look at individual component reports:
* Runner (main loop) does not actually do any heavy work other than reporting summaries, so we can ignore it.
It is here mostly to give us the total time from experiment start to finish.
* Batcher is responsible for batching trajectories from rollout workers and feeding them to the learner. In
this case it only took 10 seconds and since it was done in parallel to all other work, we can ignore it for the most part,
it's pretty fast.
* Learner's main hotspots took only 8 and 28 seconds. Again, considering that it was done in parallel to all other work,
and the time is pretty insignificant compared to the total time of 217 seconds, we can
safely say that it's not the bottleneck.
* InferenceWorker's overall time is 105 seconds, which is significant. We can see that the main hotspots are
`forward` (actual forward pass) and `obs_to_device_normalize` (normalizing the observations and transferring them to GPU).
In order to increase throughtput we might want to make our model faster (i.e. by making it smaller) or disable normalization
(parameter `--normalize_input=False`, see config reference).
Note however that both of these measures may hurt sample efficiency.
* RolloutWorkers that simulate the environment are the main culprits here.
The majority of time is taken by `env_step` (stepping through the environment), ~170 seconds. Overall, we can say that this workload
is heavily dominated by CPU-based simulation. If you're in a similar situation you might want to consider instrumenting
your code deeper (i.e. using `Timing` or other tool) to measure hotspots in your environment and attempt to optimize it.
### Notes on GPU profiling
Profiling GPU-based workloads can be misleading because GPU kernels are asynchronous and sometimes we can see
a lot of time spent in sections _after_ the ones we expect to be the hotspots.
In the example above, the learner's `after_optimizer: 12.5617` is significantly longer than `update: 5.0903` where
the actual backward pass happens.
Thus one should not rely too heavily on timing your code for GPU profiling.
Take a look at CUDA profiling, i.e. here is a [Pytorch tutorial](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html).
Also check out [this tutorial](https://www.youtube.com/watch?v=I4MjX598ZYs&list=PLGywud_-HlCORC0c4uj97oppQrGiB6JNy)
for some advanced RL profiling techniques.
## Profiling with standard Python profilers (cProfile or yappi)
In most RL workloads in Sample Factory it can be difficult to use standard profiling tools because the full application
consists of many processes and threads and in the author's experience standard tools struggle to organise
traces from multiple processes into a single coherent report
(if the reader knows of a good tool for this, please [let the author know](mailto:[email protected])).
However, using *serial mode* we can force Sample Factory to execute everything in one process! This can be very useful
for finding bottlenecks in your environment implementation without the need for manual instrumentation.
The following command will run the entire experiment in a single process:
```bash
python -m sf_examples.mujoco.train_mujoco --env=mujoco_ant --serial_mode=True --async_rl=False
```
Note that we enable synchronous RL mode as well, it's easier to debug this way and asynchronicity does not make
sense when we're not using multiple processes.
Moreover for some workloads it is actually optimal to run everything in a single process! This is true for
GPU-accelerated environments such as IsaacGym or Brax. When env simulation, inference, and learning are all done on one GPU
it is not necessarily beneficial to run these tasks in separate processes.
In this case we can profile Sample Factory like any other Python application. For example, PyCharm has a nice visualizer
for profiling results generated by `cProfile` or `yappi`. If we run training in IsaacGym in serial mode under PyCharm's profiler:
```bash
python -m sf_examples.isaacgym_examples.train_isaacgym --env=Ant --experiment=igeAnt
```
we get the following report which can be explored to find hotspots in different parts of the code:
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/docs/prof.png?raw=true" alt="yappi profiling view">
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/07-advanced-topics/profiling.md
| 0.495361 | 0.923523 |
profiling.md
|
pypi
|
# Population-Based Training
Sample Factory contains an implementation of the Population-Based Training algorithm.
See [PBT paper](https://arxiv.org/abs/1711.09846) and original [Sample Factory paper](https://arxiv.org/abs/2006.11751) for more details.
PBT is a hyperparameter optimization algorithm that can be used to train RL agents.
Instead of manually tuning all hyperparameters, you can let an optimization method do it for you. This can include
not only learning parameters (e.g. learning rate, entropy coefficient), but also environment parameters (e.g. reward function coefficients).
It is common in RL to have a sophisticated (shaped) reward function which guides the exploration process.
As a result such reward function can distract the agent from the actual final goal.
PBT allows you to optimize with respect to some sparse final objective (which we call "true_objective") while still using a shaped reward function.
Theoretically the algorithm should find hyperparameters (including shaping coefficients) that lead to the best final objective.
This can be, for example, directly optimizing for just winning a match in a multiplayer game, which would be very difficult to do with just regular RL because of
the sparsity of such objective.
This type of PBT algorithm is implemented in the [FTW agent](https://www.deepmind.com/blog/capture-the-flag-the-emergence-of-complex-cooperative-agents) by DeepMind.
## Algorithm
PBT works similar to a genetic algorithm. A population of agents is trained simultaneously with roughly the following approach:
* Each agent is assigned a set of hyperparameters (e.g. learning rate, entropy coefficient, reward function coefficients, etc.)
* Each agent is trained for a fixed number of steps (e.g. 5M steps)
* At the end of this meta-training epoch, the performance of all agents is ranked:
* Agents with top K % of performance are unchanged, we just keep training them
* Agents with bottom K % of performance are replaced by a copy of a random top-K % agent with mutated hyperparameters.
* Agents in the middle keep their weights but also get mutated hyperparameters.
* Proceed to the next meta-training epoch.
Current version of PBT is implemented for a single machine. The perfect setup is a multi-GPU server that can train multiple agents at the same time.
For example, we can train a population of 8 agents on a 4-GPU machine, training 2 agents on each GPU.
PBT is perfect for multiplayer game scenarios where training a population of agents against one another
yields much more robust results compared to self-play with a single policy.
## Providing "True Objective" to PBT
In order to optimize for a true objective, you need to return it from the environment.
Just add it to the `info` dictionary returned by the environment at the last step of the episode, e.g.:
```python
def step(self, action):
info = {}
...
info['true_objective'] = self.compute_true_objective()
return obs, reward, terminated, truncated, info
```
In the absence of `true_objective` in the `info` dictionary, PBT will use the regular reward as the objective.
## Learning parameters optimized by PBT
See `population_based_training.py`:
```python
HYPERPARAMS_TO_TUNE = {
"learning_rate",
"exploration_loss_coeff",
"value_loss_coeff",
"max_grad_norm",
"ppo_clip_ratio",
"ppo_clip_value",
# gamma can be added with a CLI parameter (--pbt_optimize_gamma=True)
}
```
During training current learning parameters are saved in `f"policy_{policy_id:02d}_cfg.json"` files in the experiment directory.
## Optimizing environment parameters
Besides learning parameters we can also optimize parameters of the environment with respect to some "true objective".
In order to do that, your environment should implement `RewardShapingInterface` interface in addition to `gym.Env` interface.
```python
class RewardShapingInterface:
def get_default_reward_shaping(self) -> Optional[Dict[str, Any]]:
"""Should return a dictionary of string:float key-value pairs defining the current reward shaping scheme."""
raise NotImplementedError
def set_reward_shaping(self, reward_shaping: Dict[str, Any], agent_idx: int | slice) -> None:
"""
Sets the new reward shaping scheme.
:param reward_shaping dictionary of string-float key-value pairs
:param agent_idx: integer agent index (for multi-agent envs). Can be a slice if we're training in batched mode
(set a single reward shaping scheme for a range of agents)
"""
raise NotImplementedError
```
Any parameters in the dictionary returned by `get_default_reward_shaping` will be optimized by PBT.
Note that although the dictionary is called "reward shaping", it can be used to optimize any environment parameters.
It is important that none of these parameters should directly affect the objective calculation, otherwise
all PBT will do is increase the coefficients all the way to infinity.
An example of how this can be used. Suppose your shaped reward function contains a term for picking up a weapon in a game like Quake or VizDoom.
If the true objective is `1.0` for winning the game and `0.0` otherwise then PBT can optimize these weapon preference coefficients to maximize success.
But if true objective is not specified (so just the env reward itself is used as objective), then you can just increase
the coefficients to increase the reward unboundedly.
## Configuring PBT
Please see [Configuration parameter reference](../02-configuration/cfg-params.md). Parameters with `--pbt_` prefix are related to PBT.
Use `--with_pbt=True` to enable PBT. It is important also to set `--num_policies` to the number of agents in the population.
### Command-line examples
Training on DMLab-30 with a 4-agent population on a 4-GPU machine:
```bash
python -m sf_examples.dmlab.train_dmlab --env=dmlab_30 --train_for_env_steps=10000000000 --algo=APPO --gamma=0.99 --use_rnn=True --num_workers=90 --num_envs_per_worker=12 --num_epochs=1 --rollout=32 --recurrence=32 --batch_size=2048 --benchmark=False --max_grad_norm=0.0 --dmlab_renderer=software --decorrelate_experience_max_seconds=120 --encoder_conv_architecture=resnet_impala --encoder_conv_mlp_layers=512 --nonlinearity=relu --rnn_type=lstm --dmlab_extended_action_set=True --num_policies=4 --pbt_replace_reward_gap=0.05 --pbt_replace_reward_gap_absolute=5.0 --pbt_period_env_steps=10000000 --pbt_start_mutation=100000000 --with_pbt=True --experiment=dmlab_30_resnet_4pbt_w90_v12 --dmlab_one_task_per_worker=True --set_workers_cpu_affinity=True --max_policy_lag=35 --pbt_target_objective=dmlab_target_objective --dmlab30_dataset=~/datasets/brady_konkle_oliva2008 --dmlab_use_level_cache=True --dmlab_level_cache_path=/home/user/dmlab_cache
```
PBT for VizDoom (8 agents, 4-GPU machine):
```bash
python -m sf_examples.vizdoom.train_vizdoom --env=doom_deathmatch_bots --train_for_seconds=3600000 --algo=APPO --use_rnn=True --gamma=0.995 --env_frameskip=2 --num_workers=80 --num_envs_per_worker=24 --num_policies=8 --batch_size=2048 --res_w=128 --res_h=72 --wide_aspect_ratio=False --with_pbt=True --pbt_period_env_steps=5000000 --experiment=doom_deathmatch_bots
```
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/07-advanced-topics/pbt.md
| 0.837321 | 0.984723 |
pbt.md
|
pypi
|
# Metrics Reference
## General information
Each experiment will have at least the following groups of metrics on Tensorboard/Wandb:
* `len`
* `perf`
* `policy_stats`
* `reward`
* `stats`
* `train`
Plus new sections (groups) are created for each custom metric with key in `<group_name>/<metric_name>` format (see [Custom Metrics](custom-metrics.md) section).
Summaries such as `len`, `perf`, `reward` are averaged over the last 100 data points to filter noise
(this can be changed by `--stats_avg=N` argument). These summaries are written to Tensorboard/Wandb every
`--experiment_summaries_interval` seconds (10 seconds by default).
`train` summaries are not averaged and just represent the values from the latest minibatch on the learner.
The reporting rate for `train` summaries is decayed over time to reduce the size of the log files.
The schedule is controlled by `summary_rate_decay_seconds` variable in `learner.py`.
## `len`
`len/len`, `len/len_max`, `len/len_min` are simply episode lengths measured _after frameskip_.
If your environment uses frameskip=4 and the reported episode length is 400, it means that 400 environment steps
were simulated but the agent actually observed only 100 frames.
## `perf`
`perf/_fps` and `perf/_sample_throughput` represent throughput as measured in different parts of the algorithm.
`perf/_sample_throughput` is the number of observations processed (or actions generated) by the inference worker, i.e. pure
sampling throughput measured before frameskipping is taken into account.
`perf/_fps` is the number of observations/actions processed by the learner and measured after frameskipping.
For example with frameskip=4, `perf/_sample_throughput` will be 4 times smaller than `perf/_fps`. If this is not the case,
it means that the learner had to throw away some trajectories which can happen for multiple reasons, for example
if the trajectories were too stale and exceeded `--max_policy_lag`.
## `policy_stats`
By default this section only contains the `true_objective` metrics: `policy_stats/avg_true_objective`,
`policy_stats/avg_true_objective_max`, `policy_stats/avg_true_objective_min`.
This will reflect the `true_objective` value if the environment returns one in the `info` dictionary
(see [PBT](../07-advanced-topics/pbt.md) for more details).
If `true_objective` is not specified these metrics should be equal to the scalar environment reward.
`policy_stats` will also contain any custom metrics (see [Custom metrics](custom-metrics.md)) that are not in
`<group_name>/<metric_name>` format.
## `reward`
`reward/reward`, `reward/reward_max`, `reward/reward_min` are the raw scalar environment rewards, reported
_before_ any scaling (`--reward_scale`) or normalization is applied.
## `stats`
* `stats/avg_request_count` - how many requests from the rollout workers are processed per inference step.
The correpondence between this number and the actual inference batch size depends on training configuration, this is
mostly an internal metric for debugging purposes.
* `stats/gpu_cache_learner`, `stats/gpu_cache_policy_worker`, `stats/gpu_mem_learner`, `stats/gpu_mem_policy_worker`,
`stats/gpu_mem_policy_worker`, `stats/master_process_memory_mb`, `stats/memory_learner`, `stats/memory_policy_worker` -
a group of metrics to keep track of RAM and VRAM usage, mostly used to detect and debug memory leaks.
* `stats/step_policy`, `stats/wait_policy` - performance debugging metrics for the inference worker, respectively
the time spent on the last inference step and the time spent waiting for new observations from the rollout workers,
both in seconds.
## `train`
This is perhaps the most useful section of metrics, many parameters can be used to debug RL training issues.
Metrics are listed and explained below in the alphabetical order in which they appear in Tensorboard.
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/docs/metrics/p1.png?raw=true" alt="Tensorboard train metrics page 1">
* `train/actual_lr` - the actual learning rate used by the learner, which can be different from the configuration
parameter if the adaptive learning rate is enabled.
* `train/adam_max_second_moment` - the maximum value of the second moment of the Adam optimizer. Sometimes spikes in this metric can
be used to detect training instability.
* `train/adv_max`, `train/adv_min`, `train/adv_std` - the maximum, minimum, standard deviation of the
advantage values. "Mean" value is not reported because it is always zero (we use advantage normalization by default).
* `train/entropy` - the entropy of the actions probability distribution.
* `train/exploration_loss` - exploration loss (if any). See `--exploration_loss` argument for more details.
* `train/fraction_clipped` - fraction of minibatch samples that were clipped by the PPO loss. This value growing too
large is often a sign of training instability (i.e. learning rate is too high).
* `train/grad_norm` - the L2 norm of the gradient of the loss function after gradient clipping.
* `train/kl_divergence` - the average KL-divergence between the policy that collected the experience and the latest copy
of the policy on the learner. This value growing or spiking is often concerning and can be a sign of training instability.
* `train/kl_divergence_max` - max KL value in the whole minibatch.
* `train/kl_loss` - value of the KL loss (if any). See `--kl_loss_coeff` argument for more details.
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/docs/metrics/p2.png?raw=true" alt="Tensorboard train metrics page 2">
* `train/loss` - the total loss function value.
* `train/lr` - the learning rate used by the learner (can be changed by PBT algorithm even if there is no lr scheduler).
* `train/max_abs_logprob` - the maximum absolute value of the log probability of any action in the minibatch under the latest policy.
If this reaches hundreds or thousands (extremely improbable) it might be a sign that the distributions fluctuate too much,
although it can also happen with very complex action distributions, i.e. Tuple action distributions.
* `train/measurements_running_mean`, `train/measurements_running_std` - in this particular example the environment
provides the additional observation space called "measurements" and these values report the statistics of this observation space.
* `train/num_sgd_steps` - number of SGD steps performed on the current trajectories dataset when the summaries are recorded. This can range
from 1 to `--num_epochs` * `--num_batches_per_epoch`.
* `train/obs_running_mean`, `train/obs_running_std` - the running mean and standard deviation of the observations, reported
when `--normalize_input` is enabled.
* `train/policy_loss` - policy gradient loss component of the total loss.
* `train/ratio_max`, `train/ratio_mean`, `train/ratio_min` - action probability ratio between the latest policy and the policy
that collected the experience. Min/max/mean are across the minibatch.
<img src="https://github.com/alex-petrenko/sf_assets/blob/main/docs/metrics/p3.png?raw=true" alt="Tensorboard train metrics page 3">
* `train/returns_running_mean`, `train/returns_running_std` - the running mean and standard deviation of bootstrapped
discounted returns, reported when `--normalize_returns` is enabled.
* `train/same_policy_fraction` - fraction of samples in the minibatch that come from the same policy. This can be less than 1.0
in multi-policy (i.e. PBT) workflows when we change the policy controlling the agent mid-episode.
* `train/valids_fraction` - fraction of samples in the minibatch that are valid. Samples can be invalid if they
come from a different policy or if they are too old exceeding `--max_policy_lag`. In most cases both `train/same_policy_fraction`
and `train/valids_fraction` should be close to 1.0.
* `train/value` - discounted return as predicted by the value function.
* `train/value_delta`, `train/value_delta_max` - how much the value estimate changed between the current critic and the critic
at the moment when the experience was collected. Similar to `train/ratio...` metrics, but for the value function.
* `train/value_loss` - value function loss component of the total loss.
* `train/version_diff_avg`, `train/version_diff_max`, `train/version_diff_min` - policy lag measured in policy versions (SGD steps)
between the policy that collected the experience and the latest policy on the learner.
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/docs/05-monitoring/metrics-reference.md
| 0.956927 | 0.743145 |
metrics-reference.md
|
pypi
|
from __future__ import annotations
import random
import sys
from typing import Any, Dict, Optional
import gymnasium as gym
import numpy as np
from sample_factory.algo.utils.context import global_model_factory
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import RewardShapingInterface, TrainingInfoInterface, register_env
from sample_factory.train import run_rl
from sf_examples.train_custom_env_custom_model import make_custom_encoder, override_default_params
class CustomMultiEnv(gym.Env, TrainingInfoInterface, RewardShapingInterface):
"""
Implements a simple 2-agent game. Observation space is irrelevant. Optimal strategy is for both agents
to choose the same action (both 0 or 1).
"""
def __init__(self, full_env_name, cfg, render_mode: Optional[str] = None):
TrainingInfoInterface.__init__(self)
self.name = full_env_name # optional
self.cfg = cfg
self.curr_episode_steps = 0
self.res = 8 # 8x8 images
self.channels = 1 # it's easier when the channel dimension is present, even if it's 1
self.observation_space = gym.spaces.Box(0, 1, (self.channels, self.res, self.res))
self.action_space = gym.spaces.Discrete(2)
self.num_agents = 2
self.is_multiagent = True
self.inactive_steps = [3] * self.num_agents
self.episode_rewards = [[] for _ in range(self.num_agents)]
self.reward_shaping = [dict(rew=-1.0) for _ in range(self.num_agents)]
self.obs = None
self.render_mode = render_mode
def _obs(self):
if self.obs is None:
self.obs = [np.float32(np.random.rand(self.channels, self.res, self.res)) for _ in range(self.num_agents)]
return self.obs
def reset(self, **kwargs):
self.curr_episode_steps = 0
# log.debug(f"Episode reward: {self.episode_rewards} sum_0: {sum(self.episode_rewards[0])} sum_1: {sum(self.episode_rewards[1])}")
self.episode_rewards = [[] for _ in range(self.num_agents)]
return self._obs(), [dict() for _ in range(self.num_agents)]
def step(self, actions):
infos = [dict() for _ in range(self.num_agents)]
# "deactivate" agents randomly, mostly to test inactive agent masking functionality
for agent_idx in range(self.num_agents):
if self.inactive_steps[agent_idx] > 0:
self.inactive_steps[agent_idx] -= 1
else:
if random.random() < 0.005:
self.inactive_steps[agent_idx] = random.randint(1, 48)
infos[agent_idx]["is_active"] = self.inactive_steps[agent_idx] <= 0
self.curr_episode_steps += 1
# this is like prisoner's dilemma
rew0 = self.reward_shaping[0]["rew"]
rew1 = self.reward_shaping[1]["rew"]
payout_matrix = [
[(0, 0), (rew0, rew1)],
[(rew0, rew1), (0, 0)],
]
# action = 0 to stay silent, 1 to betray
rewards = list(payout_matrix[actions[0]][actions[1]])
for agent_idx in range(self.num_agents):
if not infos[agent_idx]["is_active"]:
rewards[agent_idx] = 0
self.episode_rewards[agent_idx].append(rewards[agent_idx])
time_out = self.curr_episode_steps >= self.cfg.custom_env_episode_len
for agent_idx in range(self.num_agents):
infos[agent_idx]["time_outs"] = time_out
terminated = truncated = [time_out] * self.num_agents
if time_out:
# multi-agent environments should auto-reset!
obs, infos = self.reset()
else:
obs = self._obs()
return obs, rewards, terminated, truncated, infos
def get_default_reward_shaping(self) -> Optional[Dict[str, Any]]:
return self.reward_shaping[0]
def set_reward_shaping(self, reward_shaping: Dict[str, Any], agent_idx: int | slice) -> None:
if isinstance(agent_idx, int):
agent_idx = slice(agent_idx, agent_idx + 1)
for idx in range(agent_idx.start, agent_idx.stop):
self.reward_shaping[idx] = reward_shaping
def render(self):
pass
def make_custom_multi_env_func(full_env_name, cfg=None, _env_config=None, render_mode: Optional[str] = None):
return CustomMultiEnv(full_env_name, cfg, render_mode=render_mode)
def add_extra_params_func(parser):
"""
Specify any additional command line arguments for this family of custom environments.
"""
p = parser
p.add_argument("--custom_env_episode_len", default=10, type=int, help="Number of steps in the episode")
def register_custom_components():
register_env("my_custom_multi_env_v1", make_custom_multi_env_func)
global_model_factory().register_encoder_factory(make_custom_encoder)
def parse_custom_args(argv=None, evaluation=False):
parser, cfg = parse_sf_args(argv=argv, evaluation=evaluation)
add_extra_params_func(parser)
override_default_params(parser)
# second parsing pass yields the final configuration
cfg = parse_full_cfg(parser, argv)
return cfg
def main():
"""Script entry point."""
register_custom_components()
cfg = parse_custom_args()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/train_custom_multi_env.py
| 0.745213 | 0.293102 |
train_custom_multi_env.py
|
pypi
|
from __future__ import annotations
import sys
from typing import Any, Dict, Optional
import gymnasium as gym
import numpy as np
from torch import nn
from sample_factory.algo.utils.context import global_model_factory
from sample_factory.algo.utils.torch_utils import calc_num_elements
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import RewardShapingInterface, TrainingInfoInterface, register_env
from sample_factory.model.encoder import Encoder
from sample_factory.model.model_utils import nonlinearity
from sample_factory.train import run_rl
from sample_factory.utils.typing import Config, ObsSpace
# add "TrainingInfoInterface" and "RewardShapingInterface" just to demonstrate how to use them (and for testing)
class CustomEnv(gym.Env, TrainingInfoInterface, RewardShapingInterface):
def __init__(self, full_env_name, cfg, render_mode: Optional[str] = None):
TrainingInfoInterface.__init__(self)
self.name = full_env_name # optional
self.cfg = cfg
self.curr_episode_steps = 0
self.res = 10 # 10x10 images
self.channels = 1 # it's easier when the channel dimension is present, even if it's 1
self.observation_space = gym.spaces.Box(0, 1, (self.channels, self.res, self.res))
self.action_space = gym.spaces.Discrete(self.cfg.custom_env_num_actions)
self.reward_shaping: Dict[str, Any] = dict(action_rew_coeff=0.01)
self.render_mode = render_mode
def _obs(self):
return np.float32(np.random.rand(self.channels, self.res, self.res))
def reset(self, **kwargs):
self.curr_episode_steps = 0
return self._obs(), {}
def step(self, action):
# action should be an int here
assert isinstance(action, (int, np.int32, np.int64))
reward = action * self.reward_shaping["action_rew_coeff"]
terminated = truncated = self.curr_episode_steps >= self.cfg.custom_env_episode_len
self.curr_episode_steps += 1
return self._obs(), reward, terminated, truncated, dict()
def render(self):
pass
def get_default_reward_shaping(self) -> Dict[str, Any]:
return self.reward_shaping
def set_reward_shaping(self, reward_shaping: Dict[str, Any], agent_idx: int | slice) -> None:
self.reward_shaping = reward_shaping
def make_custom_env_func(full_env_name, cfg=None, _env_config=None, render_mode: Optional[str] = None):
return CustomEnv(full_env_name, cfg, render_mode=render_mode)
def add_extra_params(parser):
"""
Specify any additional command line arguments for this family of custom environments.
"""
p = parser
p.add_argument("--custom_env_num_actions", default=10, type=int, help="Number of actions in my custom env")
p.add_argument("--custom_env_episode_len", default=1000, type=int, help="Number of steps in the episode")
def override_default_params(parser):
"""
Override default argument values for this family of environments.
All experiments for environments from my_custom_env_ family will have these parameters unless
different values are passed from command line.
"""
parser.set_defaults(
rnn_size=128,
)
class CustomEncoder(Encoder):
"""Just an example of how to use a custom model component."""
def __init__(self, cfg, obs_space):
super().__init__(cfg)
obs_shape = obs_space["obs"].shape
conv_layers = [
nn.Conv2d(1, 8, 3, stride=2),
nonlinearity(cfg),
nn.Conv2d(8, 16, 2, stride=1),
nonlinearity(cfg),
]
self.conv_head = nn.Sequential(*conv_layers)
self.conv_head_out_size = calc_num_elements(self.conv_head, obs_shape)
def forward(self, obs_dict):
# we always work with dictionary observations. Primary observation is available with the key 'obs'
main_obs = obs_dict["obs"]
x = self.conv_head(main_obs)
x = x.view(-1, self.conv_head_out_size)
return x
def get_out_size(self) -> int:
return self.conv_head_out_size
def make_custom_encoder(cfg: Config, obs_space: ObsSpace) -> Encoder:
"""Factory function as required by the API."""
return CustomEncoder(cfg, obs_space)
def register_custom_components():
register_env("my_custom_env_v1", make_custom_env_func)
global_model_factory().register_encoder_factory(make_custom_encoder)
def parse_custom_args(argv=None, evaluation=False):
parser, cfg = parse_sf_args(argv, evaluation=evaluation)
add_extra_params(parser)
override_default_params(parser)
# second parsing pass yields the final configuration
cfg = parse_full_cfg(parser, argv)
return cfg
def main():
"""Script entry point."""
register_custom_components()
cfg = parse_custom_args()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/train_custom_env_custom_model.py
| 0.85493 | 0.356811 |
train_custom_env_custom_model.py
|
pypi
|
from typing import Optional
import gymnasium as gym
from sample_factory.envs.env_wrappers import (
ClipRewardEnv,
EpisodicLifeEnv,
FireResetEnv,
MaxAndSkipEnv,
NoopResetEnv,
NumpyObsWrapper,
)
ATARI_W = ATARI_H = 84
class AtariSpec:
def __init__(self, name, env_id, default_timeout=None):
self.name = name
self.env_id = env_id
self.default_timeout = default_timeout
self.has_timer = False
ATARI_ENVS = [
AtariSpec("atari_alien", "AlienNoFrameskip-v4"),
AtariSpec("atari_amidar", "AmidarNoFrameskip-v4"),
AtariSpec("atari_assault", "AssaultNoFrameskip-v4"),
AtariSpec("atari_asterix", "AsterixNoFrameskip-v4"),
AtariSpec("atari_asteroid", "AsteroidsNoFrameskip-v4"),
AtariSpec("atari_atlantis", "AtlantisNoFrameskip-v4"),
AtariSpec("atari_bankheist", "BankHeistNoFrameskip-v4"),
AtariSpec("atari_battlezone", "BattleZoneNoFrameskip-v4"),
AtariSpec("atari_beamrider", "BeamRiderNoFrameskip-v4"),
AtariSpec("atari_berzerk", "BerzerkNoFrameskip-v4"),
AtariSpec("atari_bowling", "BowlingNoFrameskip-v4"),
AtariSpec("atari_boxing", "BoxingNoFrameskip-v4"),
AtariSpec("atari_breakout", "BreakoutNoFrameskip-v4"),
AtariSpec("atari_centipede", "CentipedeNoFrameskip-v4"),
AtariSpec("atari_choppercommand", "ChopperCommandNoFrameskip-v4"),
AtariSpec("atari_crazyclimber", "CrazyClimberNoFrameskip-v4"),
AtariSpec("atari_defender", "DefenderNoFrameskip-v4"),
AtariSpec("atari_demonattack", "DemonAttackNoFrameskip-v4"),
AtariSpec("atari_doubledunk", "DoubleDunkNoFrameskip-v4"),
AtariSpec("atari_enduro", "EnduroNoFrameskip-v4"),
AtariSpec("atari_fishingderby", "FishingDerbyNoFrameskip-v4"),
AtariSpec("atari_freeway", "FreewayNoFrameskip-v4"),
AtariSpec("atari_frostbite", "FrostbiteNoFrameskip-v4"),
AtariSpec("atari_gopher", "GopherNoFrameskip-v4"),
AtariSpec("atari_gravitar", "GravitarNoFrameskip-v4"),
AtariSpec("atari_hero", "HeroNoFrameskip-v4"),
AtariSpec("atari_icehockey", "IceHockeyNoFrameskip-v4"),
AtariSpec("atari_jamesbond", "JamesbondNoFrameskip-v4"),
AtariSpec("atari_kangaroo", "KangarooNoFrameskip-v4"),
AtariSpec("atari_krull", "KrullNoFrameskip-v4"),
AtariSpec("atari_kongfumaster", "KungFuMasterNoFrameskip-v4"),
AtariSpec("atari_montezuma", "MontezumaRevengeNoFrameskip-v4", default_timeout=18000),
AtariSpec("atari_mspacman", "MsPacmanNoFrameskip-v4"),
AtariSpec("atari_namethisgame", "NameThisGameNoFrameskip-v4"),
AtariSpec("atari_phoenix", "PhoenixNoFrameskip-v4"),
AtariSpec("atari_pitfall", "PitfallNoFrameskip-v4"),
AtariSpec("atari_pong", "PongNoFrameskip-v4"),
AtariSpec("atari_privateye", "PrivateEyeNoFrameskip-v4"),
AtariSpec("atari_qbert", "QbertNoFrameskip-v4"),
AtariSpec("atari_riverraid", "RiverraidNoFrameskip-v4"),
AtariSpec("atari_roadrunner", "RoadRunnerNoFrameskip-v4"),
AtariSpec("atari_robotank", "RobotankNoFrameskip-v4"),
AtariSpec("atari_seaquest", "SeaquestNoFrameskip-v4"),
AtariSpec("atari_skiing", "SkiingNoFrameskip-v4"),
AtariSpec("atari_solaris", "SolarisNoFrameskip-v4"),
AtariSpec("atari_spaceinvaders", "SpaceInvadersNoFrameskip-v4"),
AtariSpec("atari_stargunner", "StarGunnerNoFrameskip-v4"),
AtariSpec("atari_surround", "SurroundNoFrameskip-v4"),
AtariSpec("atari_tennis", "TennisNoFrameskip-v4"),
AtariSpec("atari_timepilot", "TimePilotNoFrameskip-v4"),
AtariSpec("atari_tutankham", "TutankhamNoFrameskip-v4"),
AtariSpec("atari_upndown", "UpNDownNoFrameskip-v4"),
AtariSpec("atari_venture", "VentureNoFrameskip-v4"),
AtariSpec("atari_videopinball", "VideoPinballNoFrameskip-v4"),
AtariSpec("atari_wizardofwor", "WizardOfWorNoFrameskip-v4"),
AtariSpec("atari_yarsrevenge", "YarsRevengeNoFrameskip-v4"),
AtariSpec("atari_zaxxon", "ZaxxonNoFrameskip-v4"),
]
def atari_env_by_name(name):
for cfg in ATARI_ENVS:
if cfg.name == name:
return cfg
raise Exception("Unknown Atari env")
def make_atari_env(env_name, cfg, env_config, render_mode: Optional[str] = None):
atari_spec = atari_env_by_name(env_name)
env = gym.make(atari_spec.env_id, render_mode=render_mode)
if atari_spec.default_timeout is not None:
env._max_episode_steps = atari_spec.default_timeout
# these are chosen to match Stable-Baselines3 and CleanRL implementations as precisely as possible
env = gym.wrappers.RecordEpisodeStatistics(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=cfg.env_frameskip)
env = EpisodicLifeEnv(env)
# noinspection PyUnresolvedReferences
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ClipRewardEnv(env)
env = gym.wrappers.ResizeObservation(env, (84, 84))
env = gym.wrappers.GrayScaleObservation(env)
env = gym.wrappers.FrameStack(env, cfg.env_framestack)
env = NumpyObsWrapper(env)
return env
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/atari/atari_utils.py
| 0.848376 | 0.502808 |
atari_utils.py
|
pypi
|
import sys
from multiprocessing.context import BaseContext
from typing import Optional
from tensorboardX import SummaryWriter
from sample_factory.algo.runners.runner import AlgoObserver, Runner
from sample_factory.algo.utils.context import global_model_factory
from sample_factory.algo.utils.misc import ExperimentStatus
from sample_factory.algo.utils.multiprocessing_utils import get_mp_ctx
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import register_env
from sample_factory.train import make_runner
from sample_factory.utils.typing import Config, Env, PolicyID
from sample_factory.utils.utils import experiment_dir
from sf_examples.dmlab.dmlab_env import (
DMLAB_ENVS,
dmlab_extra_episodic_stats_processing,
dmlab_extra_summaries,
list_all_levels_for_experiment,
make_dmlab_env,
)
from sf_examples.dmlab.dmlab_level_cache import DmlabLevelCaches, make_dmlab_caches
from sf_examples.dmlab.dmlab_model import make_dmlab_encoder
from sf_examples.dmlab.dmlab_params import add_dmlab_env_args, dmlab_override_defaults
class DmlabEnvWithCache:
def __init__(self, level_caches: Optional[DmlabLevelCaches] = None):
self.caches = level_caches
def make_env(self, env_name, cfg, env_config, render_mode) -> Env:
return make_dmlab_env(env_name, cfg, env_config, render_mode, self.caches)
def register_dmlab_envs(level_caches: Optional[DmlabLevelCaches] = None):
env_factory = DmlabEnvWithCache(level_caches)
for env in DMLAB_ENVS:
register_env(env.name, env_factory.make_env)
def register_dmlab_components(level_caches: Optional[DmlabLevelCaches] = None):
register_dmlab_envs(level_caches)
global_model_factory().register_encoder_factory(make_dmlab_encoder)
class DmlabExtraSummariesObserver(AlgoObserver):
def extra_summaries(self, runner: Runner, policy_id: PolicyID, writer: SummaryWriter, env_steps: int) -> None:
dmlab_extra_summaries(runner, policy_id, env_steps, writer)
def register_msg_handlers(cfg: Config, runner: Runner):
if cfg.env == "dmlab_30":
# extra functions to calculate human-normalized score etc.
runner.register_episodic_stats_handler(dmlab_extra_episodic_stats_processing)
runner.register_observer(DmlabExtraSummariesObserver())
def initialize_level_cache(cfg: Config, mp_ctx: BaseContext) -> Optional[DmlabLevelCaches]:
if not cfg.dmlab_use_level_cache:
return None
env_name = cfg.env
num_policies = cfg.num_policies if hasattr(cfg, "num_policies") else 1
all_levels = list_all_levels_for_experiment(env_name)
level_cache_dir = cfg.dmlab_level_cache_path
caches = make_dmlab_caches(experiment_dir(cfg), all_levels, num_policies, level_cache_dir, mp_ctx)
return caches
def parse_dmlab_args(argv=None, evaluation=False):
parser, cfg = parse_sf_args(argv, evaluation=evaluation)
add_dmlab_env_args(parser)
dmlab_override_defaults(parser)
cfg = parse_full_cfg(parser, argv)
return cfg
def main():
"""Script entry point."""
cfg = parse_dmlab_args()
# explicitly create the runner instead of simply calling run_rl()
# this allows us to register additional message handlers
cfg, runner = make_runner(cfg)
register_msg_handlers(cfg, runner)
level_caches = initialize_level_cache(cfg, get_mp_ctx(cfg.serial_mode))
register_dmlab_components(level_caches)
status = runner.init()
if status == ExperimentStatus.SUCCESS:
status = runner.run()
return status
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/dmlab/train_dmlab.py
| 0.675658 | 0.206274 |
train_dmlab.py
|
pypi
|
import os
import random
import shutil
import time
from os.path import join
from typing import Dict, Optional
import cv2
import deepmind_lab
import gymnasium as gym
import numpy as np
from sample_factory.utils.typing import PolicyID
from sample_factory.utils.utils import ensure_dir_exists, log
from sf_examples.dmlab.dmlab30 import DMLAB_INSTRUCTIONS, DMLAB_MAX_INSTRUCTION_LEN, DMLAB_VOCABULARY_SIZE
from sf_examples.dmlab.dmlab_level_cache import DmlabLevelCache
from sf_examples.dmlab.dmlab_utils import string_to_hash_bucket
ACTION_SET = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
(-20, 0, 0, 1, 0, 0, 0), # Look Left + Forward
(20, 0, 0, 1, 0, 0, 0), # Look Right + Forward
(0, 0, 0, 0, 1, 0, 0), # Fire.
)
EXTENDED_ACTION_SET = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-10, 0, 0, 0, 0, 0, 0), # Small Look Left
(10, 0, 0, 0, 0, 0, 0), # Small Look Right
(-60, 0, 0, 0, 0, 0, 0), # Large Look Left
(60, 0, 0, 0, 0, 0, 0), # Large Look Right
(0, 10, 0, 0, 0, 0, 0), # Look Down
(0, -10, 0, 0, 0, 0, 0), # Look Up
(-10, 0, 0, 1, 0, 0, 0), # Forward + Small Look Left
(10, 0, 0, 1, 0, 0, 0), # Forward + Small Look Right
(-60, 0, 0, 1, 0, 0, 0), # Forward + Large Look Left
(60, 0, 0, 1, 0, 0, 0), # Forward + Large Look Right
(0, 0, 0, 0, 1, 0, 0), # Fire.
)
def dmlab_level_to_level_name(level):
level_name = level.split("/")[-1]
return level_name
class DmlabGymEnv(gym.Env):
def __init__(
self,
task_id,
level,
action_repeat,
res_w,
res_h,
benchmark_mode,
renderer,
dataset_path,
with_instructions,
extended_action_set,
level_cache_path,
gpu_index,
dmlab_level_caches_per_policy: Dict[PolicyID, DmlabLevelCache] = None,
extra_cfg=None,
render_mode: Optional[str] = None,
):
self.width = res_w
self.height = res_h
# self._main_observation = 'DEBUG.CAMERA_INTERLEAVED.PLAYER_VIEW_NO_RETICLE'
self.main_observation = "RGB_INTERLEAVED"
self.instructions_observation = DMLAB_INSTRUCTIONS
self.with_instructions = with_instructions and not benchmark_mode
self.action_repeat = action_repeat
self.random_state = None
self.task_id = task_id
self.level = level
self.level_name = dmlab_level_to_level_name(self.level)
# the policy index which currently acts in the environment
self.curr_policy_idx = 0
self.dmlab_level_caches_per_policy = dmlab_level_caches_per_policy
self.use_level_cache = self.dmlab_level_caches_per_policy is not None
self.curr_cache = None
if self.use_level_cache:
self.curr_cache = self.dmlab_level_caches_per_policy[self.curr_policy_idx]
self.instructions = np.zeros([DMLAB_MAX_INSTRUCTION_LEN], dtype=np.int32)
observation_format = [self.main_observation]
if self.with_instructions:
observation_format += [self.instructions_observation]
config = {
"width": self.width,
"height": self.height,
"gpuDeviceIndex": str(gpu_index),
"datasetPath": dataset_path,
}
if extra_cfg is not None:
config.update(extra_cfg)
config = {k: str(v) for k, v in config.items()}
self.render_mode: Optional[str] = render_mode
self.level_cache_path = ensure_dir_exists(level_cache_path)
# this object provides fetch and write methods, hence using "self" for env level cache
env_level_cache = self if self.use_level_cache else None
self.env_uses_level_cache = False # will be set to True when this env instance queries the cache
self.last_reset_seed = None
self.dmlab = deepmind_lab.Lab(
level,
observation_format,
config=config,
renderer=renderer,
level_cache=env_level_cache,
)
self.action_set = EXTENDED_ACTION_SET if extended_action_set else ACTION_SET
self.action_list = np.array(self.action_set, dtype=np.intc) # DMLAB requires intc type for actions
self.last_observation = None
self.render_scale = 5
self.render_fps = 30
self.last_frame = time.time()
self.action_space = gym.spaces.Discrete(len(self.action_set))
self.observation_space = gym.spaces.Dict(
obs=gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 3), dtype=np.uint8)
)
if self.with_instructions:
self.observation_space.spaces[self.instructions_observation] = gym.spaces.Box(
low=0,
high=DMLAB_VOCABULARY_SIZE,
shape=[DMLAB_MAX_INSTRUCTION_LEN],
dtype=np.int32,
)
self.benchmark_mode = benchmark_mode
if self.benchmark_mode:
log.warning("DmLab benchmark mode is true! Use this only for testing, not for actual training runs!")
self.seed()
def seed(self, seed=None):
if self.benchmark_mode:
initial_seed = 42
else:
if seed is None:
initial_seed = random.randint(0, int(1e9))
else:
initial_seed = seed
self.random_state = np.random.RandomState(seed=initial_seed)
return [initial_seed]
def format_obs_dict(self, env_obs_dict):
"""SampleFactory traditionally uses 'obs' key for the 'main' observation."""
env_obs_dict["obs"] = env_obs_dict.pop(self.main_observation)
instr = env_obs_dict.get(self.instructions_observation)
self.instructions[:] = 0
if instr is not None:
instr_words = instr.split()
for i, word in enumerate(instr_words):
self.instructions[i] = string_to_hash_bucket(word, DMLAB_VOCABULARY_SIZE)
env_obs_dict[self.instructions_observation] = self.instructions
return env_obs_dict
def reset(self, **kwargs):
if self.use_level_cache:
self.curr_cache = self.dmlab_level_caches_per_policy[self.curr_policy_idx]
self.last_reset_seed = self.curr_cache.get_unused_seed(self.level, self.random_state)
else:
self.last_reset_seed = self.random_state.randint(0, 2**31 - 1)
self.dmlab.reset(seed=self.last_reset_seed)
self.last_observation = self.format_obs_dict(self.dmlab.observations())
return self.last_observation, {}
def step(self, action):
if self.benchmark_mode:
# the performance of many DMLab environments heavily depends on what agent is actually doing
# therefore for purposes of measuring throughput we ignore the actions, this way the agent executes
# random policy and we can measure raw throughput more precisely
action = random.randrange(0, self.action_space.n)
reward = self.dmlab.step(self.action_list[action], num_steps=self.action_repeat)
terminated = not self.dmlab.is_running()
truncated = False
if not terminated:
obs_dict = self.format_obs_dict(self.dmlab.observations())
self.last_observation = obs_dict
info = {"num_frames": self.action_repeat}
return self.last_observation, reward, terminated, truncated, info
def render(self) -> Optional[np.ndarray]:
if self.last_observation is None and self.dmlab.is_running():
self.last_observation = self.dmlab.observations()
img = self.last_observation["obs"]
if self.render_mode == "rgb_array":
return img
elif self.render_mode != "human":
raise Exception(f"Rendering mode {self.render_mode} not supported")
img = np.transpose(img, (1, 2, 0)) # CHW to HWC
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
scale = self.render_scale
img_big = cv2.resize(img, (self.width * scale, self.height * scale), interpolation=cv2.INTER_NEAREST)
cv2.imshow("dmlab_examples", img_big)
since_last_frame = time.time() - self.last_frame
wait_time_sec = max(1.0 / self.render_fps - since_last_frame, 0.001)
wait_time_ms = max(int(1000 * wait_time_sec), 1)
cv2.waitKey(wait_time_ms)
self.last_frame = time.time()
def close(self):
self.dmlab.close()
def fetch(self, key, pk3_path):
"""Environment object itself acts as a proxy to the global level cache."""
if not self.env_uses_level_cache:
self.env_uses_level_cache = True
# log.debug('Env %s uses level cache!', self.level_name)
path = join(self.level_cache_path, key)
if os.path.isfile(path):
# copy the cached file to the path expected by DeepMind Lab
shutil.copyfile(path, pk3_path)
return True
else:
log.warning("Cache miss in environment %s key: %s!", self.level_name, key)
return False
def write(self, key, pk3_path):
"""Environment object itself acts as a proxy to the global level cache."""
if self.use_level_cache:
log.debug("Add new level to cache! Level %s seed %r key %s", self.level_name, self.last_reset_seed, key)
self.curr_cache.add_new_level(self.level, self.last_reset_seed, key, pk3_path)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/dmlab/dmlab_gym.py
| 0.717705 | 0.22657 |
dmlab_gym.py
|
pypi
|
import collections
DMLAB_INSTRUCTIONS = "INSTR"
DMLAB_VOCABULARY_SIZE = 1000
DMLAB_MAX_INSTRUCTION_LEN = 16
LEVEL_MAPPING = collections.OrderedDict(
[
("rooms_collect_good_objects_train", "rooms_collect_good_objects_test"),
("rooms_exploit_deferred_effects_train", "rooms_exploit_deferred_effects_test"),
("rooms_select_nonmatching_object", "rooms_select_nonmatching_object"),
("rooms_watermaze", "rooms_watermaze"),
("rooms_keys_doors_puzzle", "rooms_keys_doors_puzzle"),
("language_select_described_object", "language_select_described_object"),
("language_select_located_object", "language_select_located_object"),
("language_execute_random_task", "language_execute_random_task"),
("language_answer_quantitative_question", "language_answer_quantitative_question"),
("lasertag_one_opponent_small", "lasertag_one_opponent_small"),
("lasertag_three_opponents_small", "lasertag_three_opponents_small"),
("lasertag_one_opponent_large", "lasertag_one_opponent_large"),
("lasertag_three_opponents_large", "lasertag_three_opponents_large"),
("natlab_fixed_large_map", "natlab_fixed_large_map"),
("natlab_varying_map_regrowth", "natlab_varying_map_regrowth"),
("natlab_varying_map_randomized", "natlab_varying_map_randomized"),
("skymaze_irreversible_path_hard", "skymaze_irreversible_path_hard"),
("skymaze_irreversible_path_varied", "skymaze_irreversible_path_varied"),
("psychlab_arbitrary_visuomotor_mapping", "psychlab_arbitrary_visuomotor_mapping"),
("psychlab_continuous_recognition", "psychlab_continuous_recognition"),
("psychlab_sequential_comparison", "psychlab_sequential_comparison"),
("psychlab_visual_search", "psychlab_visual_search"),
("explore_object_locations_small", "explore_object_locations_small"),
("explore_object_locations_large", "explore_object_locations_large"),
("explore_obstructed_goals_small", "explore_obstructed_goals_small"),
("explore_obstructed_goals_large", "explore_obstructed_goals_large"),
("explore_goal_locations_small", "explore_goal_locations_small"),
("explore_goal_locations_large", "explore_goal_locations_large"),
("explore_object_rewards_few", "explore_object_rewards_few"),
("explore_object_rewards_many", "explore_object_rewards_many"),
]
)
DMLAB30_LEVELS = tuple(LEVEL_MAPPING.keys())
HUMAN_SCORES = {
"rooms_collect_good_objects_test": 10,
"rooms_exploit_deferred_effects_test": 85.65,
"rooms_select_nonmatching_object": 65.9,
"rooms_watermaze": 54,
"rooms_keys_doors_puzzle": 53.8,
"language_select_described_object": 389.5,
"language_select_located_object": 280.7,
"language_execute_random_task": 254.05,
"language_answer_quantitative_question": 184.5,
"lasertag_one_opponent_small": 12.65,
"lasertag_three_opponents_small": 18.55,
"lasertag_one_opponent_large": 18.6,
"lasertag_three_opponents_large": 31.5,
"natlab_fixed_large_map": 36.9,
"natlab_varying_map_regrowth": 24.45,
"natlab_varying_map_randomized": 42.35,
"skymaze_irreversible_path_hard": 100,
"skymaze_irreversible_path_varied": 100,
"psychlab_arbitrary_visuomotor_mapping": 58.75,
"psychlab_continuous_recognition": 58.3,
"psychlab_sequential_comparison": 39.5,
"psychlab_visual_search": 78.5,
"explore_object_locations_small": 74.45,
"explore_object_locations_large": 65.65,
"explore_obstructed_goals_small": 206,
"explore_obstructed_goals_large": 119.5,
"explore_goal_locations_small": 267.5,
"explore_goal_locations_large": 194.5,
"explore_object_rewards_few": 77.7,
"explore_object_rewards_many": 106.7,
}
RANDOM_SCORES = {
"rooms_collect_good_objects_test": 0.073,
"rooms_exploit_deferred_effects_test": 8.501,
"rooms_select_nonmatching_object": 0.312,
"rooms_watermaze": 4.065,
"rooms_keys_doors_puzzle": 4.135,
"language_select_described_object": -0.07,
"language_select_located_object": 1.929,
"language_execute_random_task": -5.913,
"language_answer_quantitative_question": -0.33,
"lasertag_one_opponent_small": -0.224,
"lasertag_three_opponents_small": -0.214,
"lasertag_one_opponent_large": -0.083,
"lasertag_three_opponents_large": -0.102,
"natlab_fixed_large_map": 2.173,
"natlab_varying_map_regrowth": 2.989,
"natlab_varying_map_randomized": 7.346,
"skymaze_irreversible_path_hard": 0.1,
"skymaze_irreversible_path_varied": 14.4,
"psychlab_arbitrary_visuomotor_mapping": 0.163,
"psychlab_continuous_recognition": 0.224,
"psychlab_sequential_comparison": 0.129,
"psychlab_visual_search": 0.085,
"explore_object_locations_small": 3.575,
"explore_object_locations_large": 4.673,
"explore_obstructed_goals_small": 6.76,
"explore_obstructed_goals_large": 2.61,
"explore_goal_locations_small": 7.66,
"explore_goal_locations_large": 3.14,
"explore_object_rewards_few": 2.073,
"explore_object_rewards_many": 2.438,
}
RANDOM_POLICY_EPISODE_LEN = {
"rooms_collect_good_objects_train": 3600,
"rooms_exploit_deferred_effects_train": 3600,
"rooms_select_nonmatching_object": 720,
"rooms_watermaze": 7200,
"rooms_keys_doors_puzzle": 3468,
"language_select_described_object": 3600,
"language_select_located_object": 7200,
"language_execute_random_task": 7200,
"language_answer_quantitative_question": 3600,
"lasertag_one_opponent_small": 14400,
"lasertag_three_opponents_small": 14400,
"lasertag_one_opponent_large": 14400,
"lasertag_three_opponents_large": 14400,
"natlab_fixed_large_map": 7200,
"natlab_varying_map_regrowth": 7200,
"natlab_varying_map_randomized": 7200,
"skymaze_irreversible_path_hard": 3600,
"skymaze_irreversible_path_varied": 3372,
"psychlab_arbitrary_visuomotor_mapping": 18000,
"psychlab_continuous_recognition": 18000,
"psychlab_sequential_comparison": 18000,
"psychlab_visual_search": 9000,
"explore_object_locations_small": 5400,
"explore_object_locations_large": 7200,
"explore_obstructed_goals_small": 5400,
"explore_obstructed_goals_large": 7200,
"explore_goal_locations_small": 5400,
"explore_goal_locations_large": 7200,
"explore_object_rewards_few": 5400,
"explore_object_rewards_many": 7200,
}
# this is how many episodes are required for one billion frames of training on DMLab-30
# Used for level cache generation. Only levels that require level cache generation are listed.
# here 1B = 250M samples * frameskip, frameskip=4
# the actual value will of course be different since episode lengths change as policy improves
# this is also under assumption that the agent is trained for the same number of envs on every level
DMLAB30_APPROX_NUM_EPISODES_PER_BILLION_FRAMES = {
"rooms_keys_doors_puzzle": 11200,
"lasertag_one_opponent_small": 2400,
"lasertag_three_opponents_small": 2400,
"lasertag_one_opponent_large": 2400,
"lasertag_three_opponents_large": 2400,
"skymaze_irreversible_path_hard": 11200,
"skymaze_irreversible_path_varied": 13500,
"explore_object_locations_small": 6200,
"explore_object_locations_large": 4700,
"explore_obstructed_goals_small": 6200,
"explore_obstructed_goals_large": 4700,
"explore_goal_locations_small": 6200,
"explore_goal_locations_large": 4700,
"explore_object_rewards_few": 6200,
"explore_object_rewards_many": 4700,
}
DMLAB30_LEVELS_THAT_USE_LEVEL_CACHE = tuple(DMLAB30_APPROX_NUM_EPISODES_PER_BILLION_FRAMES.keys())
def dmlab30_num_envs():
num_envs = len(tuple(LEVEL_MAPPING.keys()))
return num_envs
def dmlab30_level_name_to_level(level_name):
return f"contributed/dmlab30/{level_name}"
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/dmlab/dmlab30.py
| 0.568056 | 0.310211 |
dmlab30.py
|
pypi
|
from typing import Dict, List, Tuple
import brax
import numpy as np
from brax import math
from brax.io.image import _BASIC, _GROUND, _TARGET, _eye, _up
from brax.physics.base import vec_to_arr
from PIL import Image
from pytinyrenderer import TinyRenderCamera as Camera
from pytinyrenderer import TinyRenderLight as Light
from pytinyrenderer import TinySceneRenderer as Renderer
def _flatten_vectors(vectors):
"""Returns the flattened array of the vectors."""
return sum(map(lambda v: [v.x, v.y, v.z], vectors), [])
def _scene(sys: brax.System) -> Tuple[Renderer, List[int], Dict]:
"""Converts a brax System and qp to a pytinyrenderer scene and instances."""
scene = Renderer()
extra_info = dict()
instances = []
offsets, rotations, body_indices = [], [], []
mesh_geoms = {g.name: g for g in sys.config.mesh_geometries}
for i, body in enumerate(sys.config.bodies):
tex = _TARGET if body.name.lower() == "target" else _BASIC
for col in body.colliders:
col_type = col.WhichOneof("type")
if col_type == "capsule":
half_height = col.capsule.length / 2 - col.capsule.radius
model = scene.create_capsule(col.capsule.radius, half_height, 2, tex.pixels, tex.width, tex.height)
elif col_type == "box":
hs = col.box.halfsize
model = scene.create_cube([hs.x, hs.y, hs.z], _BASIC.pixels, tex.width, tex.height, 16.0)
elif col_type == "sphere":
model = scene.create_capsule(col.sphere.radius, 0, 2, tex.pixels, tex.width, _BASIC.height)
elif col_type == "plane":
tex = _GROUND
model = scene.create_cube([1000.0, 1000.0, 0.0001], tex.pixels, tex.width, tex.height, 8192)
elif col_type == "mesh":
mesh = col.mesh
g = mesh_geoms[mesh.name]
scale = mesh.scale if mesh.scale else 1
model = scene.create_mesh(
np.array(_flatten_vectors(g.vertices)) * scale,
_flatten_vectors(g.vertex_normals),
[0] * len(g.vertices) * 2,
g.faces,
tex.pixels,
tex.width,
tex.height,
1.0,
)
else:
raise RuntimeError(f"unrecognized collider: {col_type}")
instance = scene.create_object_instance(model)
off = np.array([col.position.x, col.position.y, col.position.z])
col_rot = math.euler_to_quat(vec_to_arr(col.rotation))
instances.append(instance)
offsets.append(off)
rotations.append(np.array(col_rot))
body_indices.append(i)
extra_info["offsets"] = offsets
extra_info["rotations"] = rotations
extra_info["body_indices"] = body_indices # to refer to the body idx in qp
return scene, instances, extra_info
def quat_mul(u, v):
"""Multiplies two quaternions.
Args:
u: (4,) quaternion (w,x,y,z)
v: (4,) quaternion (w,x,y,z)
Returns:
A quaternion u * v.
"""
return np.array(
[
u[0] * v[0] - u[1] * v[1] - u[2] * v[2] - u[3] * v[3],
u[0] * v[1] + u[1] * v[0] + u[2] * v[3] - u[3] * v[2],
u[0] * v[2] - u[1] * v[3] + u[2] * v[0] + u[3] * v[1],
u[0] * v[3] + u[1] * v[2] - u[2] * v[1] + u[3] * v[0],
]
)
def _update_scene(scene: Renderer, instances: List[int], extra_info: Dict, qp: brax.QP):
"""Updates the scene with the new qp."""
offsets = extra_info["offsets"]
rotations = extra_info["rotations"]
body_indices = extra_info["body_indices"]
np_pos = np.array(qp.pos)
np_rot = np.array(qp.rot)
for i, instance in enumerate(instances):
body = body_indices[i]
off = offsets[i]
col_rot = rotations[i]
pos = np_pos[body] + math.rotate(off, qp.rot[body])
rot = quat_mul(np_rot[body], col_rot)
scene.set_object_position(instances[i], list(pos))
scene.set_object_orientation(instances[i], [rot[1], rot[2], rot[3], rot[0]])
def create_scene(qp, sys):
if (len(qp.pos.shape), len(qp.rot.shape)) != (2, 2):
raise RuntimeError("unexpected shape in qp")
scene, instances, extra_info = _scene(sys)
return instances, scene, extra_info
def create_camera(height, width, qp, ssaa, sys, target):
eye, up = _eye(sys, qp), _up(sys)
hfov = 58.0
vfov = hfov * height / width
camera = Camera(
viewWidth=width * ssaa, viewHeight=height * ssaa, position=eye, target=target, up=up, hfov=hfov, vfov=vfov
)
return camera
def create_light(target):
direction = [0.57735, -0.57735, 0.57735]
light = Light(direction=direction, ambient=0.8, diffuse=0.8, specular=0.6, shadowmap_center=target)
return light
class BraxRenderer:
def __init__(self, env, render_mode: str, brax_video_res_px: int = 200):
self.env = env
self.screen = None
self.render_mode = render_mode
self.instances, self.extra_info, self.scene = None, None, None
self.ssaa = 2 # supersampling factor
self.width = self.height = brax_video_res_px # anything higher is super slow because CPU renderer :|
# noinspection PyProtectedMember
def render(self):
sys = self.env._env.sys
from brax import jumpy as jp
qp = jp.take(self.env._state.qp, 0)
if self.scene is None:
self.instances, self.scene, self.extra_info = create_scene(qp, sys)
_update_scene(self.scene, self.instances, self.extra_info, qp)
target = [qp.pos[0, 0], qp.pos[0, 1], 0]
light = create_light(target)
camera = create_camera(self.height, self.width, qp, self.ssaa, sys, target)
img = self.scene.get_camera_image(self.instances, light, camera).rgb
arr = np.reshape(np.array(img, dtype=np.uint8), (camera.view_height, camera.view_width, -1))
if self.ssaa > 1:
arr = np.asarray(Image.fromarray(arr).resize((self.width, self.height)))
if self.render_mode == "human":
import pygame
if self.screen is None:
pygame.init()
pygame.display.init()
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.surfarray.blit_array(self.screen, arr.swapaxes(0, 1))
pygame.display.update()
return arr
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/brax/brax_render.py
| 0.846451 | 0.420243 |
brax_render.py
|
pypi
|
import sys
from typing import Dict, List, Optional, Tuple, Union
import gymnasium as gym
import numpy as np
import torch
import torch.utils.dlpack as tpack
from gymnasium.core import RenderFrame
from torch import Tensor
from sample_factory.algo.utils.gymnasium_utils import convert_space
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import register_env
from sample_factory.train import run_rl
from sample_factory.utils.typing import Config, Env
from sample_factory.utils.utils import log, str2bool
BRAX_EVALUATION = False
torch.ones(1, device="cuda") # init torch cuda before jax
def jax_to_torch(tensor):
# noinspection PyProtectedMember
from jax._src.dlpack import to_dlpack
tensor = to_dlpack(tensor)
tensor = tpack.from_dlpack(tensor)
return tensor
def torch_to_jax(tensor):
# noinspection PyProtectedMember
from jax._src.dlpack import from_dlpack
tensor = tpack.to_dlpack(tensor)
tensor = from_dlpack(tensor)
return tensor
class BraxEnv(gym.Env):
# noinspection PyProtectedMember
def __init__(
self,
brax_env,
num_actors,
render_mode: Optional[str],
render_res: int,
clamp_actions: bool,
clamp_rew_obs: bool,
):
self.env = brax_env
self.num_agents = num_actors
self.env.closed = False
self.env.viewer = None
self.renderer = None
self.render_mode = render_mode
self.brax_video_res_px = render_res
self.clamp_actions = clamp_actions
self.clamp_rew_obs = clamp_rew_obs
if len(self.env.observation_space.shape) > 1:
observation_size = self.env.observation_space.shape[1]
action_size = self.env.action_space.shape[1]
obs_high = np.inf * np.ones(observation_size)
self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
action_high = np.ones(action_size)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
else:
self.observation_space = convert_space(self.env.observation_space)
self.action_space = convert_space(self.env.action_space)
def reset(self, *args, **kwargs) -> Tuple[Tensor, Dict]:
log.debug(f"Resetting env {self.env} with {self.num_agents} parallel agents...")
obs = self.env.reset()
obs = jax_to_torch(obs)
log.debug(f"reset() done, obs.shape={obs.shape}!")
return obs, {}
def step(self, action):
action_clipped = action
if self.clamp_actions:
action_clipped = torch.clamp(action, -1, 1)
action_clipped = torch_to_jax(action_clipped)
next_obs, reward, terminated, info = self.env.step(action_clipped)
next_obs = jax_to_torch(next_obs)
reward = jax_to_torch(reward)
terminated = jax_to_torch(terminated).to(torch.bool)
truncated = jax_to_torch(info["truncation"]).to(torch.bool)
if self.clamp_rew_obs:
reward = torch.clamp(reward, -100, 100)
next_obs = torch.clamp(next_obs, -100, 100)
return next_obs, reward, terminated, truncated, info
def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]:
if self.renderer is None:
from sf_examples.brax.brax_render import BraxRenderer
self.renderer = BraxRenderer(self.env, self.render_mode, self.brax_video_res_px)
return self.renderer.render()
def make_brax_env(full_env_name: str, cfg: Config, _env_config=None, render_mode: Optional[str] = None) -> Env:
assert (
full_env_name in env_configs.keys()
), f"Env {full_env_name} is not supported. Supported envs: {list(env_configs.keys())}"
# use batch size 2 instead of 1 so we don't have to deal with vector-nonvector env issues
batch_size = 64 if BRAX_EVALUATION else cfg.env_agents
from brax import envs
gym_env = envs.create_gym_env(env_name=full_env_name, batch_size=batch_size, seed=0, backend="gpu")
env = BraxEnv(gym_env, batch_size, render_mode, cfg.brax_render_res, cfg.clamp_actions, cfg.clamp_rew_obs)
return env
def add_extra_params_func(parser) -> None:
"""
Specify any additional command line arguments for this family of custom environments.
"""
p = parser
p.add_argument(
"--env_agents",
default=2048,
type=int,
help="Num. agents in a vectorized env",
)
p.add_argument(
"--clamp_actions",
default=False,
type=str2bool,
help="Clamp actions to -1,1",
)
p.add_argument(
"--clamp_rew_obs",
default=False,
type=str2bool,
help="Clamp rewards and observations to -100,100",
)
p.add_argument(
"--brax_render_res",
default=200,
type=int,
help="Brax render resolution. Software renderer is very slow so use larger resolution only for offscreen "
"video generation, i.e. with push_to_hub",
)
def override_default_params_func(env, parser):
"""Most of these parameters are the same as IsaacGymEnvs default config files."""
parser.set_defaults(
# we're using a single very vectorized env, no need to parallelize it further
batched_sampling=True,
num_workers=1,
num_envs_per_worker=1,
worker_num_splits=1,
actor_worker_gpus=[0], # obviously need a GPU
train_for_env_steps=100000000,
use_rnn=False,
adaptive_stddev=False,
policy_initialization="torch_default",
env_gpu_actions=True,
reward_scale=0.01,
max_grad_norm=1.0,
rollout=32,
batch_size=32768,
num_batches_per_epoch=2,
num_epochs=5,
ppo_clip_ratio=0.2,
ppo_clip_value=1.0,
value_loss_coeff=2.0,
exploration_loss_coeff=0.0,
nonlinearity="elu",
encoder_mlp_layers=[256, 128, 64],
actor_critic_share_weights=True,
learning_rate=3e-4,
lr_schedule="kl_adaptive_epoch",
lr_schedule_kl_threshold=0.008,
lr_adaptive_max=2e-3,
shuffle_minibatches=False,
gamma=0.99,
gae_lambda=0.95,
with_vtrace=False,
value_bootstrap=True,
normalize_input=True,
normalize_returns=True,
save_best_after=int(5e6),
serial_mode=True,
async_rl=False,
experiment_summaries_interval=3, # experiments are short so we should save summaries often
# use_env_info_cache=True, # speeds up startup
)
# override default config parameters for specific envs
if env in env_configs:
parser.set_defaults(**env_configs[env])
# custom default configuration parameters for specific envs
# add more envs here analogously (env names should match config file names in IGE)
env_configs = dict(
ant=dict(
encoder_mlp_layers=[256, 128, 64],
save_every_sec=15,
),
humanoid=dict(
encoder_mlp_layers=[512, 256, 128],
),
halfcheetah=dict(
encoder_mlp_layers=[256, 128, 64],
),
walker2d=dict(
encoder_mlp_layers=[256, 128, 64],
),
)
def register_brax_custom_components(evaluation: bool = False) -> None:
global BRAX_EVALUATION
BRAX_EVALUATION = evaluation
for env_name in env_configs:
register_env(env_name, make_brax_env)
def parse_brax_cfg(evaluation=False):
parser, partial_cfg = parse_sf_args(evaluation=evaluation)
add_extra_params_func(parser)
override_default_params_func(partial_cfg.env, parser)
final_cfg = parse_full_cfg(parser)
return final_cfg
def main():
"""Script entry point."""
register_brax_custom_components()
cfg = parse_brax_cfg()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/brax/train_brax.py
| 0.829181 | 0.471162 |
train_brax.py
|
pypi
|
import sys
import time
from collections import deque
from typing import Deque
from signal_slot.signal_slot import StatusCode
from sample_factory.algo.sampling.simplified_sampling_api import SyncSamplingAPI
from sample_factory.algo.utils.env_info import EnvInfo, obtain_env_info_in_a_separate_process
from sample_factory.algo.utils.rl_utils import samples_per_trajectory
from sample_factory.utils.typing import Config
from sample_factory.utils.utils import log
from sf_examples.atari.train_atari import parse_atari_args, register_atari_components
def _print_fps_stats(cfg: Config, fps_stats: Deque):
sampled = fps_stats[-1][1]
delta_sampled = sampled - fps_stats[0][1]
delta_time = fps_stats[-1][0] - fps_stats[0][0]
fps = delta_sampled / delta_time
fps_frameskip = fps * cfg.env_frameskip
fps_frameskip_str = f" ({fps_frameskip:.1f} FPS with frameskip)" if cfg.env_frameskip > 1 else ""
log.debug(f"Samples collected: {sampled}, throughput: {fps:.1f} FPS{fps_frameskip_str}")
def generate_trajectories(cfg: Config, env_info: EnvInfo, sample_env_steps: int = 1_000_000) -> StatusCode:
sampler = SyncSamplingAPI(cfg, env_info)
sampler.start()
print_interval_sec = 1.0
fps_stats = deque([(time.time(), 0)], maxlen=10)
sampled = 0
last_print = time.time()
while sampled < sample_env_steps:
try:
trajectory = sampler.get_trajectories_sync()
if trajectory is None:
break
sampled += samples_per_trajectory(trajectory)
if time.time() - last_print > print_interval_sec:
fps_stats.append((time.time(), sampled))
_print_fps_stats(cfg, fps_stats)
last_print = time.time()
except KeyboardInterrupt:
log.info(f"KeyboardInterrupt in {generate_trajectories.__name__}()")
break
status = sampler.stop()
return status
def main() -> StatusCode:
register_atari_components()
cfg = parse_atari_args()
env_info = obtain_env_info_in_a_separate_process(cfg)
return generate_trajectories(cfg, env_info)
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/sampler/use_simplified_sampling_api.py
| 0.44553 | 0.209227 |
use_simplified_sampling_api.py
|
pypi
|
import isaacgym
# isort: on
import os
import sys
from os.path import join
from typing import Dict, List, Optional, Tuple
import gymnasium as gym
import torch
from isaacgymenvs.tasks import isaacgym_task_map
from isaacgymenvs.utils.reformat import omegaconf_to_dict
from torch import Tensor
from sample_factory.algo.utils.gymnasium_utils import convert_space
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.envs.env_utils import register_env
from sample_factory.train import run_rl
from sample_factory.utils.typing import Config, Env
from sample_factory.utils.utils import str2bool
class IsaacGymVecEnv(gym.Env):
def __init__(self, isaacgym_env, obs_key):
self.env = isaacgym_env
# what about vectorized multi-agent envs? should we take num_agents into account also?
self.num_agents = self.env.num_envs
self.action_space = convert_space(self.env.action_space)
# isaacgym_examples environments actually return dicts
if obs_key == "obs":
self.observation_space = gym.spaces.Dict(dict(obs=convert_space(self.env.observation_space)))
self._proc_obs_func = lambda obs_dict: obs_dict
elif obs_key == "states":
self.observation_space = gym.spaces.Dict(dict(obs=convert_space(self.env.state_space)))
self._proc_obs_func = self._use_states_as_obs
else:
raise ValueError(f"Unknown observation key: {obs_key}")
self._truncated: Tensor = torch.zeros(self.num_agents, dtype=torch.bool)
@staticmethod
def _use_states_as_obs(obs_dict: Dict) -> Dict[str, Tensor]:
obs_dict["obs"] = obs_dict["states"]
return obs_dict
def reset(self, *args, **kwargs) -> Tuple[Dict[str, Tensor], Dict]:
# some IGE envs return all zeros on the first timestep, but this is probably okay
obs_dict = self.env.reset()
self._truncated = self._truncated.to(obs_dict["obs"].device) # make sure all tensors are on the same device
return self._proc_obs_func(obs_dict), {} # after Gym 0.26 reset() returns info dict
def step(self, actions) -> Tuple[Dict[str, Tensor], Tensor, Tensor, Tensor, Dict]:
obs, rew, terminated, infos = self.env.step(actions)
if infos and "time_outs" in infos:
truncated = infos["time_outs"]
else:
truncated = self._truncated
return self._proc_obs_func(obs), rew, terminated, truncated, infos
def render(self):
pass
def make_isaacgym_env(full_env_name: str, cfg: Config, _env_config=None, render_mode: Optional[str] = None) -> Env:
task_name = full_env_name
overrides = ige_task_cfg_overrides(task_name, cfg)
import isaacgymenvs
from hydra import compose, initialize
# this will register resolvers for the hydra config
# noinspection PyUnresolvedReferences
from isaacgymenvs import train
module_dir = isaacgymenvs.__path__[0]
cfg_dir = join(module_dir, "cfg")
curr_file_dir = os.path.dirname(os.path.abspath(__file__))
cfg_dir = os.path.relpath(cfg_dir, curr_file_dir)
initialize(config_path=cfg_dir, job_name="sf_isaacgym")
ige_cfg = compose(config_name="config", overrides=overrides)
rl_device = ige_cfg.rl_device
sim_device = ige_cfg.sim_device
graphics_device_id = ige_cfg.graphics_device_id
ige_cfg_dict = omegaconf_to_dict(ige_cfg)
task_cfg = ige_cfg_dict["task"]
make_env = isaacgym_task_map[task_cfg["name"]]
if render_mode == "human":
headless = False
elif render_mode is None:
headless = True
else:
raise ValueError(f"{render_mode=} not supported by IsaacGym")
if cfg.ige_api_version == "preview3":
env = make_env(
cfg=task_cfg,
sim_device=sim_device,
graphics_device_id=graphics_device_id,
headless=headless,
)
elif cfg.ige_api_version == "preview4":
env = make_env(
cfg=task_cfg,
sim_device=sim_device,
rl_device=rl_device,
graphics_device_id=graphics_device_id,
headless=headless,
virtual_screen_capture=False,
force_render=not headless,
)
else:
raise ValueError(f"Unknown ige_api_version: {cfg.ige_api_version}")
env = IsaacGymVecEnv(env, cfg.obs_key)
return env
def add_extra_params_func(parser):
"""
Specify any additional command line arguments for this family of custom environments.
"""
p = parser
p.add_argument(
"--env_agents",
default=-1,
type=int,
help="Num agents in each env (default: -1, means use default value from isaacgymenvs env yaml config file)",
)
p.add_argument(
"--obs_key",
default="obs",
type=str,
help='IsaacGym envs return dicts, some envs return just "obs", and some return "obs" and "states".'
"States key denotes the full state of the environment, and obs key corresponds to limited observations "
'available in real world deployment. If we use "states" here we can train will full information '
"(although the original idea was to use asymmetric training - critic sees full state and policy only sees obs).",
)
p.add_argument(
"--subtask",
default=None,
type=str,
help="Subtask for envs that support it (i.e. AllegroKuka regrasping or manipulation or throw).",
)
p.add_argument(
"--ige_api_version",
default="preview4",
type=str,
choices=["preview3", "preview4"],
help="We can switch between different versions of IsaacGymEnvs API using this parameter.",
)
p.add_argument(
"--eval_stats",
default=False,
type=str2bool,
help="Whether to collect env stats during evaluation.",
)
def override_default_params_func(env, parser):
"""Most of these parameters are taken from IsaacGymEnvs default config files."""
parser.set_defaults(
# we're using a single very vectorized env, no need to parallelize it further
batched_sampling=True,
num_workers=1,
num_envs_per_worker=1,
worker_num_splits=1,
actor_worker_gpus=[0], # obviously need a GPU
train_for_env_steps=10000000,
use_rnn=False,
adaptive_stddev=False,
policy_initialization="torch_default",
env_gpu_actions=True,
reward_scale=0.01,
rollout=16,
max_grad_norm=0.0,
batch_size=32768,
num_batches_per_epoch=2,
num_epochs=4,
ppo_clip_ratio=0.2,
value_loss_coeff=2.0,
exploration_loss_coeff=0.0,
nonlinearity="elu",
learning_rate=3e-4,
lr_schedule="kl_adaptive_epoch",
lr_schedule_kl_threshold=0.008,
shuffle_minibatches=False,
gamma=0.99,
gae_lambda=0.95,
with_vtrace=False,
value_bootstrap=True, # assuming reward from the last step in the episode can generally be ignored
normalize_input=True,
normalize_returns=True, # does not improve results on all envs, but with return normalization we don't need to tune reward scale
save_best_after=int(5e6),
serial_mode=True, # it makes sense to run isaacgym envs in serial mode since most of the parallelism comes from the env itself (although async mode works!)
async_rl=False,
use_env_info_cache=True, # speeds up startup
)
# override default config parameters for specific envs
if env in env_configs:
parser.set_defaults(**env_configs[env])
# custom default configuration parameters for specific envs
# add more envs here analogously (env names should match config file names in IGE)
env_configs = dict(
Ant=dict(
encoder_mlp_layers=[256, 128, 64],
experiment_summaries_interval=3, # experiments are short so we should save summaries often
save_every_sec=15,
# trains better without normalized returns, but we keep the default value for consistency
# normalize_returns=False,
),
Anymal=dict(
train_for_env_steps=1310000000,
encoder_mlp_layers=[256, 128, 64],
gamma=0.99,
rollout=16,
learning_rate=3e-4,
lr_schedule_kl_threshold=0.008,
num_epochs=5,
max_grad_norm=1.0,
num_batches_per_epoch=4,
exploration_loss_coeff=0.0,
),
AnymalTerrain=dict(
train_for_env_steps=1310000000,
encoder_mlp_layers=[256, 128, 64],
gamma=0.99,
rollout=16,
learning_rate=3e-4,
lr_schedule_kl_threshold=0.008,
num_epochs=5,
max_grad_norm=1.0,
num_batches_per_epoch=4,
exploration_loss_coeff=0.001,
),
BallBalance=dict(
train_for_env_steps=1310000000,
encoder_mlp_layers=[128, 64, 32],
gamma=0.99,
rollout=16,
learning_rate=3e-4,
lr_schedule_kl_threshold=0.008,
num_epochs=5,
max_grad_norm=1.0,
num_batches_per_epoch=8,
exploration_loss_coeff=0.0,
),
Cartpole=dict(
train_for_env_steps=1310000000,
encoder_mlp_layers=[128, 64, 32],
gamma=0.99,
rollout=16,
learning_rate=3e-4,
lr_schedule_kl_threshold=0.008,
num_epochs=5,
max_grad_norm=1.0,
num_batches_per_epoch=8,
exploration_loss_coeff=0.0,
),
ShadowHand=dict(
train_for_env_steps=1310000000,
encoder_mlp_layers=[512, 512, 256, 128],
gamma=0.99,
rollout=16,
learning_rate=5e-4,
lr_schedule_kl_threshold=0.016,
num_epochs=5,
max_grad_norm=1.0,
num_batches_per_epoch=8,
exploration_loss_coeff=0.0,
),
Humanoid=dict(
train_for_env_steps=1310000000, # to match how much it is trained in rl-games
encoder_mlp_layers=[400, 200, 100],
rollout=32,
num_epochs=5,
value_loss_coeff=4.0,
max_grad_norm=1.0,
num_batches_per_epoch=4,
experiment_summaries_interval=3, # experiments are short so we should save summaries often
save_every_sec=15,
# trains a lot better with higher gae_lambda, but we keep the default value for consistency
# gae_lambda=0.99,
),
AllegroHand=dict(
train_for_env_steps=10_000_000_000,
encoder_mlp_layers=[512, 256, 128],
gamma=0.99,
rollout=16,
learning_rate=5e-3,
lr_schedule_kl_threshold=0.02,
reward_scale=0.01,
num_epochs=4,
max_grad_norm=1.0,
num_batches_per_epoch=8,
),
AllegroHandLSTM=dict(
train_for_env_steps=10_000_000_000,
encoder_mlp_layers=[512, 256, 128],
gamma=0.99,
rollout=16,
use_rnn=True,
rnn_type="lstm",
learning_rate=1e-4,
lr_schedule_kl_threshold=0.016,
reward_scale=0.01,
num_epochs=4,
max_grad_norm=1.0,
num_batches_per_epoch=8,
obs_key="states",
),
AllegroKukaLSTM=dict(
subtask="regrasping",
env_agents=8192,
train_for_env_steps=3_000_000_000,
# No encoder, we directly feed observations into LSTM. A bit weird but this is what IGE does as well.
encoder_mlp_layers=[],
use_rnn=True,
rnn_size=768,
rnn_type="lstm",
decoder_mlp_layers=[768, 512, 256], # mlp layers AFTER the LSTM
gamma=0.99,
rollout=16,
batch_size=32768,
num_epochs=2,
num_batches_per_epoch=4,
value_loss_coeff=4.0,
learning_rate=1e-4,
lr_schedule_kl_threshold=0.016,
reward_scale=0.01,
max_grad_norm=1.0,
obs_key="obs",
save_best_every_sec=120,
save_best_after=int(2e7),
experiment_summaries_interval=30,
flush_summaries_interval=300,
),
)
env_configs["AllegroKukaTwoArmsLSTM"] = env_configs["AllegroKukaLSTM"]
def ige_task_cfg_overrides(task_name: str, cfg: Config) -> List[str]:
"""
Ideally we would directly override these in CLI in Hydra config, but this would require integrating
Hydra config into Sample Factory, which would require anyone who uses Sample Factory to use Hydra as well.
We might want to do this in the future, but for now this should be sufficient.
"""
overrides = [f"task={task_name}"]
if cfg.env_agents > 0:
overrides.append(f"num_envs={cfg.env_agents}")
if cfg.subtask is not None:
overrides.append(f"task/env={cfg.subtask}")
if "AllegroKuka" in task_name and cfg.eval_stats:
overrides.append("task.env.evalStats=True")
# overrides.append("task.env.successTolerance=0.01")
# overrides.append("task.env.withSmallCuboids=False")
# overrides.append("task.env.withBigCuboids=False")
# overrides.append("task.env.withSticks=True")
return overrides
def register_isaacgym_custom_components():
for env_name in env_configs:
register_env(env_name, make_isaacgym_env)
def parse_isaacgym_cfg(evaluation=False):
parser, partial_cfg = parse_sf_args(evaluation=evaluation)
add_extra_params_func(parser)
override_default_params_func(partial_cfg.env, parser)
final_cfg = parse_full_cfg(parser)
return final_cfg
def main():
"""Script entry point."""
register_isaacgym_custom_components()
cfg = parse_isaacgym_cfg()
status = run_rl(cfg)
return status
if __name__ == "__main__":
sys.exit(main())
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/isaacgym_examples/train_isaacgym.py
| 0.724968 | 0.377713 |
train_isaacgym.py
|
pypi
|
from typing import Tuple, Union
import gymnasium as gym
import numpy as np
from gymnasium.core import ObsType
DoneStepType = Tuple[
Union[ObsType, np.ndarray],
Union[float, np.ndarray],
Union[bool, np.ndarray],
Union[dict, list],
]
TerminatedTruncatedStepType = Tuple[
Union[ObsType, np.ndarray],
Union[float, np.ndarray],
Union[bool, np.ndarray],
Union[bool, np.ndarray],
Union[dict, list],
]
def has_image_observations(observation_space):
"""It's a heuristic."""
return len(observation_space.shape) >= 2
class EnvPoolResetFixWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
def step(self, actions):
obs, reward, terminated, truncated, info = self.env.step(actions)
needs_reset = np.nonzero(terminated | truncated)[0]
obs[needs_reset], _ = self.env.reset(needs_reset)
return obs, reward, terminated, truncated, info
def reset(self, **kwargs):
kwargs.pop("seed", None) # envpool does not support the seed in reset, even with the updated API
kwargs.pop("options", None)
return self.env.reset(**kwargs)
class BatchedRecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, num_envs, deque_size=100):
super().__init__(env)
self.num_envs = getattr(env, "num_envs", num_envs)
self.episode_returns = None
self.episode_lengths = None
def reset(self, **kwargs):
observations, infos = self.env.reset(**kwargs)
self.episode_returns = np.zeros(self.num_envs, dtype=np.float32)
self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32)
self.lives = np.zeros(self.num_envs, dtype=np.int32)
self.returned_episode_returns = np.zeros(self.num_envs, dtype=np.float32)
self.returned_episode_lengths = np.zeros(self.num_envs, dtype=np.int32)
return observations, infos
def step(self, action):
observations, rewards, terminated, truncated, infos = super().step(action)
self.episode_returns += infos["reward"]
self.episode_lengths += 1
self.returned_episode_returns[:] = self.episode_returns
self.returned_episode_lengths[:] = self.episode_lengths
self.episode_returns *= 1 - terminated
self.episode_lengths *= 1 - terminated
infos["r"] = self.returned_episode_returns
infos["l"] = self.returned_episode_lengths
return observations, rewards, terminated, truncated, infos
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/envpool/envpool_wrappers.py
| 0.925919 | 0.639314 |
envpool_wrappers.py
|
pypi
|
import argparse
def atari_override_defaults(_env, parser):
"""RL params specific to Atari envs."""
parser.set_defaults(
# let's set this to True by default so it's consistent with how we report results for other envs
# (i.e. VizDoom or DMLab). When running evaluations for reports or to compare with other frameworks we can
# set this to false in command line
summaries_use_frameskip=True,
use_record_episode_statistics=True,
encoder_conv_architecture="convnet_atari",
obs_scale=255.0,
gamma=0.99,
env_frameskip=4,
env_framestack=4,
exploration_loss_coeff=0.01,
num_workers=4,
num_envs_per_worker=1,
worker_num_splits=1,
env_agents=64,
train_for_env_steps=10000000,
nonlinearity="relu",
kl_loss_coeff=0.0,
use_rnn=False,
adaptive_stddev=False,
reward_scale=1.0,
with_vtrace=False,
recurrence=1,
batch_size=256,
rollout=128,
max_grad_norm=0.5,
num_epochs=4,
num_batches_per_epoch=4,
ppo_clip_ratio=0.1,
value_loss_coeff=0.5,
exploration_loss="entropy",
learning_rate=0.00025,
lr_schedule="linear_decay",
shuffle_minibatches=False,
gae_lambda=0.95,
batched_sampling=True,
normalize_input=False,
normalize_returns=False,
serial_mode=False,
async_rl=True,
experiment_summaries_interval=3,
adam_eps=1e-5, # choosing the same value as CleanRL used
)
def add_atari_env_args(_env, p: argparse.ArgumentParser, evaluation=False):
if evaluation:
# apparently env.render(mode="human") is not supported anymore and we need to specify the render mode in
# the env ctor
p.add_argument("--render_mode", default="human", type=str, help="")
p.add_argument(
"--env_agents",
default=2,
type=int,
help="Num agents in each envpool (if used)",
)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/envpool/atari/envpool_atari_params.py
| 0.645232 | 0.325212 |
envpool_atari_params.py
|
pypi
|
from typing import Optional
from sample_factory.utils.utils import log
try:
import envpool
except ImportError as e:
print(e)
print("Trying to import envpool when it is not installed. install with 'pip install envpool'")
from sf_examples.atari.atari_utils import ATARI_ENVS, AtariSpec
from sf_examples.envpool.envpool_wrappers import BatchedRecordEpisodeStatistics, EnvPoolResetFixWrapper
# Note NoFrameskip-v4 in gym[atari] is the same game configuration as -v5 in envpool
ENVPOOL_ATARI_ENVS = [
AtariSpec(
spec.name,
spec.env_id.replace("NoFrameskip-v4", "-v5"),
default_timeout=spec.default_timeout,
)
for spec in ATARI_ENVS
]
def atari_env_by_name(name):
for cfg in ENVPOOL_ATARI_ENVS:
if cfg.name == name:
return cfg
raise Exception("Unknown Atari env")
def make_atari_env(env_name, cfg, env_config, render_mode: Optional[str] = None):
if cfg.num_envs_per_worker > 1:
log.warning(
"When using envpool, set num_envs_per_worker=1 and use --env_agents={desired number of envs}. "
f"Setting --num_envs_per_worker={cfg.num_envs_per_worker} will create multiple envpools per worker process "
f"which is not the desirable behavior in most configurations."
)
atari_spec = atari_env_by_name(env_name)
env_kwargs = dict()
if atari_spec.default_timeout is not None:
# envpool max_episode_steps does not take into account frameskip. see https://github.com/sail-sg/envpool/issues/195
env_kwargs["max_episode_steps"] = atari_spec.default_timeout // 4
if env_config is not None:
env_kwargs["seed"] = env_config.env_id
env = envpool.make(
atari_spec.env_id,
env_type="gym",
num_envs=cfg.env_agents,
reward_clip=True,
episodic_life=True,
frame_skip=cfg.env_frameskip,
**env_kwargs,
)
env = EnvPoolResetFixWrapper(env)
env = BatchedRecordEpisodeStatistics(env, num_envs=cfg.env_agents)
env.num_agents = cfg.env_agents
return env
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/envpool/atari/envpool_atari_utils.py
| 0.799755 | 0.270184 |
envpool_atari_utils.py
|
pypi
|
import gymnasium as gym
import numpy as np
from gymnasium.spaces import Box, Discrete
from sample_factory.algo.utils.spaces.discretized import Discretized
def key_to_action_basic(key):
from pynput.keyboard import Key
table = {Key.left: 0, Key.right: 1, Key.up: 2, Key.down: 3}
return table.get(key, None)
def doom_turn_and_attack_only():
"""
TURN_LEFT
TURN_RIGHT
ATTACK
"""
space = gym.spaces.Tuple(
(
Discrete(3),
Discrete(2),
)
) # noop, turn left, turn right # noop, attack
return space
def doom_action_space_basic():
"""
TURN_LEFT
TURN_RIGHT
MOVE_FORWARD
MOVE_BACKWARD
"""
space = gym.spaces.Tuple(
(
Discrete(3),
Discrete(3),
)
) # noop, turn left, turn right # noop, forward, backward
space.key_to_action = key_to_action_basic
return space
def doom_action_space_extended():
"""
This function assumes the following list of available buttons:
TURN_LEFT
TURN_RIGHT
MOVE_FORWARD
MOVE_BACKWARD
MOVE_LEFT
MOVE_RIGHT
ATTACK
"""
space = gym.spaces.Tuple(
(
Discrete(3), # noop, turn left, turn right
Discrete(3), # noop, forward, backward
Discrete(3), # noop, strafe left, strafe right
Discrete(2), # noop, attack
)
)
return space
def doom_action_space():
"""
Standard action space for full-featured Doom environments (e.g. deathmatch).
TODO: crouch?
TODO: strafe?
This should precisely correspond to the available_buttons configuration in the .cfg file.
This function assumes:
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_NEXT_WEAPON
SELECT_PREV_WEAPON
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Box(np.float32(-1.0), np.float32(1.0), (1,)),
)
)
def doom_action_space_discretized():
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Discretized(11, min_action=-10.0, max_action=10.0), # turning using discretized continuous control
)
)
def doom_action_space_discretized_no_weap():
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Discretized(11, min_action=-10.0, max_action=10.0), # turning using discretized continuous control
)
)
def doom_action_space_continuous_no_weap():
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
Box(float(-1.0), float(1.0), (1,)),
)
)
def doom_action_space_discrete():
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, turn right, turn left
Discrete(3), # noop, prev_weapon, next_weapon
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
)
)
def doom_action_space_discrete_no_weap():
return gym.spaces.Tuple(
(
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(3), # noop, turn right, turn left
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
)
)
def doom_action_space_full_discretized(with_use=False):
"""
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_WEAPON1
SELECT_WEAPON2
SELECT_WEAPON3
SELECT_WEAPON4
SELECT_WEAPON5
SELECT_WEAPON6
SELECT_WEAPON7
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
spaces = [
Discrete(3), # noop, forward, backward
Discrete(3), # noop, move right, move left
Discrete(8), # noop, select weapons 1-7
Discrete(2), # noop, attack
Discrete(2), # noop, sprint
]
if with_use:
spaces.append(Discrete(2)) # noop, use
spaces.append(Discretized(21, min_action=-12.5, max_action=12.5)) # turning using discretized continuous control
return gym.spaces.Tuple(spaces)
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/vizdoom/doom/action_space.py
| 0.621311 | 0.518668 |
action_space.py
|
pypi
|
import copy
import operator
from collections import deque
from typing import Callable
import gymnasium as gym
from sample_factory.algo.utils.misc import EPS
from sample_factory.envs.env_utils import RewardShapingInterface
from sample_factory.utils.utils import log
NUM_WEAPONS = 8
# these are somewhat arbitrary, but can be optimized via PBT
WEAPON_PREFERENCE = {
2: 1, # pistol
3: 5, # shotguns
4: 5, # machinegun
5: 5, # rocket launcher
6: 10, # plasmagun
7: 10, # bfg
}
WEAPON_DELTA_REWARDS = dict()
SELECTED_WEAPON_REWARDS = dict()
for weapon in range(NUM_WEAPONS):
pref = WEAPON_PREFERENCE.get(weapon, 1)
# reward/penalty for finding/losing a weapon
WEAPON_DELTA_REWARDS[f"WEAPON{weapon}"] = (+0.02 * pref, -0.01 * pref)
# reward/penalty for picking up/spending weapon ammo
WEAPON_DELTA_REWARDS[f"AMMO{weapon}"] = (+0.0002 * pref, -0.0001 * pref)
# reward for choosing a weapon and sticking to it; really helps learning in the beginning, otherwise the agent
# just keeps changing weapons all the time, unable to shoot. Towards the later stages of the training agents
# tend to ignore this, and change weapons at will
SELECTED_WEAPON_REWARDS[f"SELECTED{weapon}"] = 0.0002 * pref
# reward shaping scheme to convert env info into scalar reward
REWARD_SHAPING_DEATHMATCH_V0 = dict(
delta=dict(
FRAGCOUNT=(+1, -1.5), # reward per unit of positive or negative change
DEATHCOUNT=(-0.75, +0.75),
HITCOUNT=(+0.01, -0.01),
DAMAGECOUNT=(+0.003, -0.003),
HEALTH=(+0.005, -0.003),
ARMOR=(+0.005, -0.001),
**WEAPON_DELTA_REWARDS,
),
selected_weapon=SELECTED_WEAPON_REWARDS,
)
# "zero-sum" scheme for self-play scenarios
REWARD_SHAPING_DEATHMATCH_V1 = copy.deepcopy(REWARD_SHAPING_DEATHMATCH_V0)
REWARD_SHAPING_DEATHMATCH_V1["delta"].update(
dict(
FRAGCOUNT=(+1, -0.001),
DEATHCOUNT=(-1, +1),
HITCOUNT=(0, 0),
DAMAGECOUNT=(+0.01, -0.01),
HEALTH=(+0.01, -0.01),
)
)
# just the same reward scheme for consistency, only battle does not have most game variables,
# so only a very small reward shaping for collecting Health and Ammo will be applied.
# It works pretty much the same without this.
REWARD_SHAPING_BATTLE = copy.deepcopy(REWARD_SHAPING_DEATHMATCH_V0)
def true_objective_winning_the_game(info):
if info["LEADER_GAP"] == 0:
# tied with the leader for the win, we don't reward for ties, only for the win
return 0.0
elif info["FINAL_PLACE"] > 1:
# lost the match (don't care about the place, losing is losing)
return 0.0
else:
# won the match!
assert info["FINAL_PLACE"] == 1
return 1.0
def true_objective_frags(info):
return info["FRAGCOUNT"]
class DoomRewardShapingWrapper(gym.Wrapper, RewardShapingInterface):
"""Convert game info variables into scalar reward using a reward shaping scheme."""
def __init__(self, env, reward_shaping_scheme=None, true_objective_func=None):
gym.Wrapper.__init__(self, env)
RewardShapingInterface.__init__(self)
self.reward_shaping_scheme = reward_shaping_scheme
self.true_objective_func: Callable = true_objective_func
# without this we reward using BFG and shotguns too much
self.reward_delta_limits = dict(DAMAGECOUNT=200, HITCOUNT=5)
self.prev_vars = dict()
self.prev_dead = True
self.orig_env_reward = self.total_shaping_reward = 0.0
self.selected_weapon = deque([], maxlen=5)
self.reward_structure = {}
self.verbose = False
self.print_once = False
# save a reference to this wrapper in the actual env class, for other wrappers
self.env.unwrapped.reward_shaping_interface = self
def get_default_reward_shaping(self):
return self.reward_shaping_scheme
def set_reward_shaping(self, reward_shaping: dict, agent_idx: int):
self.reward_shaping_scheme = reward_shaping
def _delta_rewards(self, info):
reward = 0.0
deltas = []
for var_name, rewards in self.reward_shaping_scheme["delta"].items():
if var_name not in self.prev_vars:
continue
# generate reward based on how the env variable values changed
new_value = info.get(var_name, 0.0)
prev_value = self.prev_vars[var_name]
delta = new_value - prev_value
if var_name in self.reward_delta_limits:
delta = min(delta, self.reward_delta_limits[var_name])
if abs(delta) > EPS:
if delta > EPS:
reward_delta = delta * rewards[0]
else:
reward_delta = -delta * rewards[1]
reward += reward_delta
deltas.append((var_name, reward_delta, delta))
self.reward_structure[var_name] = self.reward_structure.get(var_name, 0.0) + reward_delta
return reward, deltas
def _selected_weapon_rewards(self, selected_weapon, selected_weapon_ammo, deltas):
# we must keep the weapon ready for a certain number of frames to get rewards
unholstered = len(self.selected_weapon) > 4 and all(sw == selected_weapon for sw in self.selected_weapon)
reward = 0.0
if selected_weapon_ammo > 0 and unholstered:
try:
reward = self.reward_shaping_scheme["selected_weapon"][f"SELECTED{weapon}"]
except KeyError:
log.error("%r", self.reward_shaping_scheme)
log.error("%r", selected_weapon)
weapon_key = f"weapon{selected_weapon}"
deltas.append((weapon_key, reward))
self.reward_structure[weapon_key] = self.reward_structure.get(weapon_key, 0.0) + reward
return reward
def _parse_info(self, info, done):
if self.reward_shaping_scheme is None:
# skip reward calculation
return 0.0
# by default these are negative values if no weapon is selected
selected_weapon = info.get("SELECTED_WEAPON", 0.0)
selected_weapon = int(max(0, selected_weapon))
selected_weapon_ammo = float(max(0.0, info.get("SELECTED_WEAPON_AMMO", 0.0)))
self.selected_weapon.append(selected_weapon)
was_dead = self.prev_dead
is_alive = not info.get("DEAD", 0.0)
just_respawned = was_dead and is_alive
shaping_reward = 0.0
if not done and not just_respawned:
shaping_reward, deltas = self._delta_rewards(info)
shaping_reward += self._selected_weapon_rewards(
selected_weapon,
selected_weapon_ammo,
deltas,
)
if abs(shaping_reward) > 2.5 and not self.print_once:
log.info("Large shaping reward %.3f for %r", shaping_reward, deltas)
self.print_once = True
if done and "FRAGCOUNT" in self.reward_structure:
sorted_rew = sorted(self.reward_structure.items(), key=operator.itemgetter(1))
sum_rew = sum(r for key, r in sorted_rew)
sorted_rew = {key: f"{r:.3f}" for key, r in sorted_rew}
log.info("Sum rewards: %.3f, reward structure: %r", sum_rew, sorted_rew)
return shaping_reward
def reset(self, **kwargs):
obs, info = self.env.reset(**kwargs)
self.prev_vars = dict()
self.prev_dead = True
self.reward_structure = dict()
self.selected_weapon.clear()
self.orig_env_reward = self.total_shaping_reward = 0.0
self.print_once = False
return obs, info
def step(self, action):
obs, rew, terminated, truncated, info = self.env.step(action)
if obs is None:
return obs, rew, terminated, truncated, info
done = terminated | truncated
self.orig_env_reward += rew
shaping_rew = self._parse_info(info, done)
rew += shaping_rew
self.total_shaping_reward += shaping_rew
if self.verbose:
log.info("Original env reward before shaping: %.3f", self.orig_env_reward)
player_id = 1
if hasattr(self.env.unwrapped, "player_id"):
player_id = self.env.unwrapped.player_id
log.info(
"Total shaping reward is %.3f for %d (done %d)",
self.total_shaping_reward,
player_id,
done,
)
# remember new variable values
for var_name in self.reward_shaping_scheme["delta"].keys():
self.prev_vars[var_name] = info.get(var_name, 0.0)
self.prev_dead = not not info.get("DEAD", 0.0) # float -> bool
if done:
if self.true_objective_func is None:
true_objective = self.orig_env_reward
else:
true_objective = self.true_objective_func(info)
info["true_objective"] = true_objective
return obs, rew, terminated, truncated, info
def close(self):
self.env.unwrapped.reward_shaping_interface = None
return self.env.close()
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/vizdoom/doom/wrappers/reward_shaping.py
| 0.831793 | 0.265102 |
reward_shaping.py
|
pypi
|
import gymnasium as gym
import numpy as np
from sample_factory.algo.utils.rl_utils import make_dones
class MultiplayerStatsWrapper(gym.Wrapper):
"""Add to info things like place in the match, gap to leader, kill-death ratio etc."""
def __init__(self, env):
super().__init__(env)
self.timestep = 0
self.prev_extra_info = dict()
def _parse_info(self, info, done):
if (self.timestep % 20 == 0 or done) and "FRAGCOUNT" in info:
# no need to update these stats every frame
kdr = info.get("FRAGCOUNT", 0.0) / (info.get("DEATHCOUNT", 0.0) + 1)
extra_info = {"KDR": float(kdr)}
player_count = int(info.get("PLAYER_COUNT", 1))
player_num = int(info.get("PLAYER_NUMBER", 0))
fragcounts = [int(info.get(f"PLAYER{pi}_FRAGCOUNT", -100000)) for pi in range(1, player_count + 1)]
places = list(np.argsort(fragcounts))
final_place = places.index(player_num)
final_place = player_count - final_place # inverse, because fragcount is sorted in increasing order
extra_info["FINAL_PLACE"] = final_place
if final_place > 1:
extra_info["LEADER_GAP"] = max(fragcounts) - fragcounts[player_num]
elif player_count > 1:
# we won, let's log gap to 2nd place
assert places.index(player_num) == player_count - 1
fragcounts.sort(reverse=True)
extra_info["LEADER_GAP"] = fragcounts[1] - fragcounts[0] # should be negative or 0
assert extra_info["LEADER_GAP"] <= 0
else:
extra_info["LEADER_GAP"] = 0
self.prev_extra_info = extra_info
else:
extra_info = self.prev_extra_info
info.update(extra_info)
return info
def reset(self, **kwargs):
self.timestep = 0
self.prev_extra_info = dict()
return self.env.reset(**kwargs)
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
if obs is None:
return obs, reward, terminated, truncated, info
done = make_dones(terminated, truncated)
info = self._parse_info(info, done)
self.timestep += 1
return obs, reward, terminated, truncated, info
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py
| 0.642208 | 0.416144 |
multiplayer_stats.py
|
pypi
|
import gymnasium as gym
import numpy as np
from sf_examples.vizdoom.doom.wrappers.reward_shaping import NUM_WEAPONS
class DoomAdditionalInput(gym.Wrapper):
"""Add game variables to the observation space + reward shaping."""
def __init__(self, env):
super().__init__(env)
current_obs_space = self.observation_space
self.num_weapons = NUM_WEAPONS
weapons_low = [0.0] * self.num_weapons
ammo_low = [0.0] * self.num_weapons
low = [0.0, 0.0, -1.0, -1.0, -50.0, 0.0, 0.0] + weapons_low + ammo_low
weapons_high = [5.0] * self.num_weapons # can have multiple weapons in the same slot?
ammo_high = [50.0] * self.num_weapons
high = [20.0, 50.0, 50.0, 50.0, 50.0, 1.0, 10.0] + weapons_high + ammo_high
self.observation_space = gym.spaces.Dict(
{
"obs": current_obs_space,
"measurements": gym.spaces.Box(
low=np.array(low, dtype=np.float32),
high=np.array(high, dtype=np.float32),
),
}
)
num_measurements = len(low)
self.measurements_vec = np.zeros([num_measurements], dtype=np.float32)
def _parse_info(self, obs, info):
obs_dict = {"obs": obs, "measurements": self.measurements_vec}
# by default these are negative values if no weapon is selected
selected_weapon = info.get("SELECTED_WEAPON", 0.0)
selected_weapon = round(max(0, selected_weapon))
selected_weapon_ammo = max(0.0, info.get("SELECTED_WEAPON_AMMO", 0.0))
# similar to DFP paper, scaling all measurements so that they are small numbers
selected_weapon_ammo /= 15.0
selected_weapon_ammo = min(selected_weapon_ammo, 5.0)
# we don't really care how much negative health we have, dead is dead
info["HEALTH"] = max(0.0, info.get("HEALTH", 0.0))
health = info.get("HEALTH", 0.0) / 30.0
armor = info.get("ARMOR", 0.0) / 30.0
kills = info.get("USER2", 0.0) / 10.0 # only works in battle and battle2, this is not really useful
attack_ready = info.get("ATTACK_READY", 0.0)
num_players = info.get("PLAYER_COUNT", 1) / 5.0
# TODO add FRAGCOUNT to the input, so agents know when they are winning/losing
measurements = obs_dict["measurements"]
i = 0
measurements[i] = float(selected_weapon)
i += 1
measurements[i] = float(selected_weapon_ammo)
i += 1
measurements[i] = float(health)
i += 1
measurements[i] = float(armor)
i += 1
measurements[i] = float(kills)
i += 1
measurements[i] = float(attack_ready)
i += 1
measurements[i] = float(num_players)
i += 1
for weapon in range(self.num_weapons):
measurements[i] = float(max(0.0, info.get(f"WEAPON{weapon}", 0.0)))
i += 1
for weapon in range(self.num_weapons):
ammo = float(max(0.0, info.get(f"AMMO{weapon}", 0.0)))
ammo /= 15.0 # scaling factor similar to DFP paper (to keep everything small)
ammo = min(ammo, 5.0) # to avoid values that are too big
measurements[i] = ammo
i += 1
return obs_dict
def reset(self, **kwargs):
obs, _ = self.env.reset(**kwargs)
info = self.env.unwrapped.get_info()
obs = self._parse_info(obs, info)
return obs, info
def step(self, action):
obs, rew, terminated, truncated, info = self.env.step(action)
if obs is None:
return obs, rew, terminated, truncated, info
obs_dict = self._parse_info(obs, info)
return obs_dict, rew, terminated, truncated, info
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/vizdoom/doom/wrappers/additional_input.py
| 0.594316 | 0.449755 |
additional_input.py
|
pypi
|
import gymnasium as gym
from sample_factory.utils.utils import log
# Wrapper no longer in use
class BotDifficultyWrapper(gym.Wrapper):
"""Adjust bot difficulty according to agent's final position in the match."""
def __init__(self, env, initial_difficulty=None):
super().__init__(env)
self._min_difficulty = 0
self._max_difficulty = 150
self._difficulty_step = 10
self._curr_difficulty = 20 if initial_difficulty is None else initial_difficulty
self._difficulty_std = 10
log.info("Starting with bot difficulty %d", self._curr_difficulty)
self._adaptive_curriculum = True
if initial_difficulty == self._max_difficulty:
log.debug("Starting at max difficulty, disable adaptive skill curriculum")
self._adaptive_curriculum = False
def _analyze_standings(self, info):
if "FINAL_PLACE" in info:
final_place = info["FINAL_PLACE"]
if final_place <= 1 and info.get("LEADER_GAP", 0.0) < 0:
# we beat all the bots, increase difficulty
self._curr_difficulty += self._difficulty_step
self._curr_difficulty = min(self._curr_difficulty, self._max_difficulty)
else:
player_count = int(info.get("PLAYER_COUNT", 1))
if final_place >= player_count - 1:
# got beaten badly, decrease difficulty
self._curr_difficulty -= self._difficulty_step
self._curr_difficulty = max(self._curr_difficulty, self._min_difficulty)
else:
# it's fine, keep the difficulty
pass
def reset(self, **kwargs):
if hasattr(self.env.unwrapped, "bot_difficulty_mean"):
self.env.unwrapped.bot_difficulty_mean = self._curr_difficulty
self.env.unwrapped.bot_difficulty_std = self._difficulty_std
return self.env.reset(**kwargs)
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
if obs is None:
return obs, reward, terminated, truncated, info
if (terminated | truncated) and self._adaptive_curriculum:
self._analyze_standings(info)
info["BOT_DIFFICULTY"] = self._curr_difficulty
return obs, reward, terminated, truncated, info
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sf_examples/vizdoom/doom/wrappers/bot_difficulty.py
| 0.701202 | 0.289993 |
bot_difficulty.py
|
pypi
|
import pandas as pd
import subprocess
import re
import random
import warnings
from io import StringIO
import argparse
from pathlib import Path
DIFF_EXP = re.compile("@@[ 0-9\.,\-\+]{2,50}@@")
def sample_from_diff(s, n=1, filename=""):
lines = s.split("\n")
intro = []
diffs = []
for l in lines:
# Check '@' first for performance reasons
if "@" in l and DIFF_EXP.search(l) is not None:
diffs.append(l)
elif len(diffs) >= 1:
diffs[-1] += f"\n{l}"
else:
intro.append(l)
intro = "\n".join(intro)
if n > len(diffs):
warnings.warn(f"n ({n}) is larger than the number of diffs in the file ({len(diffs)}) {filename}", stacklevel=2)
diff_sample = diffs
else:
diff_sample = random.sample(diffs, n)
diff = "\n".join(diff_sample)
return f"{intro}\n{diff}"
def sample_diffs(diffstat="git diff --stat", diffcommand="git diff", n=150):
diffstat = diffstat.replace("--stat", "--stat=1000")
call = list(diffstat.split())
result = subprocess.run(call, capture_output=True)
csv_data = result.stdout.decode("utf-8")
df = pd.read_csv(StringIO(csv_data), delimiter="|", names=["filename", "changes"])
df["changes"] = df["changes"].str.strip().str.split(" ").str[0]
df = df[df["changes"].notnull()]
df = df[~df["changes"].str.contains("Bin")]
df["changes"] = df["changes"].astype(int)
df["p"] = df["changes"] / df["changes"].sum()
if len(df) == 0:
warnings.warn(f"No diffs detected", stacklevel=2)
return ""
sample = df.sample(n, weights="p", replace=True)
sample = sample.groupby(['filename'], as_index=False).size()
output = []
diffcommand_prime = list(filter(lambda fp: not Path(fp).exists(), diffcommand.split()))
for _, row in sample.iterrows():
filename, n_row = row["filename"], row["size"]
call = diffcommand_prime + [filename.strip()]
result = subprocess.run(call, capture_output=True)
s = result.stdout.decode("utf-8")
diff = sample_from_diff(s, n=n_row, filename=filename.strip())
output.append(diff)
return "\n".join(output)
def cli():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument("--n", type=int, default=150, help="Total number of diffs to be sampled")
argparser.add_argument("--diffstat", type=str, default="git diff --stat",
help="Custom git diff command for the sampling probabilities")
argparser.add_argument("--diffcommand", type=str, default=None,
help="Custom git diff command for the actual diff")
args = argparser.parse_args()
diffstat, diffcommand = args.diffstat, args.diffcommand
n = args.n
if diffcommand is None:
diffcommand = diffstat.replace("--stat", "")
output = sample_diffs(diffstat=diffstat, diffcommand=diffcommand, n=n)
print(output)
|
/sample_git_diffs-0.4.3-py3-none-any.whl/sample_git_diffs/sample_git_diffs.py
| 0.415017 | 0.249493 |
sample_git_diffs.py
|
pypi
|
import functools
import logging
import os
import pathlib
import shutil
import tarfile
import tempfile
import weakref
from typing import Any, Dict, Iterable, Optional, Sequence, Union
import numpy as np
from pigz_python import pigz_python
logger = logging.getLogger(__name__)
COMPRESS_LEVEL_BEST = pigz_python._COMPRESS_LEVEL_BEST
DEFAULT_BLOCK_SIZE_KB = pigz_python.DEFAULT_BLOCK_SIZE_KB
CPU_COUNT = os.cpu_count()
def class_repr(cls, filter_types: Sequence[Any] = [], **kwargs) -> str:
attributes = class_attributes(cls, filter_types=filter_types)
kwargstring = kv_string((kwargs, attributes))
return f"{cls.__class__.__name__}({kwargstring})"
def kv_string(dicts: Iterable[Dict[Any, Any]]) -> str:
return ",".join(f"{k}={v}" for d in dicts for k, v in d.items())
def class_attributes(cls, filter_types: Sequence[Any] = (int, float, bool, str)) -> Dict[str, Any]:
return {
k: v for k, v in vars(cls).items() if (type(v) in filter_types or not filter_types) and len(v.__repr__()) < 80
}
def basic_attribute_repr(cls):
@functools.wraps(cls, updated=())
class ReprDecorated(cls):
def __repr__(self) -> str:
return class_repr(self)
return ReprDecorated
def human_bytes(bytes: float) -> str:
"""Human readable string representation of bytes"""
units = "bytes"
if bytes > 1024:
units = "KiB"
bytes = bytes / 1024
if bytes > 1024:
units = "MiB"
bytes = bytes / 1024
if bytes > 1024:
units = "GiB"
bytes = bytes / 1024
return f"%.1f {units}" % bytes
def filesize(filename: str) -> str:
"""Human readable string representation of filesize"""
if not os.path.exists(filename):
logger.warn(f"File {filename} does not exist")
return human_bytes(0)
return human_bytes(os.path.getsize(filename))
def tar_files(
output_filename: str,
files: Iterable[str],
file_arcnames: Iterable[str],
compression: str = "gz",
compresslevel=9,
delete_added: bool = True,
) -> str:
"""Tar files."""
kwargs = {"compresslevel": compresslevel} if compression else {}
with tarfile.open(output_filename, mode=f"w:{compression}", **kwargs) as tarf:
for file, arcname in zip(files, file_arcnames):
tarf.add(file, arcname=arcname)
if delete_added:
os.remove(file)
return output_filename
def untar_members(input_tarfile: str, members: Iterable[str], output_dir: str) -> Iterable[str]:
"""Extract files from a tarball."""
output_filenames = []
with tarfile.open(input_tarfile, mode="r") as tarf:
for member in members:
out_filename = os.path.join(output_dir, member)
logger.info(f"Extracting {member} to {out_filename}...")
tarf.extract(member, path=output_dir)
output_filenames.append(out_filename)
return output_filenames
def tar_gz_files(
output_filename: str,
files: Iterable[str],
file_arcnames: Iterable[str],
compression: str = "gz",
compresslevel=COMPRESS_LEVEL_BEST,
blocksize=DEFAULT_BLOCK_SIZE_KB,
workers=CPU_COUNT,
delete_added: bool = True,
) -> str:
"""Tar and gzip files using pigz for multithreading."""
with tempfile.NamedTemporaryFile() as tarf:
if compression != "gz":
# Can't use pigz for the compression
return tar_files(
output_filename,
files,
file_arcnames,
delete_added=delete_added,
compression=compression,
compresslevel=compresslevel,
)
tmp_file = tarf.name
else:
tar_files(tarf.name, files, file_arcnames, delete_added=delete_added, compression="")
tmp_file = f"{tarf.name}.gz"
pigz_python.compress_file(
tarf.name,
compresslevel=compresslevel,
blocksize=blocksize,
workers=workers,
)
os.makedirs(os.path.dirname(output_filename), exist_ok=True)
shutil.move(tmp_file, output_filename)
return output_filename
class NamedTemporaryDirectory(tempfile.TemporaryDirectory):
"""A wrapper around tempfile.TemporaryDirectory that allows for specifying the path."""
def __init__(self, path: str):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
self.name = path
self._finalizer = weakref.finalize(
self, self._cleanup, self.name, warn_message="Implicitly cleaning up {!r}".format(self)
)
@classmethod
def of(cls, dir: Union[str, tempfile.TemporaryDirectory, None]) -> tempfile.TemporaryDirectory:
"""Convenience method to get a TemporaryDirectory from a Union of possible dir references."""
if isinstance(dir, tempfile.TemporaryDirectory):
return dir
if isinstance(dir, str):
return cls(dir)
return tempfile.TemporaryDirectory()
def round_array(array: np.ndarray, to_nearest: float = 10.0) -> np.ndarray:
return (array / to_nearest).round() * to_nearest
|
/sample_id-0.1.27-py3-none-any.whl/sample_id/util.py
| 0.77518 | 0.207817 |
util.py
|
pypi
|
import argparse
import logging
import logging.config
import os
import statistics
from collections import defaultdict
import joblib
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from matplotlib.backends.backend_pgf import FigureCanvasPgf
from sample_recognition import Match, Neighbor, Result
from tabulate import tabulate
matplotlib.backend_bases.register_backend("pdf", FigureCanvasPgf)
seaborn.set(style="ticks")
seaborn.set_context("paper")
logger = logging.getLogger(__name__)
scriptdir = os.path.dirname(os.path.realpath(__file__))
logfile = os.path.join(scriptdir, "logging.ini")
logging.config.fileConfig(logfile, disable_existing_loggers=False)
def load_results(path):
logger.info("Loading result into memory: {}".format(path))
result = joblib.load(path)
return result
def display_result(result):
print("{} sampled from:".format(str(result.track).encode("ascii", "ignore")))
for source, times in result.times.items():
print("{} at ".format(source.encode("ascii", "ignore")))
for i, time in enumerate(times):
print("\t{} => {}".format(*time), end="")
print("\tPitch_shift: {}".format(statistics.median(result.pitch_shift[source][i])), end="")
print("\tTime_stretch: {}".format(statistics.median(result.time_stretch[source][i])))
print("True Positives: {}".format(result.true_pos))
print("False Positives: {}".format(result.false_pos))
print("False Negatives: {}".format(result.false_neg))
print("\n")
def parse_input(track_param):
if len(track_param) == 1:
track_param = track_param[0]
if isinstance(track_param, str) and os.path.isdir(track_param):
tracks = []
for f in os.listdir(track_param):
path = os.path.join(track_param, f)
if os.path.isfile(path):
tracks.append(path)
return tracks
else:
if isinstance(track_param, str):
track_param = [track_param]
return track_param
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a binaural stereo wav file from a mono audio file.")
parser.add_argument("results", type=str, nargs="+", help="Either a directory or list of files")
parser.add_argument("-v", "--verbose", action="store_true", help="Print debug messages to stdout")
args = parser.parse_args()
import logging.config
scriptdir = os.path.dirname(os.path.realpath(__file__))
logfile = os.path.join(scriptdir, "logging.ini")
logging.config.fileConfig(logfile, disable_existing_loggers=False)
if args.verbose:
logger.setLevel(logging.DEBUG)
logger.debug("Verbose debugging activated")
inputs = sorted(parse_input(args.results))
train = joblib.load("data/tracks/train_full.p")
precisions = []
recalls = []
for item in inputs:
results = load_results(item)
true_pos = np.float64(sum(len(r.true_pos) for r in results))
false_pos = np.float64(sum(len(r.false_pos) for r in results))
false_neg = np.float64(sum(len(r.false_neg) for r in results))
print("Totals:")
print("True Pos: {}".format(true_pos))
print("False Pos: {}".format(false_pos))
print("False Neg: {}".format(false_neg))
precision = true_pos / (true_pos + false_pos)
print("precision: {}".format(precision))
recall = true_pos / (true_pos + false_neg)
print("recall: {}".format(recall))
f_score = (precision * recall) / (precision + recall)
print("F-score: {}".format(f_score))
precisions.append(precision)
recalls.append(recall)
print("\n")
genres_orig = defaultdict(lambda: defaultdict(int))
genres_deriv = defaultdict(lambda: defaultdict(int))
for r in results:
for s in r.true_pos:
genres_deriv[s.derivative.genre]["true_pos"] += 1
genres_orig[s.original.genre]["true_pos"] += 1
for t in r.false_pos:
genres_deriv[r.track.genre]["false_pos"] += 1
genres_orig[t.genre]["false_pos"] += 1
for s in r.false_neg:
genres_deriv[s.derivative.genre]["false_neg"] += 1
genres_orig[s.original.genre]["false_neg"] += 1
genres = []
for g in genres_deriv:
true_pos = np.float64(genres_deriv[g]["true_pos"])
false_pos = np.float64(genres_deriv[g]["false_pos"])
false_neg = np.float64(genres_deriv[g]["false_neg"])
recall = true_pos / (true_pos + false_neg)
precision = true_pos / (true_pos + false_pos)
f_score = (precision * recall) / (precision + recall)
genres.append([g, recall, precision, f_score])
print(tabulate(genres, headers=["recall", "precision", "f_score"], tablefmt="latex"))
print("\n")
genres = []
for g in genres_orig:
true_pos = np.float64(genres_orig[g]["true_pos"])
false_pos = np.float64(genres_orig[g]["false_pos"])
false_neg = np.float64(genres_orig[g]["false_neg"])
recall = true_pos / (true_pos + false_neg)
precision = true_pos / (true_pos + false_pos)
f_score = (precision * recall) / (precision + recall)
genres.append([g, recall, precision, f_score])
print(tabulate(genres, headers=["recall", "precision", "f_score"], tablefmt="latex"))
print("\n")
instruments = defaultdict(lambda: defaultdict(int))
for r in results:
for s in r.true_pos:
instruments[s.instrument]["true_pos"] += 1
for i in r.false_pos:
instruments[s.instrument]["false_pos"] += 1
for s in r.false_neg:
instruments[s.instrument]["false_neg"] += 1
inst_recalls = []
for i in instruments:
pos = np.float64(instruments[i]["true_pos"])
neg = np.float64(instruments[i]["false_neg"])
inst_recalls.append([i, pos / (pos + neg)])
print(tabulate(inst_recalls, headers=["recall"], tablefmt="latex"))
if len(inputs) > 1:
plt.plot(recalls, precisions, label="Precision-Recall curve")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.show()
|
/sample_id-0.1.27-py3-none-any.whl/sample_id/analysis.py
| 0.447943 | 0.201951 |
analysis.py
|
pypi
|
from __future__ import annotations
import itertools
import logging
import math
from collections import defaultdict
from typing import Dict, List, Set, Tuple
from . import query
logger = logging.getLogger(__name__)
def cluster(
matches: List[query.Match], cluster_size: int = 3, cluster_dist: float = 20
) -> Tuple[List[query.Cluster], List[query.Cluster]]:
logger.info("Clustering matches...")
logger.debug(f"cluster_dist: {cluster_dist} samples")
clusters = set()
votes = ght(matches, cluster_dist)
for source, bins in votes.items():
source_clusters = set()
for bin, cluster in bins.items():
if len(cluster) >= cluster_size:
source_clusters.add(query.Cluster(cluster))
source_clusters = merge_nearby_clusters(source_clusters, cluster_dist)
clusters = clusters.union(source_clusters)
clusters = list(clusters)
total_clusters = [query.Cluster(c) for bins in votes.values() for c in bins.values()]
return clusters, total_clusters
def ght(
matches: List[query.Match], cluster_dist: float = 20
) -> Dict[str, Dict[Tuple[float, float, float], Set[query.Match]]]:
"""Generalized Hough transform"""
votes: Dict[str, Dict[Tuple[float, float, float], Set[query.Match]]] = defaultdict(lambda: defaultdict(set))
try:
dim = max(m.neighbors[0].keypoint.scale for m in matches)
except:
dim = 2
for match in matches:
ds = round_to(match.keypoint.scale / match.neighbors[0].keypoint.scale, 2)
# d_theta = round_to(match.keypoint.orientation - match.neighbors[0].keypoint.orientation, 0.5)
dx = round_to(match.keypoint.x - match.neighbors[0].keypoint.x, 1.5 * dim)
dy = round_to(match.keypoint.y - match.neighbors[0].keypoint.y, 1.5 * dim)
bins = itertools.product(*(dx, dy, ds))
for bin in bins:
x_vals = [m.neighbors[0].keypoint.x for m in votes[match.neighbors[0].source_id][bin]]
try:
min_x = min(x_vals)
max_x = max(x_vals)
except:
min_x = max_x = match.neighbors[0].keypoint.x
if min_x - cluster_dist < match.neighbors[0].keypoint.x < max_x + cluster_dist:
votes[match.neighbors[0].source_id][bin].add(match)
return votes
def round_to(x: float, base: float = 1, sig_figs: int = 4) -> Tuple[float, float]:
lo = round(base * math.floor(float(x) / base), sig_figs)
hi = round(base * math.ceil(float(x) / base), sig_figs)
return (lo, hi)
def merge_nearby_clusters(clusters: Set[query.Cluster], cluster_dist: float) -> Set[query.Cluster]:
# Merge nearby clusters
merged_clusters = set()
for cluster in clusters:
merged = False
for c in merged_clusters:
if (
c.min_deriv_x - cluster_dist <= cluster.min_deriv_x
and cluster.max_deriv_x <= c.max_deriv_x + cluster_dist
) and (
c.min_source_x - cluster_dist <= cluster.min_source_x
and cluster.max_source_x <= c.max_source_x + cluster_dist
):
c.merge(cluster)
merged = True
if not merged:
merged_clusters.add(cluster)
return merged_clusters
|
/sample_id-0.1.27-py3-none-any.whl/sample_id/ann/hough.py
| 0.672439 | 0.402539 |
hough.py
|
pypi
|
from __future__ import annotations
import logging
import operator
from typing import Any, Iterable, List, Optional, Sequence
from sample_id.fingerprint import Fingerprint
from . import Matcher, MatcherMetadata
from .query import Match
logger = logging.getLogger(__name__)
# TODO: Refactor, maybe to separate training from inference, because this should not be trainable
class HiveMatcher(Matcher):
"""A wrapper around a list of Matchers so that they act like a single Matcher (for inference only)."""
def __init__(self, matchers: List[Matcher]):
sr = next((matcher.meta.sr for matcher in matchers), None)
hop_length = next((matcher.meta.hop_length for matcher in matchers), None)
for matcher in matchers:
if matcher.meta.sr != sr or matcher.meta.hop_length != hop_length:
raise ValueError(f"Hive must all have the same sr and hop_length, can't add {matcher}")
self.meta = MatcherMetadata(sr=sr, hop_length=hop_length)
self.matchers = matchers
def add_matcher(self, matcher: Matcher) -> HiveMatcher:
if self.meta.sr is None:
self.meta.sr = matcher.meta.sr
if self.meta.hop_length is None:
self.meta.hop_length = matcher.meta.hop_length
if matcher.meta.sr != self.meta.sr or matcher.meta.hop_length != self.meta.hop_length:
raise ValueError(f"Hive must all have the same sr and hop_length, can't add {matcher}")
self.matchers.append(matcher)
return self
def init_model(self) -> Any:
raise NotImplementedError(f"Don't do this.")
def save_model(self, filepath: str, **kwargs) -> str:
raise NotImplementedError(f"Don't do this.")
def load_model(self, filepath: str, **kwargs) -> Any:
raise NotImplementedError(f"Don't do this.")
def add_fingerprint(self, fingerprint: Fingerprint, dedupe=True) -> Matcher:
raise NotImplementedError(f"Don't do this.")
def add_fingerprints(self, fingerprints: Iterable[Fingerprint], **kwargs) -> Matcher:
raise NotImplementedError(f"Don't do this.")
def can_add_fingerprint(self, fingerprint: Fingerprint) -> bool:
return False
def save(self, filepath: str, compress: bool = True, **kwargs) -> str:
raise NotImplementedError(f"Don't do this, save the matchers individually.")
@classmethod
def create(cls, sr: Optional[int] = None, hop_length: Optional[int] = None, **kwargs) -> Matcher:
raise NotImplementedError(f"Don't do this.")
@classmethod
def from_fingerprint(cls, fp: Fingerprint, **kwargs) -> Matcher:
raise NotImplementedError(f"Don't do this.")
@classmethod
def from_fingerprints(cls, fingerprints: Sequence[Fingerprint], **kwargs) -> Matcher:
raise NotImplementedError(f"Don't do this.")
@classmethod
def load(cls, filepaths: Iterable[str], **kwargs) -> Matcher:
"""Load multiple matchers from disk into hive."""
matchers = []
for filepath in filepaths:
matcher = Matcher.load(filepath, **kwargs)
matchers.append(matcher)
return cls(matchers)
def nearest_neighbors(self, fp: Fingerprint, k: int = 1) -> Sequence[Match]:
hive_matches = []
for matcher in self.matchers:
matches = matcher.nearest_neighbors(fp, k)
hive_matches.append(matches)
resorted_matches = self.resolve_hive_matches(hive_matches, k)
return resorted_matches
def resolve_hive_matches(self, hive_matches: List[List[Match]], k: int = 1) -> Sequence[Match]:
resorted_matches = []
for kp_hive_matches in zip(*hive_matches):
top_k_neighbors = sorted(
(neighbor for match in kp_hive_matches for neighbor in match.neighbors),
key=operator.attrgetter("distance"),
)[:k]
head = next(match for match in kp_hive_matches)
resorted_matches.append(Match(head.keypoint, top_k_neighbors))
return resorted_matches
|
/sample_id-0.1.27-py3-none-any.whl/sample_id/ann/hive.py
| 0.77535 | 0.202897 |
hive.py
|
pypi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.