Dataset schema (per-field value statistics across all rows):

| Field | Type | Values |
|---|---|---|
| `instance_id` | string | length 10–57 |
| `patch` | string | length 261–37.7k |
| `repo` | string | length 7–53 |
| `base_commit` | string | length 40 |
| `hints_text` | string | 301 distinct values |
| `test_patch` | string | length 212–2.22M |
| `problem_statement` | string | length 23–37.7k |
| `version` | string | 1 distinct value |
| `environment_setup_commit` | string | length 40 |
| `FAIL_TO_PASS` | list | length 1–4.94k |
| `PASS_TO_PASS` | list | length 0–7.82k |
| `meta` | dict | n/a |
| `created_at` | string | length 25 |
| `license` | string | 8 distinct values |
| `__index_level_0__` | int64 | range 0–6.41k |
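For orientation, a minimal sketch of consuming rows with this schema, assuming the dump were exported as JSON lines (the file name is hypothetical):

```python
# Minimal sketch: iterate over task instances exported as JSON lines.
# 'instances.jsonl' is a hypothetical file name; the field names follow
# the schema above.
import json

with open('instances.jsonl') as f:
    for line in f:
        row = json.loads(line)
        print(row['instance_id'], row['repo'], len(row['FAIL_TO_PASS']))
```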
alexa__alexa-skills-kit-sdk-for-python-92 | diff --git a/ask-sdk-runtime/ask_sdk_runtime/dispatch.py b/ask-sdk-runtime/ask_sdk_runtime/dispatch.py
index 855f563..3fbf63f 100644
--- a/ask-sdk-runtime/ask_sdk_runtime/dispatch.py
+++ b/ask-sdk-runtime/ask_sdk_runtime/dispatch.py
@@ -119,7 +119,7 @@ class GenericRequestDispatcher(AbstractRequestDispatcher):
for response_interceptor in self.response_interceptors:
response_interceptor.process(
- handler_input=handler_input, dispatch_output=output)
+ handler_input=handler_input, response=output)
return output
except Exception as e:
@@ -186,6 +186,6 @@ class GenericRequestDispatcher(AbstractRequestDispatcher):
request_handler_chain.response_interceptors)
for response_interceptor in local_response_interceptors:
response_interceptor.process(
- handler_input=handler_input, dispatch_output=output)
+ handler_input=handler_input, response=output)
return output
diff --git a/ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py b/ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py
index 96da21b..d74b53f 100644
--- a/ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py
+++ b/ask-sdk-runtime/ask_sdk_runtime/dispatch_components/request_components.py
@@ -97,16 +97,16 @@ class AbstractResponseInterceptor(object):
__metaclass__ = ABCMeta
@abstractmethod
- def process(self, handler_input, dispatch_output):
+ def process(self, handler_input, response):
# type: (Input, Output) -> None
"""Process the input and the output after the Handler is run.
:param handler_input: Generic input passed to the
dispatcher.
:type handler_input: Input
- :param dispatch_output: Execution result of the Handler on
+ :param response: Execution result of the Handler on
dispatch input.
- :type dispatch_output: Union[None, Output]
+ :type response: Union[None, Output]
:rtype: None
"""
raise NotImplementedError
| alexa/alexa-skills-kit-sdk-for-python | 928027fac4bcd42ee9d32510ab11386d86ea3ec1 | diff --git a/ask-sdk-runtime/tests/unit/test_dispatch.py b/ask-sdk-runtime/tests/unit/test_dispatch.py
index 825c645..a7a52a1 100644
--- a/ask-sdk-runtime/tests/unit/test_dispatch.py
+++ b/ask-sdk-runtime/tests/unit/test_dispatch.py
@@ -301,14 +301,14 @@ class TestRequestDispatcher(unittest.TestCase):
test_interceptor_1.process.assert_called_once_with(
handler_input=self.valid_handler_input,
- dispatch_output=test_response_before_interceptor), (
+ response=test_response_before_interceptor), (
"Dispatcher dispatch method didn't process local response "
"interceptors after calling request handler "
"handle")
test_interceptor_2.process.assert_called_once_with(
handler_input=self.valid_handler_input,
- dispatch_output=test_response_from_interceptor_1), (
+ response=test_response_from_interceptor_1), (
"Dispatcher dispatch method didn't process local response "
"interceptors after calling request handler "
"handle")
@@ -350,13 +350,13 @@ class TestRequestDispatcher(unittest.TestCase):
test_interceptor_1.process.assert_called_once_with(
handler_input=self.valid_handler_input,
- dispatch_output=test_response_before_interceptor), (
+ response=test_response_before_interceptor), (
"Dispatcher dispatch method didn't process global request "
"interceptors after calling dispatch request")
test_interceptor_2.process.assert_called_once_with(
handler_input=self.valid_handler_input,
- dispatch_output=test_response_from_interceptor_1), (
+ response=test_response_from_interceptor_1), (
"Dispatcher dispatch method didn't process global request "
"interceptors after calling dispatch request")
| Regression in GlobalResponseInterceptor - unexpected keyword argument 'dispatch_output'
<!-- Check one of the following options with "x" -->
<pre><code>
[X] Regression (a behavior that used to work and stopped working in a new release)
[ ] Bug report <!-- Please search GitHub for a similar issue or PR before submitting -->
[ ] Performance issue
[ ] Feature request
[ ] Documentation issue or request
[ ] Other... Please describe:
</code></pre>
<!--- Provide a general summary of the issue in the Title above -->
With the recent update to 1.10 and the resulting backend changes, a regression was introduced in the Alexa Skills Kit involving the use of the decorated @SkillBuilder.global_response_interceptor(). Attempting to implement a response interceptor results in a runtime exception: `<lambda>() got an unexpected keyword argument 'dispatch_output'`
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
I shouldn't have to comment out my response logging code, especially because of a core platform defect introduced as a regression.
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- Include full errors, uncaught exceptions, stack traces, and relevant logs -->
<!--- If service responses are relevant, please include any -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
It fails with the following error:
[ERROR] 2019-05-30T00:46:47.187Z 0c4a841e-0ad3-4c66-bb6e-96d4726c2d38 <lambda>() got an unexpected keyword argument 'dispatch_output'
Traceback (most recent call last):
File "/opt/python/ask_sdk_runtime/dispatch.py", line 122, in dispatch
handler_input=handler_input, dispatch_output=output)
TypeError: <lambda>() got an unexpected keyword argument 'dispatch_output'
[DEBUG] 2019-05-30T00:46:47.229Z 0c4a841e-0ad3-4c66-bb6e-96d4726c2d38 get_prompt for exception: There was a problem on our end and we have recorded the issue. Please try again.
## Possible Solution
```
// Not required, but suggest a fix/reason for the bug,
// or ideas how to implement the addition or change
```
Commenting out any usage of global_response_interceptor will fix it, but you lose response interception.
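For illustration, a hedged sketch of an interceptor written against the corrected signature from the patch above (the class name is a placeholder, and the import path is assumed from the patched module):

```python
# Sketch only: a response interceptor whose process() accepts the
# `response` keyword that the patched dispatcher now passes.
from ask_sdk_runtime.dispatch_components.request_components import (
    AbstractResponseInterceptor,
)


class LoggingResponseInterceptor(AbstractResponseInterceptor):
    def process(self, handler_input, response):
        # The fixed dispatcher calls this with the keyword argument
        # `response` (previously `dispatch_output`); it may be None.
        print('response:', response)
```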
## Steps to Reproduce (for bugs)
```
// Provide a self-contained, concise snippet of code
// For more complex issues provide a repo with the smallest sample that reproduces the bug
// Including business logic or unrelated code makes diagnosis more difficult
```
1. Configure a custom skill.
2. Using the following Python code as the lambda handler and including the 1.10 or 1.9 ASK SDK will reproduce the issue when attempting to launch the skill.
```python
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type

sb = SkillBuilder()


@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
    print(exception)
    return handler_input \
        .response_builder \
        .speak('Exception') \
        .set_should_end_session(True) \
        .response


@sb.request_handler(can_handle_func=is_request_type('LaunchRequest'))
def launch_request_handler(handler_input):
    return handler_input \
        .response_builder \
        .speak('Launch Request') \
        .set_should_end_session(False) \
        .response


@sb.global_request_interceptor()
def request_logger(handler_input):
    print('request', handler_input.request_envelope)


@sb.global_response_interceptor()
def response_logger(handler_input, response):
    print('response', handler_input.request_envelope)
    if response is not None:
        print(response)


handler = sb.lambda_handler()
```
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
This was a regression: the code was working, and then it was not.
## Your Environment
<!--- Include as many relevant details about the environment where the bug was discovered -->
* ASK SDK for Python used: x.x.x
1.9 and 1.10 were tested.
* Operating System and version:
Lambda
## Python version info
* Python version used for development:
Python 3.7
| 0.0 | 928027fac4bcd42ee9d32510ab11386d86ea3ec1 | [
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_successful_global_response_interceptors_execution",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_successful_local_response_interceptors_execution"
]
| [
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_dispatch_process_handled_exception_when_suitable_exception_handler_registered",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_dispatch_raise_low_level_exception_when_exception_handler_not_registered",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_dispatch_raise_low_level_exception_when_no_suitable_exception_handler_registered",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_successful_execution_with_supported_chain_and_supported_adapter",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_successful_global_request_interceptors_execution",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_successful_local_request_interceptors_execution",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_unsuccessful_global_request_interceptors_execution",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_with_no_chains_in_request_mapper",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_with_supported_chain_in_mapper_and_unsupported_adapter",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_with_supported_chain_in_mapper_no_adapters",
"ask-sdk-runtime/tests/unit/test_dispatch.py::TestRequestDispatcher::test_handler_input_with_unsupported_chains_in_request_mapper"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-05-30 16:51:30+00:00 | apache-2.0 | 1,020 |
|
alexander-akhmetov__python-telegram-168 | diff --git a/telegram/client.py b/telegram/client.py
index 9744d32..5a1ea27 100644
--- a/telegram/client.py
+++ b/telegram/client.py
@@ -227,6 +227,15 @@ class Telegram:
return self.call_method('getUser', params={'user_id': user_id})
+ def get_user_full_info(self, user_id: int) -> AsyncResult:
+ """
+ Requests the full information about a user with id = user_id.
+
+ https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_user_full_info.html
+ """
+
+ return self.call_method('getUserFullInfo', params={'user_id': user_id})
+
def get_chats(self, offset_order: int = 0, offset_chat_id: int = 0, limit: int = 100) -> AsyncResult:
"""
Returns a list of chats:
| alexander-akhmetov/python-telegram | 9d5bece5ac489e70dc773f1d8ec7592c7a9fea78 | diff --git a/tests/test_telegram_methods.py b/tests/test_telegram_methods.py
index 80e7954..e5d7229 100644
--- a/tests/test_telegram_methods.py
+++ b/tests/test_telegram_methods.py
@@ -201,6 +201,32 @@ class TestTelegram:
telegram._tdjson.send.assert_called_once_with(exp_data)
+ def test_get_user(self, telegram):
+ user_id = 1
+
+ async_result = telegram.get_user(user_id=user_id)
+
+ exp_data = {
+ '@type': 'getUser',
+ 'user_id': user_id,
+ '@extra': {'request_id': async_result.id},
+ }
+
+ telegram._tdjson.send.assert_called_once_with(exp_data)
+
+ def test_get_user_full_info(self, telegram):
+ user_id = 1
+
+ async_result = telegram.get_user_full_info(user_id=user_id)
+
+ exp_data = {
+ '@type': 'getUserFullInfo',
+ 'user_id': user_id,
+ '@extra': {'request_id': async_result.id},
+ }
+
+ telegram._tdjson.send.assert_called_once_with(exp_data)
+
def test_get_chat(self, telegram):
chat_id = 1
| [FeatureRequest] Add getUserFullInfo() support
In the TDLib update event, there is only the 'sender_user_id' field and no other user identification. Currently, there is no way to get first and last names using python-telegram; as far as I know, the client should use the getUserFullInfo() method for that.
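A rough usage sketch, mirroring the get_user_full_info method the patch above adds (`tg` is assumed to be a logged-in telegram.client.Telegram instance, and the user id is a placeholder):

```python
# Sketch based on the patch above; `tg` and the user id are assumptions.
result = tg.get_user_full_info(user_id=123456789)
result.wait()
if result.error:
    print(f'error: {result.error_info}')
else:
    print(result.update)  # the user's full profile information
```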
Please add it. | 0.0 | 9d5bece5ac489e70dc773f1d8ec7592c7a9fea78 | [
"tests/test_telegram_methods.py::TestTelegram::test_get_user_full_info"
]
| [
"tests/test_telegram_methods.py::TestTelegram::test_phone_bot_token_init",
"tests/test_telegram_methods.py::TestTelegram::test_send_message",
"tests/test_telegram_methods.py::TestTelegram::test_send_phone_number_or_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_send_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_add_message_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler_empty_handlers_list",
"tests/test_telegram_methods.py::TestTelegram::test_add_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers_should_not_be_called_for_another_update_type",
"tests/test_telegram_methods.py::TestTelegram::test_call_method",
"tests/test_telegram_methods.py::TestTelegram::test_get_web_page_instant_view",
"tests/test_telegram_methods.py::TestTelegram::test_get_me",
"tests/test_telegram_methods.py::TestTelegram::test_get_user",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat",
"tests/test_telegram_methods.py::TestTelegram::test_get_chats",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat_history",
"tests/test_telegram_methods.py::TestTelegram::test_set_initial_params",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[key-a2V5]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[byte-key-Ynl0ZS1rZXk=]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-0]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-1]",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_update_async_result_returns_async_result_with_same_id",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_result_id_should_be_replaced_if_it_is_auth_process",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_should_do_nothing_if_already_authorized",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_with_phone",
"tests/test_telegram_methods.py::TestTelegram__login_non_blocking::test_login_process_with_phone"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-04-05 16:37:51+00:00 | mit | 1,021 |
|
alexander-akhmetov__python-telegram-208 | diff --git a/telegram/client.py b/telegram/client.py
index 5a1ea27..01a567c 100644
--- a/telegram/client.py
+++ b/telegram/client.py
@@ -41,6 +41,7 @@ class AuthorizationState(enum.Enum):
WAIT_TDLIB_PARAMETERS = 'authorizationStateWaitTdlibParameters'
WAIT_ENCRYPTION_KEY = 'authorizationStateWaitEncryptionKey'
WAIT_PHONE_NUMBER = 'authorizationStateWaitPhoneNumber'
+ WAIT_REGISTRATION = 'authorizationStateWaitRegistration'
READY = 'authorizationStateReady'
CLOSING = 'authorizationStateClosing'
CLOSED = 'authorizationStateClosed'
@@ -542,7 +543,10 @@ class Telegram:
- AuthorizationState.WAIT_PASSWORD if a telegram password is required.
The caller should ask the telegram password
to the end user and then call send_password(password)
- - AuthorizationState.READY if the login process scceeded.
+ - AuthorizationState.WAIT_REGISTRATION if a the user must finish registration
+ The caller should ask the first and last names
+ to the end user and then call register_user(first, last)
+ - AuthorizationState.READY if the login process succeeded.
"""
if self.proxy_server:
self._send_add_proxy()
@@ -554,11 +558,13 @@ class Telegram:
AuthorizationState.WAIT_PHONE_NUMBER: self._send_phone_number_or_bot_token,
AuthorizationState.WAIT_CODE: self._send_telegram_code,
AuthorizationState.WAIT_PASSWORD: self._send_password,
+ AuthorizationState.WAIT_REGISTRATION: self._register_user,
}
blocking_actions = (
AuthorizationState.WAIT_CODE,
AuthorizationState.WAIT_PASSWORD,
+ AuthorizationState.WAIT_REGISTRATION,
)
if self.phone:
@@ -710,3 +716,38 @@ class Telegram:
self.authorization_state = self._wait_authorization_result(result)
return self.authorization_state
+
+ def _register_user(self, first: Optional[str] = None, last: Optional[str] = None) -> AsyncResult:
+ logger.info('Registering user')
+ if first is None:
+ first = input('Enter first name: ')
+ if last is None:
+ last = input('Enter last name: ')
+
+ data = {
+ '@type': 'registerUser',
+ 'first_name': first,
+ 'last_name': last,
+ }
+ return self._send_data(data, result_id='updateAuthorizationState')
+
+ def register_user(self, first: str, last: str) -> AuthorizationState:
+ """
+ Finishes the new user registration process
+
+ Args:
+ first the user's first name
+ last the user's last name
+ If either argument is None, it will be asked to the user using the input() function
+
+ Returns
+ - AuthorizationState. The called have to call `login` to continue the login process.
+
+ Raises:
+ - RuntimeError if the login failed
+
+ """
+ result = self._register_user(first, last)
+ self.authorization_state = self._wait_authorization_result(result)
+
+ return self.authorization_state
| alexander-akhmetov/python-telegram | 534ff81b5fb48eb256ea7e8fbcd6c2ef240b142e | diff --git a/tests/test_telegram_methods.py b/tests/test_telegram_methods.py
index e5d7229..0164f04 100644
--- a/tests/test_telegram_methods.py
+++ b/tests/test_telegram_methods.py
@@ -438,6 +438,9 @@ class TestTelegram__login_non_blocking:
data={'authorization_state': {'@type': 'authorizationStateWaitCode'}}
)
telegram._send_telegram_code = lambda _: _get_async_result(
+ data={'authorization_state': {'@type': 'authorizationStateWaitRegistration'}}
+ )
+ telegram._register_user = lambda _, __: _get_async_result(
data={'authorization_state': {'@type': 'authorizationStateWaitPassword'}}
)
telegram._send_password = lambda _: _get_async_result(
@@ -448,6 +451,10 @@ class TestTelegram__login_non_blocking:
assert state == AuthorizationState.WAIT_CODE
telegram.send_code('123')
+ state = telegram.login(blocking=False)
+ assert state == AuthorizationState.WAIT_REGISTRATION
+ telegram.register_user('new', 'user')
+
state = telegram.login(blocking=False)
assert state == AuthorizationState.WAIT_PASSWORD
telegram.send_password('456')
| Support authorizationStateWaitRegistration state
After manually entering the code from an SMS I received, I got this message:
```
ValueError: 'authorizationStateWaitRegistration' is not a valid AuthorizationState
```
It looks like this state is not handled at the moment.
According to this page
https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1authorization_state_wait_registration.html
_The user is unregistered and need to accept terms of service and enter their first name and last name to finish registration._
Additional question: is there a way to provide the name and terms-of-service acceptance right now?
I replaced secrets with "11".
Error message:
```
/Users/f/PyCharm/firstTest/venv/bin/python /Users/f/PyCharm/firstTest/Main.py
[ 2][t 4][1606988251.419583082][AuthDataShared.cpp:109][#1][!Td] DcId{2} [auth_key_id:11][state:NoAuth][created_at:11.000000]
[ 2][t 4][1606988251.419795990][SessionMultiProxy.cpp:121][#1][!SessionMultiProxy:2:main] [session_count:1]
[ 2][t 4][1606988251.420382023][Session.cpp:147][#1][!SessionProxy:2:main] Generate new session_id 11 for auth key 11 for main DC10002
ValueError: 'authorizationStateWaitRegistration' is not a valid AuthorizationState
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/f/PyCharm/firstTest/Main.py", line 3, in <module>
tg = Telegram(
File "/Users/f/PyCharm/firstTest/venv/lib/python3.8/site-packages/telegram/client.py", line 134, in __init__
self.login()
File "/Users/f/PyCharm/firstTest/venv/lib/python3.8/site-packages/telegram/client.py", line 574, in login
self.authorization_state = self._wait_authorization_result(result)
File "/Users/f/PyCharm/firstTest/venv/lib/python3.8/site-packages/telegram/client.py", line 514, in _wait_authorization_result
return AuthorizationState(authorization_state)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/enum.py", line 309, in __call__
return cls.__new__(cls, value)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/enum.py", line 600, in __new__
raise exc
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/enum.py", line 584, in __new__
result = cls._missing_(value)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/enum.py", line 613, in _missing_
raise ValueError("%r is not a valid %s" % (value, cls.__name__))
ValueError: 'authorizationStateWaitRegistration' is not a valid AuthorizationState
Process finished with exit code 1
```
Code:
```python
from telegram.client import AuthorizationState, Telegram  # AuthorizationState import added (the original snippet omitted it)

tg = Telegram(
    api_id=11,
    api_hash='11',
    phone='+11',
    database_encryption_key='11',
    use_test_dc=True,
    use_message_database=False,
    login=True
)


def main():
    code = '11'
    password = '11'
    state = tg.login(blocking=False)
    if state == AuthorizationState.WAIT_CODE:
        # Telegram expects a pin code
        tg.send_code(code)
        state = tg.login(blocking=False)  # continue the login process
    if state == AuthorizationState.WAIT_PASSWORD:
        tg.send_password(password)
        state = tg.login(blocking=False)  # continue the login process
    # TODO I guess here the missing state should be handled
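    # Hypothetical handling of the missing state, sketched from the
    # register_user method the patch above introduces; the first and
    # last names are placeholders.
    if state == AuthorizationState.WAIT_REGISTRATION:
        tg.register_user('First', 'Last')
        state = tg.login(blocking=False)  # continue the login process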
    # if this is the first run, library needs to preload all chats
    # otherwise the message will not be sent
    result = tg.get_chats()
    # `tdlib` is asynchronous, so `python-telegram` always returns you an `AsyncResult` object.
    # You can wait for a result with the blocking `wait` method.
    result.wait()
    if result.error:
        print(f'get chats error: {result.error_info}')
    else:
        print(f'chats: {result.update}')
    result = tg.send_message(
        chat_id=11,
        text="A test message",
    )
    result.wait()
    if result.error:
        print(f'send message error: {result.error_info}')
    else:
        print(f'message has been sent: {result.update}')
    tg.stop()


main()
```
| 0.0 | 534ff81b5fb48eb256ea7e8fbcd6c2ef240b142e | [
"tests/test_telegram_methods.py::TestTelegram__login_non_blocking::test_login_process_with_phone"
]
| [
"tests/test_telegram_methods.py::TestTelegram::test_phone_bot_token_init",
"tests/test_telegram_methods.py::TestTelegram::test_send_message",
"tests/test_telegram_methods.py::TestTelegram::test_send_phone_number_or_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_send_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_add_message_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler_empty_handlers_list",
"tests/test_telegram_methods.py::TestTelegram::test_add_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers_should_not_be_called_for_another_update_type",
"tests/test_telegram_methods.py::TestTelegram::test_call_method",
"tests/test_telegram_methods.py::TestTelegram::test_get_web_page_instant_view",
"tests/test_telegram_methods.py::TestTelegram::test_get_me",
"tests/test_telegram_methods.py::TestTelegram::test_get_user",
"tests/test_telegram_methods.py::TestTelegram::test_get_user_full_info",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat",
"tests/test_telegram_methods.py::TestTelegram::test_get_chats",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat_history",
"tests/test_telegram_methods.py::TestTelegram::test_set_initial_params",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[key-a2V5]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[byte-key-Ynl0ZS1rZXk=]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-0]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-1]",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_update_async_result_returns_async_result_with_same_id",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_result_id_should_be_replaced_if_it_is_auth_process",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_should_do_nothing_if_already_authorized",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_with_phone"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-08-22 00:01:06+00:00 | mit | 1,022 |
|
alexander-akhmetov__python-telegram-29 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index ce36cc8..a850da7 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -2,6 +2,14 @@
Changelog
=========
+[unreleased]
+
+- **Incompatible** default path for files is changed. Now the library uses an md5 hash of the phone number or bot token instead of just a phone number.
+ It should not be noticeable for most cases, but if you rely on locally saved files or database, you need to pass the ``files_directory`` parameter to the ``telegram.client.Telegram``.
+- Fixed problem with randomly raised "Database encryption key is needed" errors during login process. (#12)
+- Fixed `stop` method execution. (#8)
+- Added ``examples/bot_login.py`` example.
+
[0.8.0] - 2019-03-17
- ``telegram.client.Telegram`` now supports any update type with a new method ``add_update_handler(handler_type, func)``
diff --git a/examples/bot_login.py b/examples/bot_login.py
new file mode 100644
index 0000000..5b1767a
--- /dev/null
+++ b/examples/bot_login.py
@@ -0,0 +1,28 @@
+import argparse
+
+from telegram.client import Telegram
+
+
+def bot_get_me(api_id, api_hash, token):
+ tg = Telegram(
+ api_id=api_id,
+ api_hash=api_hash,
+ bot_token=token,
+ database_encryption_key='changeme1234',
+ )
+ # you must call login method before others
+ tg.login()
+
+ result = tg.get_me()
+ result.wait()
+ print(result.update)
+ tg.stop()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('api_id', help='API id') # https://my.telegram.org/apps
+ parser.add_argument('api_hash', help='API hash')
+ parser.add_argument('token', help='Bot token')
+ args = parser.parse_args()
+ bot_get_me(args.api_id, args.api_hash, args.token)
diff --git a/telegram/client.py b/telegram/client.py
index 6fc1a1f..0794692 100644
--- a/telegram/client.py
+++ b/telegram/client.py
@@ -1,4 +1,5 @@
import os
+import hashlib
import time
import queue
import signal
@@ -75,7 +76,13 @@ class Telegram:
self._database_encryption_key = database_encryption_key
if not files_directory:
- files_directory = f'/tmp/.tdlib_files/{self.phone}/'
+ hasher = hashlib.md5()
+ hasher.update(
+ (self.phone or self.bot_token).encode('utf-8') # type: ignore
+ )
+ directory_name = hasher.hexdigest()
+ files_directory = f'/tmp/.tdlib_files/{directory_name}/'
+
self.files_directory = files_directory
self._authorized = False
@@ -328,15 +335,22 @@ class Telegram:
def _signal_handler(self, signum, frame):
self._is_enabled = False
+ def get_authorization_state(self):
+ logger.debug('Getting authorization state')
+ data = {'@type': 'getAuthorizationState'}
+
+ return self._send_data(data, result_id='getAuthorizationState')
+
def login(self):
"""
Login process (blocking)
- Must be called before any other call. It sends initial params to the tdlib, sets database encryption key, etc.
+ Must be called before any other call.
+ It sends initial params to the tdlib, sets database encryption key, etc.
"""
authorization_state = None
actions = {
- None: self._set_initial_params,
+ None: self.get_authorization_state,
'authorizationStateWaitTdlibParameters': self._set_initial_params,
'authorizationStateWaitEncryptionKey': self._send_encryption_key,
'authorizationStateWaitPhoneNumber': self._send_phone_number_or_bot_token,
@@ -355,7 +369,11 @@ class Telegram:
if result:
result.wait(raise_exc=True)
- authorization_state = result.update['authorization_state']['@type']
+
+ if result.id == 'getAuthorizationState':
+ authorization_state = result.update['@type']
+ else:
+ authorization_state = result.update['authorization_state']['@type']
def _set_initial_params(self) -> AsyncResult:
logger.info(
diff --git a/telegram/tdjson.py b/telegram/tdjson.py
index 2b448c0..122e015 100644
--- a/telegram/tdjson.py
+++ b/telegram/tdjson.py
@@ -111,6 +111,4 @@ class TDJson:
return result
def stop(self) -> None:
- self._tdjson._td_json_client_destroy(
- self.td_json_client
- ) # pylint: disable=protected-access
+ self._td_json_client_destroy(self.td_json_client)
diff --git a/telegram/utils.py b/telegram/utils.py
index f233fcb..634c817 100644
--- a/telegram/utils.py
+++ b/telegram/utils.py
@@ -3,7 +3,7 @@ import uuid
from typing import TYPE_CHECKING, Any, Dict, Optional
if TYPE_CHECKING:
- from telegram.client import Telegram # noqa pylint: disable=cyclic-import
+ from telegram.client import Telegram # noqa pylint: disable=cyclic-import
class AsyncResult:
diff --git a/telegram/worker.py b/telegram/worker.py
index e836579..a2b6a55 100644
--- a/telegram/worker.py
+++ b/telegram/worker.py
@@ -24,7 +24,9 @@ class SimpleWorker(BaseWorker):
"""Simple one-thread worker"""
def run(self) -> None:
- self._thread = threading.Thread(target=self._run_thread) # pylint: disable=attribute-defined-outside-init
+ self._thread = threading.Thread( # pylint: disable=attribute-defined-outside-init
+ target=self._run_thread
+ )
self._thread.daemon = True
self._thread.start()
| alexander-akhmetov/python-telegram | 0866b1f281f2274abc9081b17924c8046458dfff | diff --git a/tests/test_tdjson.py b/tests/test_tdjson.py
index bb20ee8..fbc4c17 100644
--- a/tests/test_tdjson.py
+++ b/tests/test_tdjson.py
@@ -7,13 +7,13 @@ class Test_get_tdjson_lib_path(object):
mocked_resource = mocker.Mock()
with mocker.mock_module.patch('telegram.tdjson.platform.system', mocked_system):
- with mocker.mock_module.patch('telegram.tdjson.pkg_resources.resource_filename',
- mocked_resource):
+ with mocker.mock_module.patch(
+ 'telegram.tdjson.pkg_resources.resource_filename', mocked_resource
+ ):
_get_tdjson_lib_path()
mocked_resource.assert_called_once_with(
- 'telegram',
- 'lib/darwin/libtdjson.dylib',
+ 'telegram', 'lib/darwin/libtdjson.dylib'
)
def test_for_linux(self, mocker):
@@ -21,39 +21,33 @@ class Test_get_tdjson_lib_path(object):
mocked_resource = mocker.Mock(return_value='/tmp/')
with mocker.mock_module.patch('telegram.tdjson.platform.system', mocked_system):
- with mocker.mock_module.patch('telegram.tdjson.pkg_resources.resource_filename',
- mocked_resource):
+ with mocker.mock_module.patch(
+ 'telegram.tdjson.pkg_resources.resource_filename', mocked_resource
+ ):
_get_tdjson_lib_path()
- mocked_resource.assert_called_once_with(
- 'telegram',
- 'lib/linux/libtdjson.so',
- )
+ mocked_resource.assert_called_once_with('telegram', 'lib/linux/libtdjson.so')
def test_for_windows(self, mocker):
mocked_system = mocker.Mock(return_value='Windows')
mocked_resource = mocker.Mock(return_value='/tmp/')
with mocker.mock_module.patch('telegram.tdjson.platform.system', mocked_system):
- with mocker.mock_module.patch('telegram.tdjson.pkg_resources.resource_filename',
- mocked_resource):
+ with mocker.mock_module.patch(
+ 'telegram.tdjson.pkg_resources.resource_filename', mocked_resource
+ ):
_get_tdjson_lib_path()
- mocked_resource.assert_called_once_with(
- 'telegram',
- 'lib/linux/libtdjson.so',
- )
+ mocked_resource.assert_called_once_with('telegram', 'lib/linux/libtdjson.so')
def test_unknown(self, mocker):
mocked_system = mocker.Mock(return_value='Unknown')
mocked_resource = mocker.Mock(return_value='/tmp/')
with mocker.mock_module.patch('telegram.tdjson.platform.system', mocked_system):
- with mocker.mock_module.patch('telegram.tdjson.pkg_resources.resource_filename',
- mocked_resource):
+ with mocker.mock_module.patch(
+ 'telegram.tdjson.pkg_resources.resource_filename', mocked_resource
+ ):
_get_tdjson_lib_path()
- mocked_resource.assert_called_once_with(
- 'telegram',
- 'lib/linux/libtdjson.so',
- )
+ mocked_resource.assert_called_once_with('telegram', 'lib/linux/libtdjson.so')
diff --git a/tests/test_telegram_methods.py b/tests/test_telegram_methods.py
index bbd8d1b..ada7e9e 100644
--- a/tests/test_telegram_methods.py
+++ b/tests/test_telegram_methods.py
@@ -236,6 +236,7 @@ class TestTelegram:
def test_set_initial_params(self, telegram):
async_result = telegram._set_initial_params()
+ phone_md5 = '69560384b84c896952ef20352fbce705'
exp_data = {
'@type': 'setTdlibParameters',
@@ -247,9 +248,9 @@ class TestTelegram:
'system_version': 'unknown',
'application_version': VERSION,
'system_language_code': 'en',
- 'database_directory': f'/tmp/.tdlib_files/{PHONE}/database',
+ 'database_directory': f'/tmp/.tdlib_files/{phone_md5}/database',
'use_message_database': True,
- 'files_directory': f'/tmp/.tdlib_files/{PHONE}/files',
+ 'files_directory': f'/tmp/.tdlib_files/{phone_md5}/files',
},
'@extra': {'request_id': 'updateAuthorizationState'},
}
@@ -296,29 +297,35 @@ class TestTelegram__login:
def test_login_process_with_phone(self, telegram):
telegram._authorized = False
- def _get_ar(data):
- ar = AsyncResult(client=telegram)
+ def _get_async_result(data, request_id=None):
+ result = AsyncResult(client=telegram)
- ar.update = data
+ result.update = data
+ result.id = request_id
- return ar
+ return result
# login process chain
- telegram._set_initial_params = lambda: _get_ar(
+ telegram.get_authorization_state = lambda: _get_async_result(
+ data={'@type': 'authorizationStateWaitEncryptionKey'},
+ request_id='getAuthorizationState',
+ )
+
+ telegram._set_initial_params = lambda: _get_async_result(
data={
'authorization_state': {'@type': 'authorizationStateWaitEncryptionKey'}
}
)
- telegram._send_encryption_key = lambda: _get_ar(
+ telegram._send_encryption_key = lambda: _get_async_result(
data={'authorization_state': {'@type': 'authorizationStateWaitPhoneNumber'}}
)
- telegram._send_phone_number_or_bot_token = lambda: _get_ar(
+ telegram._send_phone_number_or_bot_token = lambda: _get_async_result(
data={'authorization_state': {'@type': 'authorizationStateWaitCode'}}
)
- telegram._send_telegram_code = lambda: _get_ar(
+ telegram._send_telegram_code = lambda: _get_async_result(
data={'authorization_state': {'@type': 'authorizationStateWaitPassword'}}
)
- telegram._send_password = lambda: _get_ar(
+ telegram._send_password = lambda: _get_async_result(
data={'authorization_state': {'@type': 'authorizationStateReady'}}
)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 8ef8e2c..51ea333 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -21,10 +21,7 @@ class TestAsyncResult(object):
def test_parse_update_with_error(self):
ar = AsyncResult(client=None)
- update = {
- '@type': 'error',
- 'some': 'data',
- }
+ update = {'@type': 'error', 'some': 'data'}
assert ar.error is False
assert ar.error_info is None
@@ -38,10 +35,7 @@ class TestAsyncResult(object):
def test_parse_update_ok(self):
ar = AsyncResult(client=None)
- update = {
- '@type': 'ok',
- 'some': 'data',
- }
+ update = {'@type': 'ok', 'some': 'data'}
ar.parse_update(update)
@@ -52,10 +46,7 @@ class TestAsyncResult(object):
def test_parse_update(self):
ar = AsyncResult(client=None)
- update = {
- '@type': 'some_type',
- 'some': 'data',
- }
+ update = {'@type': 'some_type', 'some': 'data'}
ar.parse_update(update)
| Random error while decrypting database
Regular class initialization
```python
client = Telegram(
    api_id=self.api_id,
    api_hash=self.api_hash,
    library_path=self.library_path,
    phone='+XXXXXXXXXXXX',
    database_encryption_key='XXXXXXXXXXX',
    use_test_dc=True,
    files_directory=self.files_directory,
    use_message_database=False,
    login=True,
)
```
This randomly causes an error:
```
Telegram error: {'@type': 'error', 'code': 401, 'message': 'Database encryption key is needed: call checkDatabaseEncryptionKey first', '@extra': {'request_id': 'updateAuthorizationState'}}
```
The next time you start it, everything can work fine.
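The patch above fixes this by first asking tdlib for the current authorization state instead of always starting with setTdlibParameters; a condensed sketch of that idea (not the exact library code):

```python
# Condensed from the get_authorization_state method added by the fix:
# query tdlib for where the login process currently is, then let
# login() dispatch the matching next step.
def get_authorization_state(client):
    data = {'@type': 'getAuthorizationState'}
    return client._send_data(data, result_id='getAuthorizationState')
```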
<details><summary>Full log</summary>
<p>
```python
INFO:telegram.tdjson:Using shared library "/app/telegram_data/vendor/tdlib/libtdjson.so"
INFO:telegram.client:[Telegram.td_listener] started
INFO:telegram.worker:[SimpleWorker] started
INFO:telegram.client:[login] Login process has been started
INFO:telegram.client:[login] current authorization state: None
INFO:telegram.client:Setting tdlib initial params: files_dir=/app/telegram_data/data/ test_dc=True
DEBUG:telegram.tdjson:[me ==>] Sent b'{"@type": "setTdlibParameters", "parameters": {"use_test_dc": true, "api_id": API_ID, "api_hash": "API_HASH", "device_model": "python-telegram", "system_version": "unknown", "application_version": "0.7.0", "system_language_code": "en", "database_directory": "/app/telegram_data/data/database", "use_message_database": false, "files_directory": "/app/telegram_data/data/files"}, "@extra": {"request_id": "updateAuthorizationState"}}'
DEBUG:telegram.tdjson:[me <==] Received {'@type': 'updateAuthorizationState', 'authorization_state': {'@type': 'authorizationStateWaitTdlibParameters'}}
DEBUG:telegram.tdjson:[me <==] Received {'@type': 'updateAuthorizationState', 'authorization_state': {'@type': 'authorizationStateWaitEncryptionKey', 'is_encrypted': True}}
DEBUG:telegram.client:async_result has not been found in by request_id=updateAuthorizationState
DEBUG:telegram.tdjson:[me <==] Received {'@type': 'ok', '@extra': {'request_id': 'updateAuthorizationState'}}
DEBUG:telegram.client:async_result has not been found in by request_id=updateAuthorizationState
INFO:telegram.client:[login] current authorization state: authorizationStateWaitTdlibParameters
INFO:telegram.client:Setting tdlib initial params: files_dir=/app/telegram_data/data/ test_dc=True
DEBUG:telegram.tdjson:[me ==>] Sent b'{"@type": "setTdlibParameters", "parameters": {"use_test_dc": true, "api_id": API_ID, "api_hash": "API_HASH", "device_model": "python-telegram", "system_version": "unknown", "application_version": "0.7.0", "system_language_code": "en", "database_directory": "/app/telegram_data/data/database", "use_message_database": false, "files_directory": "/app/telegram_data/data/files"}, "@extra": {"request_id": "updateAuthorizationState"}}'
DEBUG:telegram.tdjson:[me <==] Received {'@type': 'error', 'code': 401, 'message': 'Database encryption key is needed: call checkDatabaseEncryptionKey first', '@extra': {'request_id': 'updateAuthorizationState'}}
Traceback (most recent call last):
File "manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.7/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.7/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.7/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.7/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/app/telegram_data/management/commands/test.py", line 38, in handle
login=True,
File "/usr/local/lib/python3.7/site-packages/telegram/client.py", line 90, in __init__
self.login()
File "/usr/local/lib/python3.7/site-packages/telegram/client.py", line 339, in login
result.wait(raise_exc=True)
File "/usr/local/lib/python3.7/site-packages/telegram/utils.py", line 39, in wait
raise RuntimeError(f'Telegram error: {self.error_info}')
RuntimeError: Telegram error: {'@type': 'error', 'code': 401, 'message': 'Database encryption key is needed: call checkDatabaseEncryptionKey first', '@extra': {'request_id': 'updateAuthorizationState'}}
```
</p>
</details> | 0.0 | 0866b1f281f2274abc9081b17924c8046458dfff | [
"tests/test_telegram_methods.py::TestTelegram::test_set_initial_params"
]
| [
"tests/test_tdjson.py::Test_get_tdjson_lib_path::test_for_darwin",
"tests/test_tdjson.py::Test_get_tdjson_lib_path::test_for_linux",
"tests/test_tdjson.py::Test_get_tdjson_lib_path::test_for_windows",
"tests/test_tdjson.py::Test_get_tdjson_lib_path::test_unknown",
"tests/test_telegram_methods.py::TestTelegram::test_phone_bot_token_init",
"tests/test_telegram_methods.py::TestTelegram::test_send_message",
"tests/test_telegram_methods.py::TestTelegram::test_send_phone_number_or_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_send_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_add_message_handler",
"tests/test_telegram_methods.py::TestTelegram::test_add_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers_should_not_be_called_for_another_update_type",
"tests/test_telegram_methods.py::TestTelegram::test_call_method",
"tests/test_telegram_methods.py::TestTelegram::test_get_web_page_instant_view",
"tests/test_telegram_methods.py::TestTelegram::test_get_me",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat",
"tests/test_telegram_methods.py::TestTelegram::test_get_chats",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat_history",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_update_async_result_returns_async_result_with_same_id",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_result_id_should_be_replaced_if_it_is_auth_process",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_should_do_nothing_if_already_authorized",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_with_phone",
"tests/test_utils.py::TestAsyncResult::test_initial_params",
"tests/test_utils.py::TestAsyncResult::test_str",
"tests/test_utils.py::TestAsyncResult::test_parse_update_with_error",
"tests/test_utils.py::TestAsyncResult::test_parse_update_ok",
"tests/test_utils.py::TestAsyncResult::test_parse_update",
"tests/test_utils.py::TestAsyncResult::test_wait_with_timeout",
"tests/test_utils.py::TestAsyncResult::test_wait_with_update",
"tests/test_utils.py::TestAsyncResult::test_wait_with_error_and_raise_exc",
"tests/test_utils.py::TestAsyncResult::test_wait_with_error_and_without_raise_exc"
]
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-04-05 15:45:36+00:00 | mit | 1,023 |
|
alexander-akhmetov__python-telegram-73 | diff --git a/telegram/client.py b/telegram/client.py
index 75b538d..a32387e 100644
--- a/telegram/client.py
+++ b/telegram/client.py
@@ -345,6 +345,16 @@ class Telegram:
for handler in self._update_handlers[update_type]:
self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
+ def remove_update_handler(self, handler_type: str, func: Callable) -> None:
+ """
+ Remove a handler with the specified type
+ """
+ try:
+ self._update_handlers[handler_type].remove(func)
+ except (ValueError, KeyError):
+ # not in the list
+ pass
+
def add_message_handler(self, func: Callable) -> None:
self.add_update_handler(MESSAGE_HANDLER_TYPE, func)
| alexander-akhmetov/python-telegram | c8d2621fcf195ca094732a8795087f5493530e1e | diff --git a/tests/test_telegram_methods.py b/tests/test_telegram_methods.py
index add7068..b019003 100644
--- a/tests/test_telegram_methods.py
+++ b/tests/test_telegram_methods.py
@@ -99,6 +99,29 @@ class TestTelegram:
assert telegram._update_handlers[MESSAGE_HANDLER_TYPE] == [my_handler]
+ def test_remove_update_handler(self, telegram):
+ # check remove_update_handler
+ assert telegram._update_handlers[MESSAGE_HANDLER_TYPE] == []
+
+ def my_handler():
+ pass
+
+ telegram.add_message_handler(my_handler)
+
+ telegram.remove_update_handler(MESSAGE_HANDLER_TYPE, my_handler)
+
+ assert telegram._update_handlers[MESSAGE_HANDLER_TYPE] == []
+
+ def test_remove_update_handler_empty_handlers_list(self, telegram):
+ telegram._update_handlers = {}
+
+ def my_handler():
+ pass
+
+ telegram.remove_update_handler(MESSAGE_HANDLER_TYPE, my_handler)
+
+ assert telegram._update_handlers == {}
+
def test_add_update_handler(self, telegram):
# check that add_update_handler function
# appends passsed func to _update_handlers[type] list
| [Feature Request] Function to remove handler
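The patch above adds remove_update_handler(handler_type, func) for this; a hedged usage sketch (`tg` is assumed to be a telegram.client.Telegram instance):

```python
# Usage sketch; MESSAGE_HANDLER_TYPE follows the patched client module.
from telegram.client import MESSAGE_HANDLER_TYPE


def my_handler(update):
    print(update)


tg.add_message_handler(my_handler)
# ... later, detach the handler again:
tg.remove_update_handler(MESSAGE_HANDLER_TYPE, my_handler)
```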
Shouldn't be that complicated but would be nice to have. | 0.0 | c8d2621fcf195ca094732a8795087f5493530e1e | [
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler_empty_handlers_list"
]
| [
"tests/test_telegram_methods.py::TestTelegram::test_phone_bot_token_init",
"tests/test_telegram_methods.py::TestTelegram::test_send_message",
"tests/test_telegram_methods.py::TestTelegram::test_send_phone_number_or_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_send_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_add_message_handler",
"tests/test_telegram_methods.py::TestTelegram::test_add_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers_should_not_be_called_for_another_update_type",
"tests/test_telegram_methods.py::TestTelegram::test_call_method",
"tests/test_telegram_methods.py::TestTelegram::test_get_web_page_instant_view",
"tests/test_telegram_methods.py::TestTelegram::test_get_me",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat",
"tests/test_telegram_methods.py::TestTelegram::test_get_chats",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat_history",
"tests/test_telegram_methods.py::TestTelegram::test_set_initial_params",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_update_async_result_returns_async_result_with_same_id",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_result_id_should_be_replaced_if_it_is_auth_process",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_should_do_nothing_if_already_authorized",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_with_phone"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2020-02-01 09:37:03+00:00 | mit | 1,024 |
|
alexander-akhmetov__python-telegram-77 | diff --git a/telegram/utils.py b/telegram/utils.py
index 4e3100c..0f55d3b 100644
--- a/telegram/utils.py
+++ b/telegram/utils.py
@@ -1,11 +1,15 @@
import uuid
import threading
+import logging
from typing import TYPE_CHECKING, Any, Dict, Optional
if TYPE_CHECKING:
from telegram.client import Telegram # noqa pylint: disable=cyclic-import
+logger = logging.getLogger(__name__)
+
+
class AsyncResult:
"""
tdlib is asynchronous, and this class helps you get results back.
@@ -41,12 +45,18 @@ class AsyncResult:
raise RuntimeError(f'Telegram error: {self.error_info}')
def parse_update(self, update: Dict[Any, Any]) -> None:
- if update.get('@type') == 'ok':
- self.ok_received = True
- self._ready.set()
- return False
+ update_type = update.get('@type')
- if update.get('@type') == 'error':
+ logger.debug('update id=%s type=%s received', self.id, update_type)
+
+ if update_type == 'ok':
+ self.ok_received = True
+ if self.id == 'updateAuthorizationState':
+ # For updateAuthorizationState commands tdlib sends
+ # @type: ok responses
+ # but we want to wait longer to receive the new authorization state
+ return False
+ elif update_type == 'error':
self.error = True
self.error_info = update
else:
| alexander-akhmetov/python-telegram | 8444d607fe572f088fe10a8cd6d81be898bc7c1e | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 7d9b765..f0dcef5 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -36,6 +36,7 @@ class TestAsyncResult:
assert async_result.error_info == update
assert async_result.update is None
assert async_result.ok_received is False
+ assert async_result._ready.is_set() is True
def test_parse_update_ok(self):
async_result = AsyncResult(client=None)
@@ -47,6 +48,27 @@ class TestAsyncResult:
assert async_result.error_info is None
assert async_result.update is None
assert async_result.ok_received is True
+ assert async_result._ready.is_set() is True
+
+ def test_parse_update_authorization_state_ok(self):
+ # when id=updateAuthorizationState
+ # and @type=ok
+ # it should not set async_result._ready
+ # because for updateAuthorizationState we want to wait for the
+ # next message with result_id=updateAuthorizationState
+ async_result = AsyncResult(
+ client=None,
+ result_id='updateAuthorizationState',
+ )
+ update = {'@type': 'ok', 'some': 'data'}
+
+ async_result.parse_update(update)
+
+ assert async_result.error is False
+ assert async_result.error_info is None
+ assert async_result.update is None
+ assert async_result.ok_received is True
+ assert async_result._ready.is_set() is False
def test_parse_update(self):
async_result = AsyncResult(client=None)
@@ -58,6 +80,7 @@ class TestAsyncResult:
assert async_result.error_info is None
assert async_result.update == update
assert async_result.ok_received is False
+ assert async_result._ready.is_set() is True
def test_wait_with_timeout(self):
async_result = AsyncResult(client=None)
| Client waits for a result forever if the result update only has @type='ok'
Hello,
I was playing with the master branch of python-telegram and found a bug. When I try to set my first/last name, the AsyncResult waits forever. It happens because the self._ready event in AsyncResult is set only if the update `@type != 'ok'`.
I guess it was made for login purposes, where a result with `@type='ok'` does not mean that the result is ready.
This is my code:
```python
...
tg.login()
result = tg.get_me()
result.wait()
print(result.update)
logger.debug("Setting name")
result = tg.call_method('setName', params={'first_name': 'First', 'last_name': 'Last'})
result.wait() # there it waits forever
print(result.update)
```
I changed the `parse_update` method of AsyncResult as follows by adding the condition `if self.id != 'updateAuthorizationState'`, and it started working.
```python
def parse_update(self, update: Dict[Any, Any]) -> None:
    if update.get('@type') == 'ok':
        self.ok_received = True
        # added following 3 lines
        if self.id != 'updateAuthorizationState':
            self._ready.set()
            return True
        return False
    if update.get('@type') == 'error':
        self.error = True
        self.error_info = update
    else:
        self.update = update
    self._ready.set()
    return True
```
But I'm not 100% sure that my way is correct; I just started playing with the Telegram lib a day ago.
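For comparison, the merged fix (the patch above) keeps the special case keyed on the result id instead: for 'ok' updates, only an updateAuthorizationState request keeps waiting for the follow-up state. A condensed sketch, not standalone-runnable:

```python
# Condensed from the patch above: an 'ok' update marks ok_received, but
# for an updateAuthorizationState request the result is not marked
# ready, so the caller keeps waiting for the new authorization state.
def parse_update(self, update):
    update_type = update.get('@type')
    if update_type == 'ok':
        self.ok_received = True
        if self.id == 'updateAuthorizationState':
            return False
    elif update_type == 'error':
        self.error = True
        self.error_info = update
    else:
        self.update = update
    self._ready.set()
    return True
```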
| 0.0 | 8444d607fe572f088fe10a8cd6d81be898bc7c1e | [
"tests/test_utils.py::TestAsyncResult::test_parse_update_authorization_state_ok"
]
| [
"tests/test_utils.py::TestAsyncResult::test_initial_params",
"tests/test_utils.py::TestAsyncResult::test_str",
"tests/test_utils.py::TestAsyncResult::test_parse_update_with_error",
"tests/test_utils.py::TestAsyncResult::test_parse_update_ok",
"tests/test_utils.py::TestAsyncResult::test_parse_update",
"tests/test_utils.py::TestAsyncResult::test_wait_with_timeout",
"tests/test_utils.py::TestAsyncResult::test_wait_with_update",
"tests/test_utils.py::TestAsyncResult::test_wait_with_error_and_raise_exc",
"tests/test_utils.py::TestAsyncResult::test_wait_with_error_and_without_raise_exc"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2020-02-24 19:01:51+00:00 | mit | 1,025 |
|
alexander-akhmetov__python-telegram-92 | diff --git a/telegram/client.py b/telegram/client.py
index e427a88..17bf2a5 100644
--- a/telegram/client.py
+++ b/telegram/client.py
@@ -6,8 +6,9 @@ import signal
import typing
import getpass
import logging
+import base64
import threading
-from typing import Any, Dict, List, Type, Callable, Optional, DefaultDict, Tuple
+from typing import Any, Dict, List, Type, Callable, Optional, DefaultDict, Tuple, Union
from types import FrameType
from collections import defaultdict
@@ -27,7 +28,7 @@ class Telegram:
self,
api_id: int,
api_hash: str,
- database_encryption_key: str,
+ database_encryption_key: Union[str, bytes],
phone: Optional[str] = None,
bot_token: Optional[str] = None,
library_path: Optional[str] = None,
@@ -530,9 +531,14 @@ class Telegram:
def _send_encryption_key(self) -> AsyncResult:
logger.info('Sending encryption key')
+
+ key = self._database_encryption_key
+ if isinstance(key, str):
+ key = key.encode()
+
data = {
'@type': 'checkDatabaseEncryptionKey',
- 'encryption_key': self._database_encryption_key,
+ 'encryption_key': base64.b64encode(key).decode(),
}
return self._send_data(data, result_id='updateAuthorizationState')
| alexander-akhmetov/python-telegram | 05eff25b59a3520e77449e14618f5d34d83c1388 | diff --git a/tests/test_telegram_methods.py b/tests/test_telegram_methods.py
index 24cec65..96a1f01 100644
--- a/tests/test_telegram_methods.py
+++ b/tests/test_telegram_methods.py
@@ -17,13 +17,20 @@ DATABASE_ENCRYPTION_KEY = 'changeme1234'
def telegram():
with patch('telegram.client.TDJson'):
with patch('telegram.client.threading'):
- tg = Telegram(
- api_id=API_ID,
- api_hash=API_HASH,
- phone=PHONE,
- library_path=LIBRARY_PATH,
- database_encryption_key=DATABASE_ENCRYPTION_KEY,
- )
+ return _get_telegram_instance()
+
+
+def _get_telegram_instance(**kwargs):
+ kwargs.setdefault('api_id', API_ID)
+ kwargs.setdefault('api_hash', API_HASH)
+ kwargs.setdefault('phone', PHONE)
+ kwargs.setdefault('library_path', LIBRARY_PATH)
+ kwargs.setdefault('database_encryption_key', DATABASE_ENCRYPTION_KEY)
+
+ with patch('telegram.client.TDJson'):
+ with patch('telegram.client.threading'):
+ tg = Telegram(**kwargs)
+
return tg
@@ -278,6 +285,25 @@ class TestTelegram:
telegram._tdjson.send.assert_called_once_with(exp_data)
assert async_result.id == 'updateAuthorizationState'
+ @pytest.mark.parametrize(
+ 'key, exp_key',
+ [('key', 'a2V5'), (b'byte-key', 'Ynl0ZS1rZXk='), ('', ''), (b'', '')],
+ )
+ def test_send_encryption_key(self, key, exp_key):
+ # check that _send_encryption_key calls tdlib with
+ # correct parameters encoded using base64
+ tg = _get_telegram_instance(database_encryption_key=key)
+
+ tg._send_encryption_key()
+
+ exp_data = {
+ '@type': 'checkDatabaseEncryptionKey',
+ 'encryption_key': exp_key,
+ '@extra': {'request_id': 'updateAuthorizationState'},
+ }
+
+ tg._tdjson.send.assert_called_once_with(exp_data)
+
class TestTelegram__update_async_result:
def test_update_async_result_returns_async_result_with_same_id(self, telegram):
| "Failed to parse JSON object as TDLib request: Wrong padding length" When tried to login
File "/app/index.py", line 17, in <module>
tg.login()
File "/usr/local/lib/python3.8/site-packages/telegram/client.py", line 439, in login
result.wait(raise_exc=True)
File "/usr/local/lib/python3.8/site-packages/telegram/utils.py", line 41, in wait
raise RuntimeError(f'Telegram error: {self.error_info}')
RuntimeError: Telegram error: {'@type': 'error', 'code': 400, 'message': 'Failed to parse JSON object as TDLib request: Wrong padding length', '@extra': {'request_id': 'updateAuthorizationState'}}
The code is running in Docker!
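The patch above addresses this by base64-encoding the database encryption key before sending checkDatabaseEncryptionKey; a minimal sketch of the encoding step (the key value is a placeholder):

```python
# Minimal sketch of the fix's encoding step: tdlib expects the bytes
# value to arrive base64-encoded inside the JSON request.
import base64

key = 'changeme1234'.encode()
payload = {
    '@type': 'checkDatabaseEncryptionKey',
    'encryption_key': base64.b64encode(key).decode(),
}
```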
Thank you | 0.0 | 05eff25b59a3520e77449e14618f5d34d83c1388 | [
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[key-a2V5]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[byte-key-Ynl0ZS1rZXk=]",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-1]"
]
| [
"tests/test_telegram_methods.py::TestTelegram::test_phone_bot_token_init",
"tests/test_telegram_methods.py::TestTelegram::test_send_message",
"tests/test_telegram_methods.py::TestTelegram::test_send_phone_number_or_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_send_bot_token",
"tests/test_telegram_methods.py::TestTelegram::test_add_message_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_remove_update_handler_empty_handlers_list",
"tests/test_telegram_methods.py::TestTelegram::test_add_update_handler",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers",
"tests/test_telegram_methods.py::TestTelegram::test_run_handlers_should_not_be_called_for_another_update_type",
"tests/test_telegram_methods.py::TestTelegram::test_call_method",
"tests/test_telegram_methods.py::TestTelegram::test_get_web_page_instant_view",
"tests/test_telegram_methods.py::TestTelegram::test_get_me",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat",
"tests/test_telegram_methods.py::TestTelegram::test_get_chats",
"tests/test_telegram_methods.py::TestTelegram::test_get_chat_history",
"tests/test_telegram_methods.py::TestTelegram::test_set_initial_params",
"tests/test_telegram_methods.py::TestTelegram::test_send_encryption_key[-0]",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_update_async_result_returns_async_result_with_same_id",
"tests/test_telegram_methods.py::TestTelegram__update_async_result::test_result_id_should_be_replaced_if_it_is_auth_process",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_should_do_nothing_if_already_authorized",
"tests/test_telegram_methods.py::TestTelegram__login::test_login_process_with_phone"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2020-03-25 09:47:08+00:00 | mit | 1,026 |
|
alexgolec__tda-api-105 | diff --git a/tda/auth.py b/tda/auth.py
index ff8d2cf..fec6b1e 100644
--- a/tda/auth.py
+++ b/tda/auth.py
@@ -3,6 +3,7 @@
from requests_oauthlib import OAuth2Session
+import json
import logging
import pickle
import time
@@ -19,11 +20,28 @@ def __token_updater(token_path):
def update_token(t):
get_logger().info('Updating token to file {}'.format(token_path))
- with open(token_path, 'wb') as f:
- pickle.dump(t, f)
+ with open(token_path, 'w') as f:
+ json.dump(t, f)
return update_token
+def __token_loader(token_path):
+ def load_token():
+ get_logger().info('Loading token from file {}'.format(token_path))
+
+ with open(token_path, 'rb') as f:
+ token_data = f.read()
+ try:
+ return json.loads(token_data.decode())
+ except ValueError:
+ get_logger().warning(
+ "Unable to load JSON token from file {}, falling back to pickle"\
+ .format(token_path)
+ )
+ return pickle.loads(token_data)
+ return load_token
+
+
def __normalize_api_key(api_key):
api_key_suffix = '@AMER.OAUTHAP'
@@ -51,9 +69,8 @@ def client_from_token_file(token_path, api_key):
:param api_key: Your TD Ameritrade application's API key, also known as the
client ID.
'''
- def load():
- with open(token_path, 'rb') as f:
- return pickle.load(f)
+
+ load = __token_loader(token_path)
return client_from_access_functions(
api_key, load, __token_updater(token_path))
| alexgolec/tda-api | 8e4897d378b20dfd9fd14abf354902a8414f48e7 | diff --git a/tests/auth_test.py b/tests/auth_test.py
index adbd092..8b165b0 100644
--- a/tests/auth_test.py
+++ b/tests/auth_test.py
@@ -3,6 +3,7 @@ from tests.test_utils import no_duplicates
from unittest.mock import patch, ANY, MagicMock
from unittest.mock import ANY as _
+import json
import os
import pickle
import tempfile
@@ -17,11 +18,14 @@ class ClientFromTokenFileTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.pickle_path = os.path.join(self.tmp_dir.name, 'token.pickle')
+ self.json_path = os.path.join(self.tmp_dir.name, 'token.json')
self.token = {'token': 'yes'}
def write_token(self):
with open(self.pickle_path, 'wb') as f:
pickle.dump(self.token, f)
+ with open(self.json_path, 'w') as f:
+ json.dump(self.token, f)
@no_duplicates
def test_no_such_file(self):
@@ -31,7 +35,7 @@ class ClientFromTokenFileTest(unittest.TestCase):
@no_duplicates
@patch('tda.auth.Client')
@patch('tda.auth.OAuth2Session')
- def test_file_exists(self, session, client):
+ def test_pickle_loads(self, session, client):
self.write_token()
client.return_value = 'returned client'
@@ -46,13 +50,31 @@ class ClientFromTokenFileTest(unittest.TestCase):
auto_refresh_kwargs=_,
token_updater=_)
+ @no_duplicates
+ @patch('tda.auth.Client')
+ @patch('tda.auth.OAuth2Session')
+ def test_json_loads(self, session, client):
+ self.write_token()
+
+ client.return_value = 'returned client'
+
+ self.assertEqual('returned client',
+ auth.client_from_token_file(self.json_path, API_KEY))
+ client.assert_called_once_with(API_KEY, _)
+ session.assert_called_once_with(
+ API_KEY,
+ token=self.token,
+ auto_refresh_url=_,
+ auto_refresh_kwargs=_,
+ token_updater=_)
+
@no_duplicates
@patch('tda.auth.Client')
@patch('tda.auth.OAuth2Session')
def test_token_updater_updates_token(self, session, client):
self.write_token()
- auth.client_from_token_file(self.pickle_path, API_KEY)
+ auth.client_from_token_file(self.json_path, API_KEY)
session.assert_called_once()
session_call = session.mock_calls[0]
@@ -60,8 +82,8 @@ class ClientFromTokenFileTest(unittest.TestCase):
updated_token = {'updated': 'token'}
token_updater(updated_token)
- with open(self.pickle_path, 'rb') as f:
- self.assertEqual(pickle.load(f), updated_token)
+ with open(self.json_path, 'r') as f:
+ self.assertEqual(json.load(f), updated_token)
@no_duplicates
@@ -149,7 +171,7 @@ class ClientFromLoginFlow(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
- self.pickle_path = os.path.join(self.tmp_dir.name, 'token.pickle')
+ self.json_path = os.path.join(self.tmp_dir.name, 'token.json')
self.token = {'token': 'yes'}
@no_duplicates
@@ -171,11 +193,11 @@ class ClientFromLoginFlow(unittest.TestCase):
self.assertEqual('returned client',
auth.client_from_login_flow(
webdriver, API_KEY, REDIRECT_URL,
- self.pickle_path,
+ self.json_path,
redirect_wait_time_seconds=0.0))
- with open(self.pickle_path, 'rb') as f:
- self.assertEqual(self.token, pickle.load(f))
+ with open(self.json_path, 'r') as f:
+ self.assertEqual(self.token, json.load(f))
@no_duplicates
@patch('tda.auth.Client')
@@ -198,11 +220,11 @@ class ClientFromLoginFlow(unittest.TestCase):
self.assertEqual('returned client',
auth.client_from_login_flow(
webdriver, API_KEY, redirect_url,
- self.pickle_path,
+ self.json_path,
redirect_wait_time_seconds=0.0))
- with open(self.pickle_path, 'rb') as f:
- self.assertEqual(self.token, pickle.load(f))
+ with open(self.json_path, 'r') as f:
+ self.assertEqual(self.token, json.load(f))
@no_duplicates
@patch('tda.auth.Client')
@@ -227,11 +249,11 @@ class ClientFromLoginFlow(unittest.TestCase):
self.assertEqual('returned client',
auth.client_from_login_flow(
webdriver, API_KEY, redirect_url,
- self.pickle_path,
+ self.json_path,
redirect_wait_time_seconds=0.0))
- with open(self.pickle_path, 'rb') as f:
- self.assertEqual(self.token, pickle.load(f))
+ with open(self.json_path, 'r') as f:
+ self.assertEqual(self.token, json.load(f))
@no_duplicates
@patch('tda.auth.Client')
@@ -252,7 +274,7 @@ class ClientFromLoginFlow(unittest.TestCase):
self.assertEqual('returned client',
auth.client_from_login_flow(
webdriver, 'API_KEY', REDIRECT_URL,
- self.pickle_path,
+ self.json_path,
redirect_wait_time_seconds=0.0))
self.assertEqual(
@@ -280,7 +302,7 @@ class ClientFromLoginFlow(unittest.TestCase):
'timed out waiting for redirect'):
auth.client_from_login_flow(
webdriver, API_KEY, redirect_url,
- self.pickle_path,
+ self.json_path,
redirect_wait_time_seconds=0.0)
@@ -288,12 +310,12 @@ class EasyClientTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
- self.pickle_path = os.path.join(self.tmp_dir.name, 'token.pickle')
+ self.json_path = os.path.join(self.tmp_dir.name, 'token.json')
self.token = {'token': 'yes'}
def write_token(self):
- with open(self.pickle_path, 'wb') as f:
- pickle.dump(self.token, f)
+ with open(self.json_path, 'w') as f:
+ json.dump(self.token, f)
@no_duplicates
@patch('tda.auth.client_from_token_file')
@@ -302,7 +324,7 @@ class EasyClientTest(unittest.TestCase):
client_from_token_file.side_effect = FileNotFoundError()
with self.assertRaises(FileNotFoundError):
- auth.easy_client(API_KEY, REDIRECT_URL, self.pickle_path)
+ auth.easy_client(API_KEY, REDIRECT_URL, self.json_path)
@no_duplicates
@patch('tda.auth.client_from_token_file')
@@ -311,7 +333,7 @@ class EasyClientTest(unittest.TestCase):
client_from_token_file.return_value = self.token
self.assertEquals(self.token,
- auth.easy_client(API_KEY, REDIRECT_URL, self.pickle_path))
+ auth.easy_client(API_KEY, REDIRECT_URL, self.json_path))
@no_duplicates
@patch('tda.auth.client_from_login_flow')
@@ -327,7 +349,7 @@ class EasyClientTest(unittest.TestCase):
self.assertEquals('returned client',
auth.easy_client(
- API_KEY, REDIRECT_URL, self.pickle_path,
+ API_KEY, REDIRECT_URL, self.json_path,
webdriver_func=webdriver_func))
webdriver_func.assert_called_once()
| Current token format is not portable
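(For context, a minimal sketch of the portable loading being asked for, mirroring the `__token_loader` helper added in the patch above: try JSON first, and fall back to pickle only for legacy token files.)

```python
import json
import pickle

def load_token(token_path):
    # New tokens are written as JSON; older token files may still be pickled.
    with open(token_path, 'rb') as f:
        token_data = f.read()
    try:
        return json.loads(token_data.decode())
    except ValueError:
        return pickle.loads(token_data)
```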
Pickle is not a portable data format; we should use JSON, which is generally the standard for storing OAuth credentials across multiple libraries. | 0.0 | 8e4897d378b20dfd9fd14abf354902a8414f48e7 | [
"tests/auth_test.py::ClientFromTokenFileTest::test_json_loads",
"tests/auth_test.py::ClientFromTokenFileTest::test_token_updater_updates_token",
"tests/auth_test.py::ClientFromLoginFlow::test_no_token_file_http",
"tests/auth_test.py::ClientFromLoginFlow::test_no_token_file_http_redirected_to_https",
"tests/auth_test.py::ClientFromLoginFlow::test_no_token_file_https"
]
| [
"tests/auth_test.py::ClientFromTokenFileTest::test_api_key_is_normalized",
"tests/auth_test.py::ClientFromTokenFileTest::test_no_such_file",
"tests/auth_test.py::ClientFromTokenFileTest::test_pickle_loads",
"tests/auth_test.py::ClientFromAccessFunctionsTest::test_success_no_write_func",
"tests/auth_test.py::ClientFromAccessFunctionsTest::test_success_with_write_func",
"tests/auth_test.py::ClientFromLoginFlow::test_normalize_api_key",
"tests/auth_test.py::ClientFromLoginFlow::test_unexpected_redirect_url",
"tests/auth_test.py::EasyClientTest::test_no_token_file_no_wd_func",
"tests/auth_test.py::EasyClientTest::test_no_token_file_with_wd_func",
"tests/auth_test.py::EasyClientTest::test_token_file"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2020-09-11 22:59:30+00:00 | mit | 1,027 |
|
alexgolec__tda-api-129 | diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 2820fe4..f482edc 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -22,7 +22,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python.version }}
- uses: actions/[email protected]
+ uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python.version }}
@@ -57,7 +57,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python.version }}
- uses: actions/[email protected]
+ uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install dependencies
diff --git a/tda/auth.py b/tda/auth.py
index 5187f14..4ec47c9 100644
--- a/tda/auth.py
+++ b/tda/auth.py
@@ -263,9 +263,8 @@ def client_from_access_functions(api_key, token_read_func,
api_key = __normalize_api_key(api_key)
session_kwargs = {
- 'token': token,
- 'auto_refresh_url': 'https://api.tdameritrade.com/v1/oauth2/token',
- 'auto_refresh_kwargs': {'client_id': api_key},
+ 'token': token,
+ 'token_endpoint': 'https://api.tdameritrade.com/v1/oauth2/token',
}
if token_write_func is not None:
| alexgolec/tda-api | fb7c7e9ee3205bc2f0cbedf2def89707afb78d56 | diff --git a/tests/test_auth.py b/tests/test_auth.py
index a39b05a..be6ea4b 100644
--- a/tests/test_auth.py
+++ b/tests/test_auth.py
@@ -46,8 +46,7 @@ class ClientFromTokenFileTest(unittest.TestCase):
session.assert_called_once_with(
API_KEY,
token=self.token,
- auto_refresh_url=_,
- auto_refresh_kwargs=_,
+ token_endpoint=_,
update_token=_)
@no_duplicates
@@ -64,8 +63,7 @@ class ClientFromTokenFileTest(unittest.TestCase):
session.assert_called_once_with(
API_KEY,
token=self.token,
- auto_refresh_url=_,
- auto_refresh_kwargs=_,
+ token_endpoint=_,
update_token=_)
@no_duplicates
@@ -100,8 +98,7 @@ class ClientFromTokenFileTest(unittest.TestCase):
session.assert_called_once_with(
'[email protected]',
token=self.token,
- auto_refresh_url=_,
- auto_refresh_kwargs=_,
+ token_endpoint=_,
update_token=_)
@@ -128,8 +125,7 @@ class ClientFromAccessFunctionsTest(unittest.TestCase):
session.assert_called_once_with(
'[email protected]',
token=token,
- auto_refresh_url=_,
- auto_refresh_kwargs=_,
+ token_endpoint=_,
update_token=_)
token_read_func.assert_called_once()
@@ -159,8 +155,7 @@ class ClientFromAccessFunctionsTest(unittest.TestCase):
session.assert_called_once_with(
'[email protected]',
token=token,
- auto_refresh_url=_,
- auto_refresh_kwargs=_)
+ token_endpoint=_)
token_read_func.assert_called_once()
| Auto token-refresh not working any more
**Description of Bug**
The login flow that's supposed to do an auto-refresh of the token loaded from disk doesn't work any more. I traced it to the parameters that `client_from_access_functions` passes to the `authlib` functions: `authlib`'s `OAuth2Client` expects the URL in `kwargs` under the key `token_endpoint`, but the `auto_refresh_url` key is passed instead.
This issue was introduced when moving from `requests_oauthlib` to `authlib` as part of the larger changes to support async operation (commit f667554248f17ee990691b81ba0e76a868cbd606, #99); `authlib` expects different parameters than the prior library.
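For reference, the shape of the fix (a sketch based on the patch above, not the full function): `authlib`'s `OAuth2Client` understands a `token_endpoint` keyword, while the `auto_refresh_url`/`auto_refresh_kwargs` pair is `requests_oauthlib` vocabulary that `authlib` does not act on. Here `token` and `api_key` are assumed to be in scope:

```python
# requests_oauthlib-style kwargs (not understood by authlib, so refresh fails):
session_kwargs = {
    'token': token,
    'auto_refresh_url': 'https://api.tdameritrade.com/v1/oauth2/token',
    'auto_refresh_kwargs': {'client_id': api_key},
}

# What authlib's OAuth2Client actually expects:
session_kwargs = {
    'token': token,
    'token_endpoint': 'https://api.tdameritrade.com/v1/oauth2/token',
}
```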
**Code to Reproduce**
(Note: I ran this in a Colab notebook.)
```python
!pip install git+https://github.com/alexgolec/tda-api.git@master
import tda
import json
token_path = "/content/drive/MyDrive/Colab Notebooks/tdameritrade_tokens.json"
api_key_path = "/content/drive/MyDrive/Colab Notebooks/tdameritrade_api_key.key"
redirect_uri = "https://127.0.0.1"
with open(api_key_path) as f:
api_key = f.read()
client = tda.auth.client_from_token_file(token_path, api_key)
tsla_tick_data = client.get_price_history('TSLA',
period_type=tda.client.Client.PriceHistory.PeriodType.MONTH,
period=tda.client.Client.PriceHistory.Period.ONE_MONTH,
frequency_type=tda.client.Client.PriceHistory.FrequencyType.DAILY,
frequency=tda.client.Client.PriceHistory.Frequency.DAILY)
```
**Expected Behavior**
The call to `get_price_history` should succeed, and the token should have been refreshed.
**Actual Behavior**
The call to `get_price_history` throws the following exception:
```
---------------------------------------------------------------------------
InvalidTokenError Traceback (most recent call last)
<ipython-input-5-18c975b65f80> in <module>()
3 period=tda.client.Client.PriceHistory.Period.ONE_MONTH,
4 frequency_type=tda.client.Client.PriceHistory.FrequencyType.DAILY,
----> 5 frequency=tda.client.Client.PriceHistory.Frequency.DAILY)
4 frames
/usr/local/lib/python3.6/dist-packages/authlib/integrations/httpx_client/oauth2_client.py in ensure_active_token(self)
212 self.update_token(token, access_token=access_token)
213 else:
--> 214 raise InvalidTokenError()
InvalidTokenError: token_invalid:
```
**Error/Exception Log, If Applicable**
(Note: When trying to capture the debug log per the instructions at https://tda-api.readthedocs.io/en/latest/help.html, I stumbled upon an issue with `enable_bug_report_logging`, for which I've submitted #126 and, relatedly, #127.)
```
DEBUG:tda.debug:tda-api version 0.7.1
INFO:tda.auth:Loading token from file /content/drive/MyDrive/Colab Notebooks/tdameritrade_tokens.json
INFO:tda.auth:Appending @AMER.OAUTHAP to API key
DEBUG:tda.client.base:Req 1: GET to https://api.tdameritrade.com/v1/marketdata/TSLA/pricehistory, params={
"apikey": "<redacted>@AMER.OAUTHAP",
"periodType": "month",
"period": 1,
"frequencyType": "daily",
"frequency": 1
}
```
| 0.0 | fb7c7e9ee3205bc2f0cbedf2def89707afb78d56 | [
"tests/test_auth.py::ClientFromTokenFileTest::test_api_key_is_normalized",
"tests/test_auth.py::ClientFromTokenFileTest::test_json_loads",
"tests/test_auth.py::ClientFromTokenFileTest::test_pickle_loads",
"tests/test_auth.py::ClientFromAccessFunctionsTest::test_success_no_write_func",
"tests/test_auth.py::ClientFromAccessFunctionsTest::test_success_with_write_func"
]
| [
"tests/test_auth.py::ClientFromTokenFileTest::test_no_such_file",
"tests/test_auth.py::ClientFromTokenFileTest::test_update_token_updates_token",
"tests/test_auth.py::ClientFromLoginFlow::test_no_token_file_http",
"tests/test_auth.py::ClientFromLoginFlow::test_no_token_file_http_redirected_to_https",
"tests/test_auth.py::ClientFromLoginFlow::test_no_token_file_https",
"tests/test_auth.py::ClientFromLoginFlow::test_normalize_api_key",
"tests/test_auth.py::ClientFromLoginFlow::test_unexpected_redirect_url",
"tests/test_auth.py::EasyClientTest::test_no_token_file_no_wd_func",
"tests/test_auth.py::EasyClientTest::test_no_token_file_with_wd_func",
"tests/test_auth.py::EasyClientTest::test_token_file"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_git_commit_hash",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2021-01-02 07:55:35+00:00 | mit | 1,028 |
|
alexgolec__tda-api-37 | diff --git a/tda/auth.py b/tda/auth.py
index d9c7976..105dadb 100644
--- a/tda/auth.py
+++ b/tda/auth.py
@@ -58,6 +58,7 @@ def client_from_token_file(token_path, api_key):
__register_token_redactions(token)
# Return a new session configured to refresh credentials
+ api_key = __normalize_api_key(api_key)
return Client(
api_key,
OAuth2Session(api_key, token=token,
@@ -89,6 +90,8 @@ def client_from_login_flow(webdriver, api_key, redirect_url, token_path,
get_logger().info(('Creating new token with redirect URL \'{}\' ' +
'and token path \'{}\'').format(redirect_url, token_path))
+ api_key = __normalize_api_key(api_key)
+
oauth = OAuth2Session(api_key, redirect_uri=redirect_url)
authorization_url, state = oauth.authorization_url(
'https://auth.tdameritrade.com/auth')
@@ -155,7 +158,6 @@ def easy_client(api_key, redirect_uri, token_path, webdriver_func=None):
a new token. Will only be called if the token file
cannot be found.
'''
- api_key = __normalize_api_key(api_key)
logger = get_logger()
try:
| alexgolec/tda-api | c9a9a9e0ff215aec66af2a622170e0aad8558a5f | diff --git a/tests/auth_test.py b/tests/auth_test.py
index d9457ed..ebf9d43 100644
--- a/tests/auth_test.py
+++ b/tests/auth_test.py
@@ -9,7 +9,7 @@ import tempfile
import unittest
-API_KEY = 'APIKEY'
+API_KEY = '[email protected]'
class ClientFromTokenFileTest(unittest.TestCase):
@@ -46,6 +46,24 @@ class ClientFromTokenFileTest(unittest.TestCase):
auto_refresh_kwargs=_,
token_updater=_)
+ @no_duplicates
+ @patch('tda.auth.Client')
+ @patch('tda.auth.OAuth2Session')
+ def test_api_key_is_normalized(self, session, client):
+ self.write_token()
+
+ client.return_value = 'returned client'
+
+ self.assertEqual('returned client',
+ auth.client_from_token_file(self.pickle_path, 'API_KEY'))
+ client.assert_called_once_with('[email protected]', _)
+ session.assert_called_once_with(
+ '[email protected]',
+ token=self.token,
+ auto_refresh_url=_,
+ auto_refresh_kwargs=_,
+ token_updater=_)
+
REDIRECT_URL = 'https://redirect.url.com'
@@ -82,6 +100,32 @@ class ClientFromLoginFlow(unittest.TestCase):
with open(self.pickle_path, 'rb') as f:
self.assertEqual(self.token, pickle.load(f))
+ @no_duplicates
+ @patch('tda.auth.Client')
+ @patch('tda.auth.OAuth2Session')
+ def test_normalize_api_key(self, session_constructor, client):
+ AUTH_URL = 'https://auth.url.com'
+
+ session = MagicMock()
+ session_constructor.return_value = session
+ session.authorization_url.return_value = AUTH_URL, None
+ session.fetch_token.return_value = self.token
+
+ webdriver = MagicMock()
+ webdriver.get.return_value = REDIRECT_URL + '/token_params'
+
+ client.return_value = 'returned client'
+
+ self.assertEqual('returned client',
+ auth.client_from_login_flow(
+ webdriver, 'API_KEY', REDIRECT_URL,
+ self.pickle_path,
+ redirect_wait_time_seconds=0.0))
+
+ self.assertEqual(
+ '[email protected]',
+ session_constructor.call_args[0][0])
+
class EasyClientTest(unittest.TestCase):
| authentication failure
Hey
I've created an app on TD Ameritrade with the callback URL `https://127.0.0.1:8080` (as [suggested](https://developer.tdameritrade.com/content/authentication-faq) on the developer pages), and I am making sure I send the correct API_KEY and redirect_url to the authentication flow:
```
with webdriver.Chrome() as driver:
c = auth.client_from_login_flow(driver, apikey, redirect_uri, token_path)
```
However, I am getting the `a third party application` error page [that you document](https://tda-api.readthedocs.io/en/latest/auth.html).
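(For reference, the eventual fix normalizes the key before starting the login flow; a sketch of that behavior, following the patch above:)

```python
def normalize_api_key(api_key):
    # TD Ameritrade's OAuth endpoints expect the client ID with this suffix.
    suffix = '@AMER.OAUTHAP'
    if not api_key.endswith(suffix):
        api_key += suffix
    return api_key
```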
Meanwhile, logging shows that the GET is sent to a different localhost port:
```
2020-06-08 16:48:47,541 DEBUG GET http://127.0.0.1:57214/session/<SESSION>/url {}
2020-06-08 16:48:47,544 DEBUG http://127.0.0.1:57214 "GET /session/<SESSION>/url HTTP/1.1" 200 189
2020-06-08 16:48:47,544 DEBUG Finished Request
``` | 0.0 | c9a9a9e0ff215aec66af2a622170e0aad8558a5f | [
"tests/auth_test.py::ClientFromTokenFileTest::test_api_key_is_normalized",
"tests/auth_test.py::ClientFromLoginFlow::test_normalize_api_key"
]
| [
"tests/auth_test.py::ClientFromTokenFileTest::test_file_exists",
"tests/auth_test.py::ClientFromTokenFileTest::test_no_such_file",
"tests/auth_test.py::ClientFromLoginFlow::test_no_token_file",
"tests/auth_test.py::EasyClientTest::test_no_token_file_no_wd_func",
"tests/auth_test.py::EasyClientTest::test_no_token_file_with_wd_func",
"tests/auth_test.py::EasyClientTest::test_token_file"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-06-21 14:47:06+00:00 | mit | 1,029 |
|
alexgolec__tda-api-38 | diff --git a/tda/client.py b/tda/client.py
index b1dbc79..31e82ec 100644
--- a/tda/client.py
+++ b/tda/client.py
@@ -437,6 +437,9 @@ class Client(EnumEnforcer):
projection = self.convert_enum(
projection, self.Instrument.Projection)
+ if isinstance(symbols, str):
+ symbols = [symbols]
+
params = {
'apikey': self.api_key,
'symbol': ','.join(symbols),
@@ -856,6 +859,9 @@ class Client(EnumEnforcer):
`Official documentation
<https://developer.tdameritrade.com/quotes/apis/get/marketdata/
quotes>`__.'''
+ if isinstance(symbols, str):
+ symbols = [symbols]
+
params = {
'apikey': self.api_key,
'symbol': ','.join(symbols)
@@ -956,6 +962,9 @@ class Client(EnumEnforcer):
`Official documentation
<https://developer.tdameritrade.com/user-principal/apis/get/
userprincipals/streamersubscriptionkeys-0>`__.'''
+ if isinstance(account_ids, int) or isinstance(account_ids, str):
+ account_ids = [account_ids]
+
params = {
'apikey': self.api_key,
'accountIds': ','.join(str(i) for i in account_ids)
diff --git a/tda/utils.py b/tda/utils.py
index 152ae10..55f7473 100644
--- a/tda/utils.py
+++ b/tda/utils.py
@@ -32,6 +32,9 @@ class EnumEnforcer:
if iterable is None:
return None
+ if isinstance(iterable, required_enum_type):
+ return [iterable.value]
+
values = []
for value in iterable:
if isinstance(value, required_enum_type):
| alexgolec/tda-api | eb0aecdb65d326c56727dd26be3848557cb069ee | diff --git a/tests/client_test.py b/tests/client_test.py
index 73f0fe6..842ad1f 100644
--- a/tests/client_test.py
+++ b/tests/client_test.py
@@ -210,6 +210,18 @@ class TestClient(unittest.TestCase):
'status': 'FILLED,EXPIRED'
})
+ @no_duplicates
+ @patch('tda.client.datetime.datetime', mockdatetime)
+ def test_get_orders_by_path_statuses_scalar(self):
+ self.client.get_orders_by_path(
+ ACCOUNT_ID, statuses=Client.Order.Status.FILLED)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/accounts/{accountId}/orders'), params={
+ 'fromEnteredTime': MIN_ISO,
+ 'toEnteredTime': NOW_DATETIME_ISO,
+ 'status': 'FILLED'
+ })
+
@no_duplicates
@patch('tda.client.datetime.datetime', mockdatetime)
def test_get_orders_by_path_statuses_unchecked(self):
@@ -313,6 +325,17 @@ class TestClient(unittest.TestCase):
'status': 'FILLED,EXPIRED'
})
+ @no_duplicates
+ @patch('tda.client.datetime.datetime', mockdatetime)
+ def test_get_orders_by_query_statuses_scalar(self):
+ self.client.get_orders_by_query(statuses=Client.Order.Status.FILLED)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/orders'), params={
+ 'fromEnteredTime': MIN_ISO,
+ 'toEnteredTime': NOW_DATETIME_ISO,
+ 'status': 'FILLED'
+ })
+
@no_duplicates
@patch('tda.client.datetime.datetime', mockdatetime)
def test_get_orders_by_query_statuses_unchecked(self):
@@ -467,6 +490,14 @@ class TestClient(unittest.TestCase):
self.make_url('/v1/accounts/{accountId}'),
params={'fields': 'positions,orders'})
+ @no_duplicates
+ def test_get_account_fields_scalar(self):
+ self.client.get_account(
+ ACCOUNT_ID, fields=Client.Account.Fields.POSITIONS)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/accounts/{accountId}'),
+ params={'fields': 'positions'})
+
@no_duplicates
def test_get_account_fields_unchecked(self):
self.client.set_enforce_enums(False)
@@ -492,6 +523,13 @@ class TestClient(unittest.TestCase):
self.make_url('/v1/accounts'),
params={'fields': 'positions,orders'})
+ @no_duplicates
+ def test_get_accounts_fields_scalar(self):
+ self.client.get_accounts(fields=Client.Account.Fields.POSITIONS)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/accounts'),
+ params={'fields': 'positions'})
+
@no_duplicates
def test_get_accounts_fields_unchecked(self):
self.client.set_enforce_enums(False)
@@ -512,6 +550,16 @@ class TestClient(unittest.TestCase):
'symbol': 'AAPL,MSFT',
'projection': 'fundamental'})
+ @no_duplicates
+ def test_search_instruments_one_instrument(self):
+ self.client.search_instruments(
+ 'AAPL', Client.Instrument.Projection.FUNDAMENTAL)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/instruments'), params={
+ 'apikey': API_KEY,
+ 'symbol': 'AAPL',
+ 'projection': 'fundamental'})
+
@no_duplicates
def test_search_instruments_unchecked(self):
self.client.set_enforce_enums(False)
@@ -550,6 +598,16 @@ class TestClient(unittest.TestCase):
'markets': 'EQUITY,BOND',
'date': NOW_DATE_ISO})
+ @no_duplicates
+ def test_get_hours_for_multiple_markets_single_market(self):
+ self.client.get_hours_for_multiple_markets(
+ Client.Markets.EQUITY, NOW_DATETIME)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/marketdata/hours'), params={
+ 'apikey': API_KEY,
+ 'markets': 'EQUITY',
+ 'date': NOW_DATE_ISO})
+
@no_duplicates
def test_get_hours_for_multiple_markets_date(self):
self.client.get_hours_for_multiple_markets([
@@ -568,8 +626,8 @@ class TestClient(unittest.TestCase):
Client.Markets.EQUITY,
Client.Markets.BOND], '2020-01-01')
self.assertEqual(str(cm.exception),
- "expected type in (datetime.date, datetime.datetime) for " +
- "date, got 'builtins.str'")
+ "expected type in (datetime.date, datetime.datetime) "
+ "for date, got 'builtins.str'")
@no_duplicates
def test_get_hours_for_multiple_markets_unchecked(self):
@@ -1020,6 +1078,14 @@ class TestClient(unittest.TestCase):
'apikey': API_KEY,
'symbol': 'AAPL,MSFT'})
+ @no_duplicates
+ def test_get_quotes_single_symbol(self):
+ self.client.get_quotes('AAPL')
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/marketdata/quotes'), params={
+ 'apikey': API_KEY,
+ 'symbol': 'AAPL'})
+
# get_transaction
@no_duplicates
@@ -1156,6 +1222,15 @@ class TestClient(unittest.TestCase):
'apikey': API_KEY,
'accountIds': '1000,2000,3000'})
+ @no_duplicates
+ def test_get_streamer_subscription_keys_one_account_id(self):
+ self.client.get_streamer_subscription_keys(1000)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/userprincipals/streamersubscriptionkeys'),
+ params={
+ 'apikey': API_KEY,
+ 'accountIds': '1000'})
+
@no_duplicates
def test_get_streamer_subscription_keys_str(self):
self.client.get_streamer_subscription_keys(['1000', '2000', '3000'])
@@ -1185,6 +1260,15 @@ class TestClient(unittest.TestCase):
'apikey': API_KEY,
'fields': 'streamerSubscriptionKeys,preferences'})
+ @no_duplicates
+ def test_get_user_principals_one_field(self):
+ self.client.get_user_principals(
+ fields=Client.UserPrincipals.Fields.PREFERENCES)
+ self.mock_session.get.assert_called_once_with(
+ self.make_url('/v1/userprincipals'), params={
+ 'apikey': API_KEY,
+ 'fields': 'preferences'})
+
@no_duplicates
def test_get_user_principals_fields_unchecked(self):
self.client.set_enforce_enums(False)
| Functions expecting arrays should tolerate single scalar values
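A minimal sketch of the coercion the title describes, mirroring the `isinstance` checks added in the patch above (`coerce_to_list` is an illustrative name):

```python
def coerce_to_list(symbols):
    # Accept either 'AAPL' or ['AAPL', 'MSFT'] and always return a list.
    if isinstance(symbols, str):
        return [symbols]
    return list(symbols)
```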
Provided they are of the right type. | 0.0 | eb0aecdb65d326c56727dd26be3848557cb069ee | [
"tests/client_test.py::TestClient::test_get_account_fields_scalar",
"tests/client_test.py::TestClient::test_get_accounts_fields_scalar",
"tests/client_test.py::TestClient::test_get_hours_for_multiple_markets_single_market",
"tests/client_test.py::TestClient::test_get_orders_by_path_statuses_scalar",
"tests/client_test.py::TestClient::test_get_orders_by_query_statuses_scalar",
"tests/client_test.py::TestClient::test_get_quotes_single_symbol",
"tests/client_test.py::TestClient::test_get_streamer_subscription_keys_one_account_id",
"tests/client_test.py::TestClient::test_get_user_principals_one_field",
"tests/client_test.py::TestClient::test_search_instruments_one_instrument"
]
| [
"tests/client_test.py::TestClient::test_cancel_order",
"tests/client_test.py::TestClient::test_cancel_order_str",
"tests/client_test.py::TestClient::test_create_saved_order",
"tests/client_test.py::TestClient::test_create_saved_order_str",
"tests/client_test.py::TestClient::test_create_watchlist",
"tests/client_test.py::TestClient::test_create_watchlist_str",
"tests/client_test.py::TestClient::test_delete_saved_order",
"tests/client_test.py::TestClient::test_delete_saved_order_str",
"tests/client_test.py::TestClient::test_delete_watchlist",
"tests/client_test.py::TestClient::test_delete_watchlist_str",
"tests/client_test.py::TestClient::test_get_account",
"tests/client_test.py::TestClient::test_get_account_fields",
"tests/client_test.py::TestClient::test_get_account_fields_unchecked",
"tests/client_test.py::TestClient::test_get_account_str",
"tests/client_test.py::TestClient::test_get_accounts",
"tests/client_test.py::TestClient::test_get_accounts_fields",
"tests/client_test.py::TestClient::test_get_accounts_fields_unchecked",
"tests/client_test.py::TestClient::test_get_hours_for_multiple_markets_date",
"tests/client_test.py::TestClient::test_get_hours_for_multiple_markets_datetime",
"tests/client_test.py::TestClient::test_get_hours_for_multiple_markets_str",
"tests/client_test.py::TestClient::test_get_hours_for_multiple_markets_unchecked",
"tests/client_test.py::TestClient::test_get_hours_for_single_market_date",
"tests/client_test.py::TestClient::test_get_hours_for_single_market_datetime",
"tests/client_test.py::TestClient::test_get_hours_for_single_market_str",
"tests/client_test.py::TestClient::test_get_hours_for_single_market_unchecked",
"tests/client_test.py::TestClient::test_get_instrument",
"tests/client_test.py::TestClient::test_get_instrument_cusip_must_be_string",
"tests/client_test.py::TestClient::test_get_movers",
"tests/client_test.py::TestClient::test_get_movers_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_contract_type",
"tests/client_test.py::TestClient::test_get_option_chain_contract_type_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_days_to_expiration",
"tests/client_test.py::TestClient::test_get_option_chain_exp_month",
"tests/client_test.py::TestClient::test_get_option_chain_exp_month_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_from_date_date",
"tests/client_test.py::TestClient::test_get_option_chain_from_date_datetime",
"tests/client_test.py::TestClient::test_get_option_chain_from_date_str",
"tests/client_test.py::TestClient::test_get_option_chain_include_quotes",
"tests/client_test.py::TestClient::test_get_option_chain_interest_rate",
"tests/client_test.py::TestClient::test_get_option_chain_interval",
"tests/client_test.py::TestClient::test_get_option_chain_option_type",
"tests/client_test.py::TestClient::test_get_option_chain_option_type_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_strategy",
"tests/client_test.py::TestClient::test_get_option_chain_strategy_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_strike",
"tests/client_test.py::TestClient::test_get_option_chain_strike_count",
"tests/client_test.py::TestClient::test_get_option_chain_strike_range",
"tests/client_test.py::TestClient::test_get_option_chain_strike_range_unchecked",
"tests/client_test.py::TestClient::test_get_option_chain_to_date_date",
"tests/client_test.py::TestClient::test_get_option_chain_to_date_datetime",
"tests/client_test.py::TestClient::test_get_option_chain_to_date_str",
"tests/client_test.py::TestClient::test_get_option_chain_underlying_price",
"tests/client_test.py::TestClient::test_get_option_chain_vanilla",
"tests/client_test.py::TestClient::test_get_option_chain_volatility",
"tests/client_test.py::TestClient::test_get_order",
"tests/client_test.py::TestClient::test_get_order_str",
"tests/client_test.py::TestClient::test_get_orders_by_path_from_entered_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_path_from_not_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_path_max_results",
"tests/client_test.py::TestClient::test_get_orders_by_path_status",
"tests/client_test.py::TestClient::test_get_orders_by_path_status_and_statuses",
"tests/client_test.py::TestClient::test_get_orders_by_path_status_unchecked",
"tests/client_test.py::TestClient::test_get_orders_by_path_statuses",
"tests/client_test.py::TestClient::test_get_orders_by_path_statuses_unchecked",
"tests/client_test.py::TestClient::test_get_orders_by_path_to_entered_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_path_to_not_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_path_vanilla",
"tests/client_test.py::TestClient::test_get_orders_by_path_vanilla_str",
"tests/client_test.py::TestClient::test_get_orders_by_query_from_entered_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_query_max_results",
"tests/client_test.py::TestClient::test_get_orders_by_query_status",
"tests/client_test.py::TestClient::test_get_orders_by_query_status_and_statuses",
"tests/client_test.py::TestClient::test_get_orders_by_query_status_unchecked",
"tests/client_test.py::TestClient::test_get_orders_by_query_statuses",
"tests/client_test.py::TestClient::test_get_orders_by_query_statuses_unchecked",
"tests/client_test.py::TestClient::test_get_orders_by_query_to_entered_datetime",
"tests/client_test.py::TestClient::test_get_orders_by_query_vanilla",
"tests/client_test.py::TestClient::test_get_preferences",
"tests/client_test.py::TestClient::test_get_preferences_str",
"tests/client_test.py::TestClient::test_get_price_history_end_datetime",
"tests/client_test.py::TestClient::test_get_price_history_end_datetime_str",
"tests/client_test.py::TestClient::test_get_price_history_frequency",
"tests/client_test.py::TestClient::test_get_price_history_frequency_type",
"tests/client_test.py::TestClient::test_get_price_history_frequency_type_unchecked",
"tests/client_test.py::TestClient::test_get_price_history_frequency_unchecked",
"tests/client_test.py::TestClient::test_get_price_history_need_extended_hours_data",
"tests/client_test.py::TestClient::test_get_price_history_num_periods",
"tests/client_test.py::TestClient::test_get_price_history_num_periods_unchecked",
"tests/client_test.py::TestClient::test_get_price_history_period_type",
"tests/client_test.py::TestClient::test_get_price_history_period_type_unchecked",
"tests/client_test.py::TestClient::test_get_price_history_start_datetime",
"tests/client_test.py::TestClient::test_get_price_history_start_datetime_str",
"tests/client_test.py::TestClient::test_get_price_history_vanilla",
"tests/client_test.py::TestClient::test_get_quote",
"tests/client_test.py::TestClient::test_get_quotes",
"tests/client_test.py::TestClient::test_get_saved_order",
"tests/client_test.py::TestClient::test_get_saved_order_str",
"tests/client_test.py::TestClient::test_get_saved_orders_by_path",
"tests/client_test.py::TestClient::test_get_saved_orders_by_path_str",
"tests/client_test.py::TestClient::test_get_streamer_subscription_keys",
"tests/client_test.py::TestClient::test_get_streamer_subscription_keys_str",
"tests/client_test.py::TestClient::test_get_transaction",
"tests/client_test.py::TestClient::test_get_transaction_str",
"tests/client_test.py::TestClient::test_get_transactions",
"tests/client_test.py::TestClient::test_get_transactions_end_date",
"tests/client_test.py::TestClient::test_get_transactions_end_date_datetime",
"tests/client_test.py::TestClient::test_get_transactions_end_date_str",
"tests/client_test.py::TestClient::test_get_transactions_start_date_date",
"tests/client_test.py::TestClient::test_get_transactions_start_date_datetime",
"tests/client_test.py::TestClient::test_get_transactions_start_date_str",
"tests/client_test.py::TestClient::test_get_transactions_str",
"tests/client_test.py::TestClient::test_get_transactions_symbol",
"tests/client_test.py::TestClient::test_get_transactions_type",
"tests/client_test.py::TestClient::test_get_transactions_type_unchecked",
"tests/client_test.py::TestClient::test_get_user_principals_fields",
"tests/client_test.py::TestClient::test_get_user_principals_fields_unchecked",
"tests/client_test.py::TestClient::test_get_user_principals_vanilla",
"tests/client_test.py::TestClient::test_get_watchlist",
"tests/client_test.py::TestClient::test_get_watchlist_str",
"tests/client_test.py::TestClient::test_get_watchlists_for_multiple_accounts",
"tests/client_test.py::TestClient::test_get_watchlists_for_single_account",
"tests/client_test.py::TestClient::test_get_watchlists_for_single_account_str",
"tests/client_test.py::TestClient::test_place_order",
"tests/client_test.py::TestClient::test_place_order_str",
"tests/client_test.py::TestClient::test_replace_order",
"tests/client_test.py::TestClient::test_replace_order_str",
"tests/client_test.py::TestClient::test_replace_saved_order",
"tests/client_test.py::TestClient::test_replace_saved_order_str",
"tests/client_test.py::TestClient::test_replace_watchlist",
"tests/client_test.py::TestClient::test_replace_watchlist_str",
"tests/client_test.py::TestClient::test_search_instruments",
"tests/client_test.py::TestClient::test_search_instruments_unchecked",
"tests/client_test.py::TestClient::test_update_preferences",
"tests/client_test.py::TestClient::test_update_preferences_str",
"tests/client_test.py::TestClient::test_update_watchlist",
"tests/client_test.py::TestClient::test_update_watchlist_str"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-06-21 15:41:49+00:00 | mit | 1,030 |
|
alexhsamuel__ntab-11 | diff --git a/ntab/fmt.py b/ntab/fmt.py
new file mode 100644
index 0000000..7c047c4
--- /dev/null
+++ b/ntab/fmt.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import six
+
+from .lib.text import palide
+
+#-------------------------------------------------------------------------------
+
+def format_row(row, width=80, max_name_width=32):
+ """
+ @rtype
+ Generator of lines.
+ """
+ vals = row.__dict__
+ name_width = min(max_name_width, max( len(n) for n in vals ))
+ for name, val in six.iteritems(vals):
+ yield "{}: {}".format(
+ palide(name, name_width),
+ palide(str(val), width - name_width - 2)
+ )
+
+
diff --git a/ntab/html.py b/ntab/html.py
index 9ca4ccb..37c25a4 100644
--- a/ntab/html.py
+++ b/ntab/html.py
@@ -82,7 +82,7 @@ def _render(table, css_class="tab-table", max_rows=None):
yield "<thead>"
yield "<tr>"
for name, width in zip(names, widths):
- yield "<th>{}</th>".format(elide(name, max(width, 8)))
+ yield "<th>{}</th>".format(elide(name, max(width, 8), elide_pos=0.7))
yield "</tr>"
yield "</thead>"
yield "<tbody>"
diff --git a/ntab/lib/text.py b/ntab/lib/text.py
index 8ad3e4b..0812d61 100644
--- a/ntab/lib/text.py
+++ b/ntab/lib/text.py
@@ -27,14 +27,18 @@ def pad(string, length, pad=" ", pos=1.0):
if left > 0:
string = pad * (left // pad_len) + pad[: left % pad_len] + string
if right > 0:
- string = string + pad[-(right % pad_len) :] + pad * (right // pad_len)
+ string = (
+ string
+ + pad[pad_len - (right % pad_len) :]
+ + pad * (right // pad_len)
+ )
return string
_pad = pad
-def elide(string, length, ellipsis=u"\u2026", pos=0.7):
+def elide(string, length, ellipsis=u"\u2026", pos=1.0):
"""
Elides characters if necessary to fit `string` in `length` characters.
@@ -63,7 +67,7 @@ def elide(string, length, ellipsis=u"\u2026", pos=0.7):
def palide(string, length, ellipsis=u"\u2026", pad=" ", pad_pos=1.0,
- elide_pos=0.7):
+ elide_pos=1.0):
"""
A combination of `elide` and `pad`.
"""
diff --git a/ntab/tab.py b/ntab/tab.py
index 5b569b5..010dcb1 100644
--- a/ntab/tab.py
+++ b/ntab/tab.py
@@ -13,7 +13,7 @@ import numpy as np
import six
import sys
-from . import nplib
+from . import fmt, nplib
from .lib import *
#-------------------------------------------------------------------------------
@@ -181,11 +181,22 @@ class Row(object):
)
+ def __str__(self):
+ return "\n".join(fmt.format_row(self))
+
+
@property
- def __index__(self):
+ def __idx__(self):
return self.__idx
+ # FIXME: Potentially sketchy.
+ @property
+ def __dict__(self):
+ return odict(
+ (n, a[self.__idx]) for n, a in six.iteritems(self.__arrs) )
+
+
class RowsProxy(collections.Sequence):
# FIXME: Allow modifying values in rows (i.e. mutable rows)?
| alexhsamuel/ntab | e28de6ee1a9bf991225bfbeac40f5852dbecab36 | diff --git a/ntab/lib/test/test_text.py b/ntab/lib/test/test_text.py
new file mode 100644
index 0000000..aff5323
--- /dev/null
+++ b/ntab/lib/test/test_text.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import pytest
+
+from ntab.lib.text import *
+
+#-------------------------------------------------------------------------------
+
+def test_pad_length():
+ assert pad("hello", 0) == "hello"
+ assert pad("hello", 4) == "hello"
+ assert pad("hello", 5) == "hello"
+ assert pad("hello", 6) == "hello "
+ assert pad("hello", 10) == "hello "
+ assert pad("hello", length=10) == "hello "
+
+ assert pad("", 0) == ""
+ assert pad("", 5) == " "
+
+
+def test_pad_pad():
+ assert pad("hello", 4, "x") == "hello"
+ assert pad("hello", 6, "x") == "hellox"
+ assert pad("hello", 9, "x") == "helloxxxx"
+ assert pad("hello", 8, "o") == "helloooo"
+ assert pad("hello", 8, "-") == "hello---"
+ assert pad("hello", pad="-", length=8) == "hello---"
+ assert pad("hello", 8, "-=") == "hello=-="
+ assert pad("hello", 12, ".!.") == "hello..!..!."
+
+
+def test_pad_left():
+ assert pad("hello", 4, pos=0 ) == "hello"
+ assert pad("hello", 10, pos=1 ) == "hello "
+ assert pad("hello", 10, pos=0 ) == " hello"
+ assert pad("hello", 10, pos=0, pad="/") == "/////hello"
+
+
+# FIXME: Test center().
+
+def test_elide_default():
+ assert elide("I am a duck.", 8) == u"I am a \u2026"
+ assert elide("I am a duck.", 14) == "I am a duck."
+
+
+def test_elide_length():
+ assert elide("Hello, world!", 15, "...") == "Hello, world!"
+ assert elide("Hello, world!", 13, "...") == "Hello, world!"
+ assert elide("Hello, world!", 12, "...") == "Hello, wo..."
+ assert elide("Hello, world!", 11, "...") == "Hello, w..."
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 5, "...") == "He..."
+
+ assert elide("foo", 3, "...") == "foo"
+ assert elide("fool", 3, "...") == "..."
+
+
+def test_elide_ellipsis():
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert elide("Hello, world!", 10, "*" ) == "Hello, wo*"
+ assert elide("Hello, world!", 10, "" ) == "Hello, wor"
+
+ assert elide("Hello, world!", ellipsis="*", length=10) == "Hello, wo*"
+
+
+def test_elide_position():
+ assert elide("Hello, world!", 10, "...", 1.0) == "Hello, ..."
+ assert elide("Hello, world!", 10, "...", 0.7) == "Hello...d!"
+ assert elide("Hello, world!", 10, "...", 0.5) == "Hell...ld!"
+ assert elide("Hello, world!", 10, "...", 0.4) == "Hel...rld!"
+ assert elide("Hello, world!", 10, "...", 0.0) == "... world!"
+
+ assert elide(
+ "Hello, world!", pos=0.4, length=10, ellipsis="..") == "Hel..orld!"
+
+
+def test_palide_length():
+ assert palide("Hello, world!", 3, "...") == "..."
+ assert palide("Hello, world!", 10, "...") == "Hello, ..."
+ assert palide("Hello, world!", 11, "...") == "Hello, w..."
+ assert palide("Hello, world!", 13, "...") == "Hello, world!"
+ assert palide("Hello, world!", 14, "...") == "Hello, world! "
+ assert palide("Hello, world!", 20, "...") == "Hello, world! "
+
+
+def test_palide_ellipsis():
+ assert palide("Hello, world!", 10, "~~~~~") == "Hello~~~~~"
+ assert palide("Hello, world!", 10, "..." ) == "Hello, ..."
+ assert palide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert palide("Hello, world!", 10, "" ) == "Hello, wor"
+
+
+def test_palide_pad():
+ assert palide("Hello, world!", 13, pad="x") == "Hello, world!"
+ assert palide("Hello, world!", 18, pad="x") == "Hello, world!xxxxx"
+ assert palide("Hello, world!", 18, pad="!") == "Hello, world!!!!!!"
+
+
+def test_palide_position():
+ assert palide("Hello, world!", 11, "..", elide_pos=0.0) == "..o, world!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.6) == "Hello..rld!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.8) == "Hello, ..d!"
+
+
+def test_palide_args():
+ assert palide(
+ ellipsis="-//-",
+ length=20,
+ pad="x",
+ elide_pos=0.4,
+ string="The quick brown fox jumped over the lazy dogs.",
+ ) == "The qu-//-lazy dogs."
+
+
+def test_palide_default():
+ assert palide("I am a duck.", 8) == u"I am a \u2026"
+ assert palide("I am a duck.", 14) == "I am a duck. "
+
+
| pretty-print a row
Print a row with one field on each line.
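A minimal sketch of the formatting, following `fmt.format_row` from the patch above (plain `ljust` stands in for the library's `palide` padding helper):

```python
def format_row(values, max_name_width=32):
    # Yield one "name: value" line per field, names padded to a common width.
    name_width = min(max_name_width, max(len(n) for n in values))
    for name, value in values.items():
        yield "{}: {}".format(name.ljust(name_width), value)
```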
| 0.0 | e28de6ee1a9bf991225bfbeac40f5852dbecab36 | [
"ntab/lib/test/test_text.py::test_pad_length",
"ntab/lib/test/test_text.py::test_pad_pad",
"ntab/lib/test/test_text.py::test_pad_left",
"ntab/lib/test/test_text.py::test_elide_default",
"ntab/lib/test/test_text.py::test_elide_length",
"ntab/lib/test/test_text.py::test_elide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_length",
"ntab/lib/test/test_text.py::test_palide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_pad",
"ntab/lib/test/test_text.py::test_palide_default"
]
| [
"ntab/lib/test/test_text.py::test_elide_position",
"ntab/lib/test/test_text.py::test_palide_position",
"ntab/lib/test/test_text.py::test_palide_args"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2017-06-22 15:08:41+00:00 | mit | 1,031 |
|
alexhsamuel__ntab-14 | diff --git a/ntab/tab.py b/ntab/tab.py
index 3d90a41..d259d13 100644
--- a/ntab/tab.py
+++ b/ntab/tab.py
@@ -9,13 +9,48 @@ from __future__ import absolute_import, division, print_function, unicode_lite
from builtins import *
import collections
from collections import OrderedDict as odict
+import itertools
import numpy as np
+from past.builtins import basestring
import six
import sys
from . import fmt, nplib
from .lib import *
+#-------------------------------------------------------------------------------
+
+def _ensure_array(obj, length):
+ """
+ Ensures `obj` is an ndarray of shape `(length, )`, converting if necessary.
+ """
+ arr = None
+
+ if isinstance(obj, np.ndarray):
+ arr = obj
+
+ if arr is None and not isinstance(obj, basestring):
+ # Convert sequences to arrays.
+ try:
+ len(obj)
+ except:
+ pass
+ else:
+ arr = np.array(obj)
+
+ # Convert scalars to full arrays.
+ if arr is None:
+ # FIXME: Newer numpy doesn't require explicit dtype
+ dtype = np.array(obj).dtype
+ arr = np.full(length, obj, dtype)
+
+ if len(arr.shape) != 1:
+ raise ValueError("not one-dimensional")
+ if length is not None and arr.shape != (length, ):
+ raise ValueError("wrong length")
+ return arr
+
+
#-------------------------------------------------------------------------------
class ArraysObjectProxy(object):
@@ -269,9 +304,9 @@ class Table(object):
(n, a[sel]) for n, a in self.__arrs.items() )
- def __construct(self, arrs):
+ def __construct(self, length, arrs):
+ self.__length = length
self.__arrs = arrs
- self.__length = None if len(arrs) == 0 else len(a_value(arrs))
# Proxies.
# FIXME: Create lazily?
self.a = ArraysObjectProxy(self)
@@ -308,10 +343,26 @@ class Table(object):
be one-dimensional and the same length.
"""
arrs = odict(*args, **kw_args)
+
+ # Get the length.
+ length = None
+ for arr in six.itervalues(arrs):
+ try:
+ length = len(arr)
+ except TypeError:
+ pass
+ else:
+ break
+ if length is None and len(arrs) > 0:
+ raise ValueError("no arrs have length")
+
# Make sure the arrays are all arrays.
- arrs = odict( (str(n), np.array(a)) for n, a in six.iteritems(arrs) )
+ arrs = odict(
+ (str(n), _ensure_array(a, length))
+ for n, a in six.iteritems(arrs)
+ )
- self.__construct(arrs)
+ self.__construct(length, arrs)
self.__check(self.__arrs)
@@ -332,6 +383,7 @@ class Table(object):
# Construct an instance without calling __init__().
self = object.__new__(class_)
+ length = None if len(arrs) == 0 else len(a_value(arrs))
self.__construct(arrs)
if check:
self.__check(self.__arrs)
@@ -377,11 +429,16 @@ class Table(object):
#---------------------------------------------------------------------------
# Mutators
- # FIXME: Make immutable?
def add(self, *args, **kw_args):
+ """
+ Adds or replaces a column.
+ """
arrs = odict(*args, **kw_args)
- arrs = odict( (str(n), np.array(a)) for n, a in six.iteritems(arrs) )
+ arrs = odict(
+ (str(n), _ensure_array(a, self.__length))
+ for n, a in six.iteritems(arrs)
+ )
if len(arrs) == 0:
# Nothing to do.
| alexhsamuel/ntab | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | diff --git a/ntab/test/test_basic.py b/ntab/test/test_basic.py
index 44e7468..6259508 100644
--- a/ntab/test/test_basic.py
+++ b/ntab/test/test_basic.py
@@ -81,3 +81,18 @@ def test_empty_arrs():
assert tab.num_rows == 0
+def test_tab_create_scalar():
+ tab = Table(i=2, x=[3, 4, 5], l="foo")
+ assert tab.num_rows == 3
+ assert list(tab.a.i) == [2, 2, 2]
+ assert list(tab.a.l) == ["foo", "foo", "foo"]
+
+
+def test_tab_add_col_scalar():
+ tab = Table(x=[3, 4, 5])
+ tab.a.i = 2
+ tab.a.l = "foo"
+ assert list(tab.a.i) == [2, 2, 2]
+ assert list(tab.a.l) == ["foo", "foo", "foo"]
+
+
| assign constant to array
As a convenience, allow
```py
tbl.a.foo = 42
```
for either an existing or new array.
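A minimal sketch of the broadcast behind this, mirroring the `_ensure_array` helper in the patch above:

```python
import numpy as np

def ensure_array(obj, length):
    # Broadcast a scalar to a full column of the table's length.
    if np.isscalar(obj):
        return np.full(length, obj, np.array(obj).dtype)
    return np.asarray(obj)
```

So `tbl.a.foo = 42` on a three-row table stores `array([42, 42, 42])`.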
| 0.0 | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | [
"ntab/test/test_basic.py::test_tab_create_scalar",
"ntab/test/test_basic.py::test_tab_add_col_scalar"
]
| [
"ntab/test/test_basic.py::test_init_dict",
"ntab/test/test_basic.py::test_init_empty_dict",
"ntab/test/test_basic.py::test_init_odict",
"ntab/test/test_basic.py::test_init_odict_empty",
"ntab/test/test_basic.py::test_init_kw_args",
"ntab/test/test_basic.py::test_init_items",
"ntab/test/test_basic.py::test_init_bad_length",
"ntab/test/test_basic.py::test_empty_arrs"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2017-06-22 16:34:53+00:00 | mit | 1,032 |
|
alexis-mignon__python-flickr-api-96 | diff --git a/flickr_api/upload.py b/flickr_api/upload.py
index 1595b7f..65de043 100644
--- a/flickr_api/upload.py
+++ b/flickr_api/upload.py
@@ -58,7 +58,7 @@ def post(url, auth_handler, args, photo_file, photo_file_data=None):
data = resp.content
if resp.status_code != 200:
- raise FlickrError("HTTP Error %i: %s" % (r.status, data))
+ raise FlickrError("HTTP Error %i: %s" % (resp.status_code, resp.text))
r = ET.fromstring(data)
if r.get("stat") != 'ok':
| alexis-mignon/python-flickr-api | 6d7e5f429ed252a77a0267ce3d248973f0f901b6 | diff --git a/test/test_upload.py b/test/test_upload.py
new file mode 100644
index 0000000..90dffba
--- /dev/null
+++ b/test/test_upload.py
@@ -0,0 +1,37 @@
+import unittest
+from unittest.mock import MagicMock
+
+from flickr_api import upload
+from flickr_api.auth import AuthHandler
+from flickr_api.flickrerrors import FlickrError
+
+from requests import Response
+
+from io import StringIO
+from io import BytesIO
+
+import inspect
+
+class TestUpload(unittest.TestCase):
+ def test_upload_not_200(self):
+ from flickr_api import set_auth_handler
+ auth_handler = AuthHandler(key="test", secret="test",
+ access_token_key="test",
+ access_token_secret="test")
+ set_auth_handler(auth_handler)
+ args = dict(
+ photo_file = '/tmp/test_file',
+ photo_file_data = StringIO("000000")
+ )
+
+ module = inspect.getmodule(upload)
+ resp = Response()
+ resp.status_code = 404
+ resp.raw = BytesIO("Not Found".encode("utf-8"))
+ module.requests.post = MagicMock(return_value=resp)
+
+ with self.assertRaises(FlickrError) as context:
+ upload(**args)
+
+ print(context.exception)
+ self.assertEquals("HTTP Error 404: Not Found", str(context.exception))
\ No newline at end of file
| Exception UnboundLocalError if the server response is not 200 when uploading a photo
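(Root cause: in the non-200 branch, `post` raises using the name `r`, which is only bound later when the response XML is parsed. A sketch of the corrected error path, per the patch above; `check_upload_response` is an illustrative helper name:)

```python
from flickr_api.flickrerrors import FlickrError

def check_upload_response(resp):
    # Raise from the response object itself instead of the unbound 'r'.
    if resp.status_code != 200:
        raise FlickrError("HTTP Error %i: %s" % (resp.status_code, resp.text))
```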
```
Traceback (most recent call last):
File "/home/w/projects/panoedit/flickr_upload.py", line 60, in main
path = os.path.join(conf.photos_dir, file_name)
File "/home/w/projects/panoedit/google_venv/local/lib/python2.7/site-packages/flickr_api/upload.py", line 107, in upload
r = post(UPLOAD_URL, auth.AUTH_HANDLER, args, photo_file, photo_file_data)
File "/home/w/projects/panoedit/google_venv/local/lib/python2.7/site-packages/flickr_api/upload.py", line 61, in post
raise FlickrError("HTTP Error %i: %s" % (r.status, data))
UnboundLocalError: local variable 'r' referenced before assignment
``` | 0.0 | 6d7e5f429ed252a77a0267ce3d248973f0f901b6 | [
"test/test_upload.py::TestUpload::test_upload_not_200"
]
| []
| {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2018-06-14 16:28:03+00:00 | bsd-3-clause | 1,033 |
|
alexmojaki__pure_eval-12 | diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index fb89d46..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-dist: xenial
-language: python
-sudo: false
-
-python:
- - 3.5
- - 3.6
- - 3.7
- - 3.8-dev
- - 3.9-dev
-
-env:
- global:
- - PURE_EVAL_SLOW_TESTS=1
- - COVERALLS_PARALLEL=true
-
-before_install:
- - pip install --upgrade coveralls setuptools>=44 setuptools_scm>=3.4.3 pep517
-
-install:
- - pip install ".[tests]"
-
-script:
- - coverage run --branch --include='pure_eval/*' -m pytest --junitxml=./rspec.xml
- - coverage report -m
-
-after_success:
- - coveralls
-
-notifications:
- webhooks: https://coveralls.io/webhook
- email: false
diff --git a/MANIFEST.in b/MANIFEST.in
index 800dfd8..09204c8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,3 @@
include LICENSE.txt
include pure_eval/py.typed
+include README.md
diff --git a/make_release.sh b/make_release.sh
index f9b8308..f0c1ac8 100755
--- a/make_release.sh
+++ b/make_release.sh
@@ -26,5 +26,5 @@ export TAG="v${1}"
git tag "${TAG}"
git push origin master "${TAG}"
rm -rf ./build ./dist
-python3 -m pep517.build -bs .
+python -m build --sdist --wheel .
twine upload ./dist/*.whl dist/*.tar.gz
diff --git a/pure_eval/core.py b/pure_eval/core.py
index 0a0381e..748f051 100644
--- a/pure_eval/core.py
+++ b/pure_eval/core.py
@@ -15,6 +15,7 @@ from pure_eval.utils import (
of_standard_types,
is_any,
of_type,
+ ensure_dict,
)
@@ -39,9 +40,9 @@ class Evaluator:
"""
return cls(ChainMap(
- frame.f_locals,
- frame.f_globals,
- frame.f_builtins,
+ ensure_dict(frame.f_locals),
+ ensure_dict(frame.f_globals),
+ ensure_dict(frame.f_builtins),
))
def __getitem__(self, node: ast.expr) -> Any:
diff --git a/pure_eval/utils.py b/pure_eval/utils.py
index 139d6dd..a8a3730 100644
--- a/pure_eval/utils.py
+++ b/pure_eval/utils.py
@@ -189,3 +189,13 @@ def copy_ast_without_context(x):
return list(map(copy_ast_without_context, x))
else:
return x
+
+
+def ensure_dict(x):
+ """
+ Handles invalid non-dict inputs
+ """
+ try:
+ return dict(x)
+ except Exception:
+ return {}
diff --git a/setup.cfg b/setup.cfg
index 330cb29..3d07ca9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,6 +14,7 @@ classifiers =
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
License :: OSI Approved :: MIT License
Operating System :: OS Independent
@@ -22,7 +23,7 @@ packages = pure_eval
install_requires =
include_package_data = True
tests_require = pytest
-setup_requires = setuptools>=44; wheel; setuptools_scm[toml]>=3.4.3
+setup_requires = setuptools>=44; setuptools_scm[toml]>=3.4.3
[options.extras_require]
tests = pytest
diff --git a/tox.ini b/tox.ini
index aa83fa0..3feff03 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py{35,36,37,38,39}
+envlist = py{35,36,37,38,39,310}
[testenv]
commands = pytest
| alexmojaki/pure_eval | b5e1617805fbb1e77101de1ad372d2a0d58053ce | diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
new file mode 100644
index 0000000..7f68be5
--- /dev/null
+++ b/.github/workflows/pytest.yml
@@ -0,0 +1,36 @@
+name: Tests
+on: [push, pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: [3.7, 3.8, 3.9, 3.10-dev]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: run tests
+ env:
+ PURE_EVAL_SLOW_TESTS: 1
+ run: |
+ pip install -U pip
+ pip install --upgrade coveralls setuptools setuptools_scm pep517
+ pip install .[tests]
+ coverage run --source pure_eval -m pytest
+ coverage report -m
+ - name: Coveralls Python
+ uses: AndreMiras/coveralls-python-action@v20201129
+ with:
+ parallel: true
+ flag-name: test-${{ matrix.python-version }}
+ coveralls_finish:
+ needs: build
+ runs-on: ubuntu-latest
+ steps:
+ - name: Coveralls Finished
+ uses: AndreMiras/coveralls-python-action@v20201129
+ with:
+ parallel-finished: true
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 315ecc5..172f50e 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -17,6 +17,7 @@ from pure_eval.utils import (
safe_name,
typing_annotation_samples,
is_standard_types,
+ ensure_dict,
)
@@ -126,3 +127,10 @@ def test_is_standard_types():
assert is_standard_types(lst, deep=False, check_dict_values=True)
assert is_standard_types(lst[0], deep=True, check_dict_values=True)
assert not is_standard_types(lst, deep=True, check_dict_values=True)
+
+
+def test_ensure_dict():
+ assert ensure_dict({}) == {}
+ assert ensure_dict([]) == {}
+ assert ensure_dict('foo') == {}
+ assert ensure_dict({'a': 1}) == {'a': 1}
| TypeError for malformed metaclass example
In https://github.com/ipython/ipython/issues/13481, the following example used to show a fatal error in IPython:
```python
class X(type):
def __prepare__(cls, *args, **kwargs):
return []
class Y(metaclass=X):
pass
```
If I try the same example with friendly-traceback, I also get a fatal error, with the following as part of a long traceback:
```
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "LOCAL:\pure_eval\core.py", line 445, in group_expressions
for node, value in expressions:
File "FRIENDLY:\info_variables.py", line 119, in <genexpr>
for nodes, obj in group_expressions(
File "LOCAL:\pure_eval\core.py", line 358, in find_expressions
value = self[node]
File "LOCAL:\pure_eval\core.py", line 68, in __getitem__
self._cache[node] = result = self._handle(node)
File "LOCAL:\pure_eval\core.py", line 89, in _handle
return self.names[node.id]
TypeError: list indices must be integers or slices, not str
```
In https://github.com/friendly-traceback/friendly-traceback/commit/276ec1b85f7c5949b0e5d1fb325b30b59b57d9c5, I've guarded against this type of fatal error.
I didn't see any evidence that the IPython crash is caused by pure_eval or any other library of yours, but I thought you might want to know about it - and possibly include some safeguards in pure_eval.
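For illustration, here is a minimal sketch of the kind of guard discussed above. `ensure_dict` mirrors the helper the patch adds to `pure_eval/utils.py`; the surrounding `frame_namespaces` function and the demo line are my own assumptions about how it would be used, not the library's actual API.
```python
import sys


def ensure_dict(x):
    # Frame namespaces are normally dicts, but a malformed metaclass
    # (__prepare__ returning a list, as above) can make f_locals something
    # else entirely. Coerce to a real dict, falling back to empty on failure.
    try:
        return dict(x)
    except Exception:
        return {}


def frame_namespaces(frame):
    # Merge builtins, globals and locals defensively so that later
    # string-keyed lookups like names[node.id] cannot raise TypeError.
    return {
        **ensure_dict(frame.f_builtins),
        **ensure_dict(frame.f_globals),
        **ensure_dict(frame.f_locals),
    }


print("len" in frame_namespaces(sys._getframe()))  # True, even for odd frames
```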
| 0.0 | b5e1617805fbb1e77101de1ad372d2a0d58053ce | [
"tests/test_utils.py::test_sys_modules",
"tests/test_utils.py::test_repr_cannot_eval",
"tests/test_utils.py::test_safe_name_types",
"tests/test_utils.py::test_safe_name_samples",
"tests/test_utils.py::test_safe_name_direct",
"tests/test_utils.py::test_is_standard_types",
"tests/test_utils.py::test_ensure_dict"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-01-22 15:31:07+00:00 | mit | 1,034 |
|
alexmojaki__stack_data-53 | diff --git a/stack_data/utils.py b/stack_data/utils.py
index 78ce2d6..ad8cd38 100644
--- a/stack_data/utils.py
+++ b/stack_data/utils.py
@@ -92,12 +92,13 @@ def is_frame(frame_or_tb: Union[FrameType, TracebackType]) -> bool:
def iter_stack(frame_or_tb: Union[FrameType, TracebackType]) -> Iterator[Union[FrameType, TracebackType]]:
- while frame_or_tb:
- yield frame_or_tb
- if is_frame(frame_or_tb):
- frame_or_tb = frame_or_tb.f_back
+ current: Union[FrameType, TracebackType, None] = frame_or_tb
+ while current:
+ yield current
+ if is_frame(current):
+ current = current.f_back
else:
- frame_or_tb = frame_or_tb.tb_next
+ current = current.tb_next
def frame_and_lineno(frame_or_tb: Union[FrameType, TracebackType]) -> Tuple[FrameType, int]:
diff --git a/tox.ini b/tox.ini
index b613d58..84e4ccb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,3 +6,4 @@ commands = pytest {posargs}
extras = tests
passenv =
STACK_DATA_SLOW_TESTS
+ FIX_STACK_DATA_TESTS
| alexmojaki/stack_data | df3566c10c7c1cc69e25c32c35666593481aee31 | diff --git a/tests/__init__.py b/tests/__init__.py
index fe28111..e61d09f 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,7 +1,10 @@
import os
import pyximport
-from typeguard.importhook import install_import_hook
+try:
+ from typeguard import install_import_hook
+except ImportError:
+ from typeguard.importhook import install_import_hook
pyximport.install(language_level=3)
diff --git a/tests/golden_files/pygmented.txt b/tests/golden_files/pygmented.txt
index 920dabc..c82f142 100644
--- a/tests/golden_files/pygmented.txt
+++ b/tests/golden_files/pygmented.txt
@@ -1,11 +1,11 @@
Traceback (most recent call last):
File "formatter_example.py", line 21, in foo
- 9 | [38;5;15m[39m[38;5;15mx[39m[38;5;15m [39m[38;5;197m=[39m[38;5;15m [39m[38;5;141m1[39m
- 10 | [38;5;15m[39m[38;5;15mlst[39m[38;5;15m [39m[38;5;197m=[39m[38;5;15m [39m[38;5;15m([39m
+ 9 | [38;5;15m[39m[38;5;15mx[39m[38;5;15m [39m[38;5;204m=[39m[38;5;15m [39m[38;5;141m1[39m
+ 10 | [38;5;15m[39m[38;5;15mlst[39m[38;5;15m [39m[38;5;204m=[39m[38;5;15m [39m[38;5;15m([39m
11 | [38;5;15m [39m[38;5;15m[[39m
12 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m,[39m
(...)
- 18 | [38;5;15m [39m[38;5;197m+[39m[38;5;15m [39m[38;5;15m[[39m[38;5;15m][39m
+ 18 | [38;5;15m [39m[38;5;204m+[39m[38;5;15m [39m[38;5;15m[[39m[38;5;15m][39m
19 | [38;5;15m[39m[38;5;15m)[39m
20 | [38;5;15m[39m[38;5;81mtry[39m[38;5;15m:[39m
--> 21 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mint[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mstr[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mlst[39;49m[38;5;15;48;5;24m)[39;49m[38;5;15;48;5;24m)[39;49m
@@ -19,7 +19,7 @@ Traceback (most recent call last):
21 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15mint[39m[38;5;15m([39m[38;5;15mstr[39m[38;5;15m([39m[38;5;15mlst[39m[38;5;15m)[39m[38;5;15m)[39m
22 | [38;5;15m[39m[38;5;81mexcept[39m[38;5;15m:[39m
23 | [38;5;15m [39m[38;5;81mtry[39m[38;5;15m:[39m
---> 24 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m [39;49m[38;5;197;48;5;24m/[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m0[39;49m
+--> 24 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m [39;49m[38;5;204;48;5;24m/[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m0[39;49m
25 | [38;5;15m [39m[38;5;81mexcept[39m[38;5;15m [39m[38;5;148mException[39m[38;5;15m [39m[38;5;81mas[39m[38;5;15m [39m[38;5;15me[39m[38;5;15m:[39m
ZeroDivisionError: division by zero
@@ -31,24 +31,24 @@ Traceback (most recent call last):
--> 30 | [38;5;15m [39m[38;5;15;48;5;24mexec[39;49m[38;5;15;48;5;24m([39;49m[38;5;186;48;5;24m"[39;49m[38;5;186;48;5;24mfoo()[39;49m[38;5;186;48;5;24m"[39;49m[38;5;15;48;5;24m)[39;49m
File "<string>", line 1, in <module>
File "formatter_example.py", line 8, in foo
- 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;197m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
- 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;197m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
---> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;197;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
- 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;197m=[39m[38;5;15m [39m[38;5;141m1[39m
+ 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;204m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
+ 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;204m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
+--> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;204;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
+ 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;204m=[39m[38;5;15m [39m[38;5;141m1[39m
File "formatter_example.py", line 8, in foo
- 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;197m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
- 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;197m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
---> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;197;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
- 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;197m=[39m[38;5;15m [39m[38;5;141m1[39m
+ 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;204m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
+ 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;204m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
+--> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;204;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
+ 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;204m=[39m[38;5;15m [39m[38;5;141m1[39m
[... skipping similar frames: foo at line 8 (2 times)]
File "formatter_example.py", line 8, in foo
- 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;197m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
- 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;197m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
---> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;197;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
- 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;197m=[39m[38;5;15m [39m[38;5;141m1[39m
+ 6 | [38;5;81mdef[39m[38;5;15m [39m[38;5;148mfoo[39m[38;5;15m([39m[38;5;15mn[39m[38;5;204m=[39m[38;5;141m5[39m[38;5;15m)[39m[38;5;15m:[39m
+ 7 | [38;5;15m [39m[38;5;81mif[39m[38;5;15m [39m[38;5;15mn[39m[38;5;15m [39m[38;5;204m>[39m[38;5;15m [39m[38;5;141m0[39m[38;5;15m:[39m
+--> 8 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;15;48;5;24mfoo[39;49m[38;5;15;48;5;24m([39;49m[38;5;15;48;5;24mn[39;49m[38;5;15;48;5;24m [39;49m[38;5;204;48;5;24m-[39;49m[38;5;15;48;5;24m [39;49m[38;5;141;48;5;24m1[39;49m[38;5;15;48;5;24m)[39;49m
+ 9 | [38;5;15m [39m[38;5;15mx[39m[38;5;15m [39m[38;5;204m=[39m[38;5;15m [39m[38;5;141m1[39m
File "formatter_example.py", line 26, in foo
23 | [38;5;15m[39m[38;5;81mtry[39m[38;5;15m:[39m
- 24 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;141m1[39m[38;5;15m [39m[38;5;197m/[39m[38;5;15m [39m[38;5;141m0[39m
+ 24 | [38;5;15m [39m[38;5;81mreturn[39m[38;5;15m [39m[38;5;141m1[39m[38;5;15m [39m[38;5;204m/[39m[38;5;15m [39m[38;5;141m0[39m
25 | [38;5;15m[39m[38;5;81mexcept[39m[38;5;15m [39m[38;5;148mException[39m[38;5;15m [39m[38;5;81mas[39m[38;5;15m [39m[38;5;15me[39m[38;5;15m:[39m
---> 26 | [38;5;15m [39m[38;5;81mraise[39m[38;5;15m [39m[38;5;148mTypeError[39m[38;5;15m [39m[38;5;197mfrom[39m[38;5;15m [39m[38;5;15me[39m
+--> 26 | [38;5;15m [39m[38;5;81mraise[39m[38;5;15m [39m[38;5;148mTypeError[39m[38;5;15m [39m[38;5;204mfrom[39m[38;5;15m [39m[38;5;15me[39m
TypeError
diff --git a/tests/golden_files/serialize.json b/tests/golden_files/serialize.json
index 94a190a..f084d18 100644
--- a/tests/golden_files/serialize.json
+++ b/tests/golden_files/serialize.json
@@ -582,13 +582,13 @@
"type": "line",
"is_current": false,
"lineno": 9,
- "text": "\u001b[38;5;15m\u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
+ "text": "\u001b[38;5;15m\u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
},
{
"type": "line",
"is_current": false,
"lineno": 10,
- "text": "\u001b[38;5;15m\u001b[39m\u001b[38;5;15mlst\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15m(\u001b[39m"
+ "text": "\u001b[38;5;15m\u001b[39m\u001b[38;5;15mlst\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15m(\u001b[39m"
},
{
"type": "line",
@@ -609,7 +609,7 @@
"type": "line",
"is_current": false,
"lineno": 18,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;197m+\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15m[\u001b[39m\u001b[38;5;15m]\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;204m+\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15m[\u001b[39m\u001b[38;5;15m]\u001b[39m"
},
{
"type": "line",
@@ -724,7 +724,7 @@
"type": "line",
"is_current": true,
"lineno": 24,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;197;48;5;24m/\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m0\u001b[39;49m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;204;48;5;24m/\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m0\u001b[39;49m"
},
{
"type": "line",
@@ -832,25 +832,25 @@
"type": "line",
"is_current": false,
"lineno": 6,
- "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": false,
"lineno": 7,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": true,
"lineno": 8,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;197;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;204;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
},
{
"type": "line",
"is_current": false,
"lineno": 9,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
}
],
"variables": [
@@ -878,25 +878,25 @@
"type": "line",
"is_current": false,
"lineno": 6,
- "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": false,
"lineno": 7,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": true,
"lineno": 8,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;197;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;204;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
},
{
"type": "line",
"is_current": false,
"lineno": 9,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
}
],
"variables": [
@@ -934,25 +934,25 @@
"type": "line",
"is_current": false,
"lineno": 6,
- "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;81mdef\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mfoo\u001b[39m\u001b[38;5;15m(\u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;141m5\u001b[39m\u001b[38;5;15m)\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": false,
"lineno": 7,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mif\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15mn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m>\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m\u001b[38;5;15m:\u001b[39m"
},
{
"type": "line",
"is_current": true,
"lineno": 8,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;197;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15;48;5;24mfoo\u001b[39;49m\u001b[38;5;15;48;5;24m(\u001b[39;49m\u001b[38;5;15;48;5;24mn\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;204;48;5;24m-\u001b[39;49m\u001b[38;5;15;48;5;24m \u001b[39;49m\u001b[38;5;141;48;5;24m1\u001b[39;49m\u001b[38;5;15;48;5;24m)\u001b[39;49m"
},
{
"type": "line",
"is_current": false,
"lineno": 9,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;15mx\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m=\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m"
}
],
"variables": [
@@ -986,7 +986,7 @@
"type": "line",
"is_current": false,
"lineno": 24,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197m/\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mreturn\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m1\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204m/\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;141m0\u001b[39m"
},
{
"type": "line",
@@ -998,7 +998,7 @@
"type": "line",
"is_current": true,
"lineno": 26,
- "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mraise\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mTypeError\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;197mfrom\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15me\u001b[39m"
+ "text": "\u001b[38;5;15m \u001b[39m\u001b[38;5;81mraise\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;148mTypeError\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;204mfrom\u001b[39m\u001b[38;5;15m \u001b[39m\u001b[38;5;15me\u001b[39m"
}
],
"variables": [
diff --git a/tests/test_formatter.py b/tests/test_formatter.py
index 31862f8..2ef9ecf 100644
--- a/tests/test_formatter.py
+++ b/tests/test_formatter.py
@@ -3,8 +3,9 @@ import re
import sys
from contextlib import contextmanager
-import pytest
import pygments
+import pytest
+from asttokens.util import fstring_positions_work
from stack_data import Formatter, FrameInfo, Options, BlankLines
from tests.utils import compare_to_file
@@ -82,8 +83,7 @@ def test_example(capsys):
if sys.version_info[:2] < (3, 8):
f_string_suffix = 'old'
- elif sys.version_info[:2] == (3, 8):
- # lineno/col_offset in f-strings cannot be trusted in 3.8
+ elif not fstring_positions_work():
f_string_suffix = '3.8'
else:
f_string_suffix = 'new'
diff --git a/tests/test_serializer.py b/tests/test_serializer.py
index 807872d..bc8acca 100644
--- a/tests/test_serializer.py
+++ b/tests/test_serializer.py
@@ -39,4 +39,4 @@ def test_example():
)
- compare_to_file_json(result, "serialize")
+ compare_to_file_json(result, "serialize", pygmented=True)
diff --git a/tests/utils.py b/tests/utils.py
index 7ba00e2..1500086 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,9 +1,19 @@
import os
+import pygments
from littleutils import string_to_file, file_to_string, json_to_file, file_to_json
+def parse_version(version: str):
+ return tuple(int(x) for x in version.split("."))
+
+
+old_pygments = parse_version(pygments.__version__) < (2, 16, 1)
+
+
def compare_to_file(text, name):
+ if old_pygments and "pygment" in name:
+ return
filename = os.path.join(
os.path.dirname(__file__),
'golden_files',
@@ -16,7 +26,9 @@ def compare_to_file(text, name):
assert text == expected_output
-def compare_to_file_json(data, name):
+def compare_to_file_json(data, name, *, pygmented):
+ if old_pygments and pygmented:
+ return
filename = os.path.join(
os.path.dirname(__file__),
'golden_files',
| 0.6.2: Test cases fail when pygments >= 2.16
When building your module with pygments >= 2.16.0, the test cases will fail.
Here is the pytest output:
<details>
<summary>details</summary>
```shell
+ /usr/bin/python3 -m tox --current-env -q --recreate -e py311
============================= test session starts ==============================
platform linux -- Python 3.11.4, pytest-7.4.0, pluggy-1.3.0
cachedir: .tox/py311/.pytest_cache
rootdir: /builddir/build/BUILD/stack_data-0.6.2
collected 21 items
tests/test_core.py ............... [ 71%]
tests/test_formatter.py F. [ 80%]
tests/test_serializer.py F [ 85%]
tests/test_utils.py ... [100%]
=================================== FAILURES ===================================
_________________________________ test_example _________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7f7f6dff2750>
def test_example(capsys):
from .samples.formatter_example import bar, print_stack1, format_stack1, format_frame, f_string, blank_lines
@contextmanager
def check_example(name):
yield
stderr = capsys.readouterr().err
compare_to_file(stderr, name)
with check_example("variables"):
try:
bar()
except Exception:
MyFormatter(show_variables=True).print_exception()
> with check_example("pygmented"):
tests/test_formatter.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib64/python3.11/contextlib.py:144: in __exit__
next(self.gen)
tests/test_formatter.py:39: in check_example
compare_to_file(stderr, name)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
text = 'Traceback (most recent call last):\n File "formatter_example.py", line 21, in foo\n 9 | \x1b[38;5;15m\x1b[39m\x...Error\x1b[39m\x1b[38;5;15m \x1b[39m\x1b[38;5;204mfrom\x1b[39m\x1b[38;5;15m \x1b[39m\x1b[38;5;15me\x1b[39m\nTypeError\n'
name = 'pygmented'
def compare_to_file(text, name):
filename = os.path.join(
os.path.dirname(__file__),
'golden_files',
name + '.txt',
)
if os.environ.get('FIX_STACK_DATA_TESTS'):
string_to_file(text, filename)
else:
expected_output = file_to_string(filename)
> assert text == expected_output
E AssertionError
tests/utils.py:16: AssertionError
_________________________________ test_example _________________________________
def test_example():
from .samples.formatter_example import bar, format_frame, format_stack1
result = dict(
format_frame=(format_frame(MyFormatter())),
format_stack=format_stack1(MyFormatter(show_variables=True)),
)
try:
bar()
except Exception:
result.update(
plain=MyFormatter(show_variables=True).format_exception(),
pygmented=MyFormatter(show_variables=True, pygmented=True).format_exception(),
pygmented_html=MyFormatter(show_variables=True, pygmented=True, html=True).format_exception(),
)
> compare_to_file_json(result, "serialize")
tests/test_serializer.py:42:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = {'format_frame': {'filename': 'formatter_example.py', 'lineno': 51, 'lines': [{'is_current': False, 'lineno': 49, 'tex...ormatter_example.py', 'lineno': 8, 'lines': [{...}, {...}, {...}, {...}], 'name': 'foo', ...}, ...], 'tail': ''}], ...}
name = 'serialize'
def compare_to_file_json(data, name):
filename = os.path.join(
os.path.dirname(__file__),
'golden_files',
name + '.json',
)
if os.environ.get('FIX_STACK_DATA_TESTS'):
json_to_file(data, filename, indent=4)
else:
expected_output = file_to_json(filename)
> assert data == expected_output
E AssertionError
tests/utils.py:29: AssertionError
=============================== warnings summary ===============================
../../../../usr/lib64/python3.11/site-packages/pyximport/pyximport.py:51
/usr/lib64/python3.11/site-packages/pyximport/pyximport.py:51: DeprecationWarning: the imp module is deprecated in favour of importlib and slated for removal in Python 3.12; see the module's documentation for alternative uses
import imp
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
FAILED tests/test_formatter.py::test_example - AssertionError
FAILED tests/test_serializer.py::test_example - AssertionError
=================== 2 failed, 19 passed, 1 warning in 1.02s ====================
ERROR: InvocationError for command /usr/bin/pytest (exited with code 1)
___________________________________ summary ____________________________________
ERROR: py311: commands failed
```
</details>
I found that both of these failures involve Monokai's color styles, which were [updated](https://github.com/pygments/pygments/commit/d46519b340760cd4f8978d7f1349f8c287121a24) in pygments 2.16.0.
I hope to update the test cases to support the latest pygments; maybe you would want to support both versions of pygments?
Please let me know whether support for the old version of pygments is needed, and I'll try to open a PR soon.
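For what it's worth, one way to keep the golden files pinned to the new Monokai colors while not failing on older pygments is to gate the comparison on the installed version. This is only a sketch; it assumes `pygments.__version__` is a plain dotted version string, and the helper names are illustrative.
```python
import pygments


def parse_version(version: str) -> tuple:
    # "2.16.1" -> (2, 16, 1); pre-release suffixes are not handled here
    return tuple(int(x) for x in version.split("."))


# Monokai's colors changed in pygments 2.16, so the pygmented golden
# files only match on 2.16.1 and newer.
OLD_PYGMENTS = parse_version(pygments.__version__) < (2, 16, 1)


def compare_to_golden(text: str, expected: str, pygmented: bool) -> None:
    if OLD_PYGMENTS and pygmented:
        return  # colors differ on old pygments; skip rather than fail
    assert text == expected
```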
| 0.0 | df3566c10c7c1cc69e25c32c35666593481aee31 | [
"tests/test_formatter.py::test_example",
"tests/test_serializer.py::test_example"
]
| [
"tests/test_formatter.py::test_invalid_single_option"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-09-30 11:08:56+00:00 | mit | 1,035 |
|
algoo__hapic-134 | diff --git a/hapic/ext/aiohttp/context.py b/hapic/ext/aiohttp/context.py
index 8421567..bae729b 100644
--- a/hapic/ext/aiohttp/context.py
+++ b/hapic/ext/aiohttp/context.py
@@ -22,6 +22,7 @@ from hapic.exception import WorkflowException
from hapic.processor.main import Processor
from hapic.processor.main import ProcessValidationError
from hapic.processor.main import RequestParameters
+from hapic.util import LowercaseDictKeys
# Aiohttp regular expression to locate url parameters
AIOHTTP_RE_PATH_URL = re.compile(r"{([^:<>]+)(?::[^<>]+)?}")
@@ -35,8 +36,8 @@ class AiohttpRequestParameters(RequestParameters):
@property
async def body_parameters(self) -> dict:
if self._parsed_body is None:
- content_type = self.header_parameters.get("Content-Type")
- is_json = content_type == "application/json"
+ content_type = self.header_parameters.get("Content-Type", "")
+ is_json = content_type.lower() == "application/json"
if is_json:
self._parsed_body = await self._request.json()
@@ -60,7 +61,10 @@ class AiohttpRequestParameters(RequestParameters):
@property
def header_parameters(self):
- return dict(self._request.headers.items())
+ # NOTE BS 2019-01-21: headers can be read as lowercase
+ return LowercaseDictKeys(
+ (k.lower(), v) for k, v in self._request.headers.items()
+ )
@property
async def files_parameters(self):
diff --git a/hapic/ext/bottle/context.py b/hapic/ext/bottle/context.py
index 0bcb82f..e637f5e 100644
--- a/hapic/ext/bottle/context.py
+++ b/hapic/ext/bottle/context.py
@@ -17,6 +17,7 @@ from hapic.exception import RouteNotFound
from hapic.processor.main import Processor
from hapic.processor.main import ProcessValidationError
from hapic.processor.main import RequestParameters
+from hapic.util import LowercaseDictKeys
try: # Python 3.5+
from http import HTTPStatus
@@ -49,7 +50,9 @@ class BottleContext(BaseContext):
query_parameters = MultiDict(bottle.request.query.allitems())
body_parameters = dict(bottle.request.json or {})
form_parameters = MultiDict(bottle.request.forms.allitems())
- header_parameters = dict(bottle.request.headers)
+ header_parameters = LowercaseDictKeys(
+ (k.lower(), v) for k, v in bottle.request.headers.items()
+ )
files_parameters = dict(bottle.request.files)
return RequestParameters(
diff --git a/hapic/ext/flask/context.py b/hapic/ext/flask/context.py
index eaaef12..09f2d4f 100644
--- a/hapic/ext/flask/context.py
+++ b/hapic/ext/flask/context.py
@@ -14,6 +14,7 @@ from hapic.error.main import ErrorBuilderInterface
from hapic.processor.main import Processor
from hapic.processor.main import ProcessValidationError
from hapic.processor.main import RequestParameters
+from hapic.util import LowercaseDictKeys
try: # Python 3.5+
from http import HTTPStatus
@@ -51,7 +52,9 @@ class FlaskContext(BaseContext):
query_parameters=request.args, # TODO: Check
body_parameters=request.get_json(), # TODO: Check
form_parameters=request.form,
- header_parameters=request.headers,
+ header_parameters=LowercaseDictKeys(
+ (k.lower(), v) for k, v in request.headers.items()
+ ),
files_parameters=request.files,
)
diff --git a/hapic/ext/pyramid/context.py b/hapic/ext/pyramid/context.py
index c0fa0e0..683f513 100644
--- a/hapic/ext/pyramid/context.py
+++ b/hapic/ext/pyramid/context.py
@@ -16,6 +16,7 @@ from hapic.processor.main import Processor
from hapic.processor.main import ProcessValidationError
from hapic.processor.main import RequestParameters
from hapic.util import LOGGER_NAME
+from hapic.util import LowercaseDictKeys
try: # Python 3.5+
from http import HTTPStatus
@@ -72,7 +73,9 @@ class PyramidContext(BaseContext):
query_parameters=req.GET,
body_parameters=json_body,
form_parameters=req.POST,
- header_parameters=req.headers,
+ header_parameters=LowercaseDictKeys(
+ (k.lower(), v) for k, v in req.headers.items()
+ ),
files_parameters=files_parameters,
)
diff --git a/hapic/util.py b/hapic/util.py
index af4f3f8..b8fe7c2 100644
--- a/hapic/util.py
+++ b/hapic/util.py
@@ -1,2 +1,53 @@
# -*- coding: utf-8 -*-
+import typing
+
LOGGER_NAME = "hapic"
+
+
+class LowercaseDictKeys(dict):
+ """
+ Like a dict but try to use lowercase version of given keys.
+ Must give lowercase key to ths dict when fill it.
+ """
+
+ def get(
+ self, key: typing.Any, default_value: typing.Any = None
+ ) -> typing.Any:
+ """
+ Return value for given key.
+ Try with lowercase of given key. If not possible, do with given key.
+ """
+ try:
+ return super().get(key.lower(), default_value)
+ except AttributeError:
+ return super().get(key, default_value)
+
+ def __contains__(self, key: typing.Any) -> bool:
+ """
+ True if the dictionary has the specified key, else False.
+ Try with lowercase of given key. If not possible, do with given key.
+ """
+ try:
+ return super().__contains__(key.lower())
+ except AttributeError:
+ return super().__contains__(key)
+
+ def __delitem__(self, key: typing.Any) -> None:
+ """
+ Delete self[key].
+ Try with lowercase of given key. If not possible, do with given key.
+ """
+ try:
+ return super().__delitem__(key.lower())
+ except AttributeError:
+ return super().__delitem__(key)
+
+ def __getitem__(self, key: typing.Any) -> typing.Any:
+ """
+ Return value for given key.
+ Try with lowercase of given key. If not possible, do with given key.
+ """
+ try:
+ return super().__getitem__(key.lower())
+ except AttributeError:
+ return super().__getitem__(key)
| algoo/hapic | a7cc8fb684cd16a72c0d0422f9b2fe4c44ff770d | diff --git a/tests/ext/unit/test_aiohttp.py b/tests/ext/unit/test_aiohttp.py
index e75ba5c..22ef2f9 100644
--- a/tests/ext/unit/test_aiohttp.py
+++ b/tests/ext/unit/test_aiohttp.py
@@ -711,3 +711,23 @@ class TestAiohttpExt(object):
"message": "Validation error of input data",
"code": None,
} == json_
+
+ async def test_request_header__ok__lowercase_key(self, aiohttp_client):
+ hapic = Hapic(async_=True, processor_class=MarshmallowProcessor)
+
+ class HeadersSchema(marshmallow.Schema):
+ foo = marshmallow.fields.String(required=True)
+
+ @hapic.with_api_doc()
+ @hapic.input_headers(HeadersSchema())
+ async def hello(request, hapic_data: HapicData):
+ return web.json_response(hapic_data.headers)
+
+ app = web.Application(debug=True)
+ hapic.set_context(AiohttpContext(app))
+ app.router.add_get("/", hello)
+ client = await aiohttp_client(app)
+ response = await client.get("/", headers={"FOO": "bar"})
+ assert 200 == response.status
+ json_ = await response.json()
+ assert {"foo": "bar"} == json_
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
new file mode 100644
index 0000000..64b203e
--- /dev/null
+++ b/tests/unit/test_utils.py
@@ -0,0 +1,23 @@
+# coding: utf-8
+from hapic.util import LowercaseDictKeys
+
+
+class TestUtils(object):
+ def test_unit__get__ok__nominal_case(self):
+ lowercase_dict = LowercaseDictKeys([("foo", "bar")])
+ assert "bar" == lowercase_dict.get("foo")
+ assert "bar" == lowercase_dict.get("FOO")
+
+ def test_unit__by_key__ok__nominal_case(self):
+ lowercase_dict = LowercaseDictKeys([("foo", "bar")])
+ assert "bar" == lowercase_dict["foo"]
+ assert "bar" == lowercase_dict["FOO"]
+
+ def test_unit__in__ok__nominal_case(self):
+ lowercase_dict = LowercaseDictKeys([("foo", "bar")])
+ assert "foo" in lowercase_dict
+ assert "FOO" in lowercase_dict
+
+ def test_unit__del__ok__nominal_case(self):
+ lowercase_dict = LowercaseDictKeys([("foo", "bar")])
+ del lowercase_dict["FOO"]
| Must not be case sensitive on headers
In hapic/ext/aiohttp/context.py#L38, the header read is case sensitive, but it must not be:
``` python
content_type = self.header_parameters.get("Content-Type")
```
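A minimal sketch of a fix in the direction the patch above takes with `LowercaseDictKeys`: fill the dict with lowercased keys and lowercase incoming keys on lookup. Only `get` and `__getitem__` are shown here; a real fix would cover `__contains__` and `__delitem__` too.
```python
class LowercaseDictKeys(dict):
    """A dict filled with lowercase keys that also accepts
    mixed-case keys on lookup."""

    def get(self, key, default=None):
        try:
            return super().get(key.lower(), default)
        except AttributeError:  # non-string key
            return super().get(key, default)

    def __getitem__(self, key):
        try:
            return super().__getitem__(key.lower())
        except AttributeError:
            return super().__getitem__(key)


headers = LowercaseDictKeys(
    (k.lower(), v) for k, v in {"Content-Type": "application/json"}.items()
)
assert headers.get("Content-Type") == "application/json"
assert headers.get("content-type") == "application/json"
```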
| 0.0 | a7cc8fb684cd16a72c0d0422f9b2fe4c44ff770d | [
"tests/unit/test_utils.py::TestUtils::test_unit__get__ok__nominal_case",
"tests/unit/test_utils.py::TestUtils::test_unit__by_key__ok__nominal_case",
"tests/unit/test_utils.py::TestUtils::test_unit__in__ok__nominal_case",
"tests/unit/test_utils.py::TestUtils::test_unit__del__ok__nominal_case"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-01-21 09:54:22+00:00 | mit | 1,036 |
|
algorand__py-algorand-sdk-171 | diff --git a/algosdk/future/transaction.py b/algosdk/future/transaction.py
index ce6e283..155826d 100644
--- a/algosdk/future/transaction.py
+++ b/algosdk/future/transaction.py
@@ -703,8 +703,8 @@ class AssetCreateTxn(AssetConfigTxn):
"""
def __init__(self, sender, sp, total, decimals,
default_frozen, *,
- manager, reserve, freeze, clawback,
- unit_name, asset_name, url,
+ manager=None, reserve=None, freeze=None, clawback=None,
+ unit_name="", asset_name="", url="",
metadata_hash=None,
note=None, lease=None, rekey_to=None):
super().__init__(sender=sender, sp=sp, total=total, decimals=decimals,
@@ -1151,8 +1151,8 @@ class ApplicationCallTxn(Transaction):
@staticmethod
def state_schema(schema):
"""Confirm the argument is a StateSchema, or false which is coerced to None"""
- if not schema:
- return None # Coerce false values to None, to help __eq__
+ if not schema or not schema.dictify():
+ return None # Coerce false/empty values to None, to help __eq__
assert isinstance(schema, StateSchema), f"{schema} is not a StateSchema"
return schema
diff --git a/algosdk/v2client/algod.py b/algosdk/v2client/algod.py
index 67c6bd2..322d8fa 100644
--- a/algosdk/v2client/algod.py
+++ b/algosdk/v2client/algod.py
@@ -169,6 +169,8 @@ class AlgodClient:
Returns:
str: transaction ID
"""
+ assert not isinstance(txn, future.transaction.Transaction), \
+ f"Attempt to send UNSIGNED transaction {txn}"
return self.send_raw_transaction(encoding.msgpack_encode(txn),
**kwargs)
@@ -243,6 +245,8 @@ class AlgodClient:
"""
serialized = []
for txn in txns:
+ assert not isinstance(txn, future.transaction.Transaction), \
+ f"Attempt to send UNSIGNED transaction {txn}"
serialized.append(base64.b64decode(encoding.msgpack_encode(txn)))
return self.send_raw_transaction(base64.b64encode(
| algorand/py-algorand-sdk | ab1a24a55fe0178acab563046e0d34cb8700421c | diff --git a/test_unit.py b/test_unit.py
index 7f30d64..a0fa345 100644
--- a/test_unit.py
+++ b/test_unit.py
@@ -852,7 +852,25 @@ class TestApplicationTransactions(unittest.TestCase):
self.assertEqual(create.dictify(), call.dictify())
self.assertEqual(create, call)
self.assertEqual(call, create)
-
+
+ def test_application_create_schema(self):
+ approve = b"\0"
+ clear = b"\1"
+ zero_schema = transaction.StateSchema(0, 0)
+ params = transaction.SuggestedParams(0, 1, 100, self.genesis)
+ for oc in transaction.OnComplete:
+ # verify that a schema with 0 uints and 0 bytes behaves the same as no schema
+ txn_zero_schema = transaction.ApplicationCreateTxn(self.sender, params, oc,
+ approve, clear,
+ zero_schema, zero_schema)
+ txn_none_schema = transaction.ApplicationCreateTxn(self.sender, params, oc,
+ approve, clear,
+ None, None)
+ # Check the dict first, it's important on it's own, and it
+ # also gives more a meaningful error if they're not equal.
+ self.assertEqual(txn_zero_schema.dictify(), txn_none_schema.dictify())
+ self.assertEqual(txn_zero_schema, txn_none_schema)
+ self.assertEqual(txn_none_schema, txn_zero_schema)
def test_application_update(self):
empty = b""
| Python SDK should detect the meaningless schema and not msgpack it.
If schemas are defined with all-zero values, a signature validation error occurs. This should be checked as described below.
These tests:
```python
if self.local_schema:
    d["apls"] = self.local_schema.dictify()
if self.global_schema:
    d["apgs"] = self.global_schema.dictify()
```
should be something like `if self.schema and not self.schema.empty():`, with `empty()` defined to return `True` if both ints and slices are 0.
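A sketch of that suggestion follows. `StateSchema` is reduced to its two counters and `empty()` is hypothetical; the merged fix takes a slightly different route and coerces a schema whose `dictify()` is empty to `None` instead.
```python
class StateSchema:
    def __init__(self, num_uints=0, num_byte_slices=0):
        self.num_uints = num_uints
        self.num_byte_slices = num_byte_slices

    def empty(self) -> bool:
        # True when the schema carries no information and should be
        # left out of the msgpack encoding entirely.
        return self.num_uints == 0 and self.num_byte_slices == 0

    def dictify(self) -> dict:
        d = {}
        if self.num_uints:
            d["nui"] = self.num_uints
        if self.num_byte_slices:
            d["nbs"] = self.num_byte_slices
        return d


def dictify_schemas(local_schema, global_schema) -> dict:
    d = {}
    if local_schema and not local_schema.empty():
        d["apls"] = local_schema.dictify()
    if global_schema and not global_schema.empty():
        d["apgs"] = global_schema.dictify()
    return d


# A zero schema now behaves exactly like passing no schema at all.
assert dictify_schemas(StateSchema(0, 0), None) == {}
```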
| 0.0 | ab1a24a55fe0178acab563046e0d34cb8700421c | [
"test_unit.py::TestApplicationTransactions::test_application_create_schema"
]
| [
"test_unit.py::TestPaymentTransaction::test_asset_empty_address_error",
"test_unit.py::TestPaymentTransaction::test_asset_transfer_float_amt",
"test_unit.py::TestPaymentTransaction::test_asset_transfer_negative_amt",
"test_unit.py::TestPaymentTransaction::test_error_empty_receiver_asset_txn",
"test_unit.py::TestPaymentTransaction::test_error_empty_receiver_txn",
"test_unit.py::TestPaymentTransaction::test_group_id",
"test_unit.py::TestPaymentTransaction::test_min_txn_fee",
"test_unit.py::TestPaymentTransaction::test_note_wrong_length",
"test_unit.py::TestPaymentTransaction::test_note_wrong_type",
"test_unit.py::TestPaymentTransaction::test_pay_float_amt",
"test_unit.py::TestPaymentTransaction::test_pay_negative_amt",
"test_unit.py::TestPaymentTransaction::test_serialize",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_accept",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_config",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_create",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_create_decimal",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_destroy",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_freeze",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_revoke",
"test_unit.py::TestPaymentTransaction::test_serialize_asset_transfer",
"test_unit.py::TestPaymentTransaction::test_serialize_gen",
"test_unit.py::TestPaymentTransaction::test_serialize_keyreg",
"test_unit.py::TestPaymentTransaction::test_serialize_pay",
"test_unit.py::TestPaymentTransaction::test_serialize_pay_lease",
"test_unit.py::TestPaymentTransaction::test_serialize_txgroup",
"test_unit.py::TestPaymentTransaction::test_serialize_with_note_max_length",
"test_unit.py::TestPaymentTransaction::test_serialize_with_note_string_encode",
"test_unit.py::TestPaymentTransaction::test_serialize_zero_amt",
"test_unit.py::TestPaymentTransaction::test_serialize_zero_receiver",
"test_unit.py::TestPaymentTransaction::test_sign",
"test_unit.py::TestPaymentTransaction::test_sign_logic_multisig",
"test_unit.py::TestAssetConfigConveniences::test_asset_create",
"test_unit.py::TestAssetConfigConveniences::test_asset_destroy",
"test_unit.py::TestAssetConfigConveniences::test_asset_update",
"test_unit.py::TestAssetTransferConveniences::test_asset_closeout",
"test_unit.py::TestAssetTransferConveniences::test_asset_optin",
"test_unit.py::TestApplicationTransactions::test_application_call",
"test_unit.py::TestApplicationTransactions::test_application_create",
"test_unit.py::TestApplicationTransactions::test_application_delete",
"test_unit.py::TestApplicationTransactions::test_application_update",
"test_unit.py::TestMnemonic::test_bytes_wrong_len",
"test_unit.py::TestMnemonic::test_case_irrelevance",
"test_unit.py::TestMnemonic::test_key_wrong_len",
"test_unit.py::TestMnemonic::test_mnemonic_private_key",
"test_unit.py::TestMnemonic::test_mnemonic_wrong_len",
"test_unit.py::TestMnemonic::test_short_words",
"test_unit.py::TestMnemonic::test_whitespace_irrelevance",
"test_unit.py::TestMnemonic::test_word_not_in_list",
"test_unit.py::TestMnemonic::test_wordlist_integrity",
"test_unit.py::TestMnemonic::test_wrong_checksum",
"test_unit.py::TestMnemonic::test_zero_mnemonic",
"test_unit.py::TestAddress::test_encode_decode",
"test_unit.py::TestAddress::test_is_valid",
"test_unit.py::TestMultisig::test_errors",
"test_unit.py::TestMultisig::test_merge",
"test_unit.py::TestMultisig::test_msig_address",
"test_unit.py::TestMultisig::test_sign",
"test_unit.py::TestMsgpack::test_asset_accept",
"test_unit.py::TestMsgpack::test_asset_config",
"test_unit.py::TestMsgpack::test_asset_config_with_decimal",
"test_unit.py::TestMsgpack::test_asset_create",
"test_unit.py::TestMsgpack::test_asset_destroy",
"test_unit.py::TestMsgpack::test_asset_freeze",
"test_unit.py::TestMsgpack::test_asset_revoke",
"test_unit.py::TestMsgpack::test_asset_transfer",
"test_unit.py::TestMsgpack::test_bid",
"test_unit.py::TestMsgpack::test_keyreg_txn",
"test_unit.py::TestMsgpack::test_multisig_txn",
"test_unit.py::TestMsgpack::test_payment_txn",
"test_unit.py::TestMsgpack::test_signed_txn",
"test_unit.py::TestSignBytes::test_sign",
"test_unit.py::TestSignBytes::test_verify_negative",
"test_unit.py::TestLogic::test_check_program",
"test_unit.py::TestLogic::test_parse_bytecblock",
"test_unit.py::TestLogic::test_parse_intcblock",
"test_unit.py::TestLogic::test_parse_uvarint",
"test_unit.py::TestLogic::test_teal_sign",
"test_unit.py::TestLogicSig::test_basic",
"test_unit.py::TestLogicSig::test_multisig",
"test_unit.py::TestLogicSig::test_signature",
"test_unit.py::TestLogicSig::test_transaction",
"test_unit.py::TestTemplate::test_HTLC",
"test_unit.py::TestTemplate::test_dynamic_fee",
"test_unit.py::TestTemplate::test_limit_order_a",
"test_unit.py::TestTemplate::test_periodic_payment",
"test_unit.py::TestTemplate::test_split",
"test_unit.py::TestDryrun::test_create_request",
"test_unit.py::TestDryrun::test_global_state",
"test_unit.py::TestDryrun::test_local_state",
"test_unit.py::TestDryrun::test_no_error",
"test_unit.py::TestDryrun::test_pass_reject"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-01-26 23:19:55+00:00 | mit | 1,037 |
|
algorand__pyteal-153 | diff --git a/pyteal/ast/__init__.py b/pyteal/ast/__init__.py
index 9712dc7..fbc12cd 100644
--- a/pyteal/ast/__init__.py
+++ b/pyteal/ast/__init__.py
@@ -6,6 +6,7 @@ from .leafexpr import LeafExpr
from .addr import Addr
from .bytes import Bytes
from .int import Int, EnumInt
+from .methodsig import MethodSignature
# properties
from .arg import Arg
@@ -126,6 +127,7 @@ __all__ = [
"Bytes",
"Int",
"EnumInt",
+ "MethodSignature",
"Arg",
"TxnType",
"TxnField",
diff --git a/pyteal/ast/methodsig.py b/pyteal/ast/methodsig.py
new file mode 100644
index 0000000..de9bd97
--- /dev/null
+++ b/pyteal/ast/methodsig.py
@@ -0,0 +1,43 @@
+from typing import TYPE_CHECKING
+from pyteal.errors import TealInputError
+
+from pyteal.types import TealType
+
+from ..types import TealType
+from ..ir import TealOp, Op, TealBlock
+from .leafexpr import LeafExpr
+
+if TYPE_CHECKING:
+ from ..compiler import CompileOptions
+
+
+class MethodSignature(LeafExpr):
+ """An expression that represents an ABI method selector"""
+
+ def __init__(self, methodName: str) -> None:
+ """Create a new method selector for ABI method call.
+
+ Args:
+ methodName: A string containing a valid ABI method signature
+ """
+ super().__init__()
+ if type(methodName) is not str:
+ raise TealInputError(
+ "invalid input type {} to Method".format(type(methodName))
+ )
+ elif len(methodName) == 0:
+ raise TealInputError("invalid input empty string to Method")
+ self.methodName = methodName
+
+ def __teal__(self, options: "CompileOptions"):
+ op = TealOp(self, Op.method_signature, '"{}"'.format(self.methodName))
+ return TealBlock.FromOp(options, op)
+
+ def __str__(self) -> str:
+ return "(method: {})".format(self.methodName)
+
+ def type_of(self) -> TealType:
+ return TealType.bytes
+
+
+MethodSignature.__module__ = "pyteal"
diff --git a/pyteal/ast/subroutine.py b/pyteal/ast/subroutine.py
index 5c1a211..69dd54e 100644
--- a/pyteal/ast/subroutine.py
+++ b/pyteal/ast/subroutine.py
@@ -10,7 +10,6 @@ from .seq import Seq
from .scratchvar import ScratchVar
if TYPE_CHECKING:
- from ..ir import TealSimpleBlock
from ..compiler import CompileOptions
@@ -19,7 +18,10 @@ class SubroutineDefinition:
nextSubroutineId = 0
def __init__(
- self, implementation: Callable[..., Expr], returnType: TealType
+ self,
+ implementation: Callable[..., Expr],
+ returnType: TealType,
+ nameStr: str = None,
) -> None:
super().__init__()
self.id = SubroutineDefinition.nextSubroutineId
@@ -53,6 +55,7 @@ class SubroutineDefinition:
self.returnType = returnType
self.declaration: Optional["SubroutineDeclaration"] = None
+ self.__name = self.implementation.__name__ if nameStr is None else nameStr
def getDeclaration(self) -> "SubroutineDeclaration":
if self.declaration is None:
@@ -61,7 +64,7 @@ class SubroutineDefinition:
return self.declaration
def name(self) -> str:
- return self.implementation.__name__
+ return self.__name
def argumentCount(self) -> int:
return len(self.implementationParams)
@@ -181,7 +184,7 @@ class Subroutine:
])
"""
- def __init__(self, returnType: TealType) -> None:
+ def __init__(self, returnType: TealType, name: str = None) -> None:
"""Define a new subroutine with the given return type.
Args:
@@ -189,9 +192,10 @@ class Subroutine:
TealType.none indicates that this subroutine does not return any value.
"""
self.returnType = returnType
+ self.name = name
def __call__(self, fnImplementation: Callable[..., Expr]) -> Callable[..., Expr]:
- subroutine = SubroutineDefinition(fnImplementation, self.returnType)
+ subroutine = SubroutineDefinition(fnImplementation, self.returnType, self.name)
@wraps(fnImplementation)
def subroutineCall(*args: Expr, **kwargs) -> Expr:
diff --git a/pyteal/compiler/constants.py b/pyteal/compiler/constants.py
index 6d54e66..3642194 100644
--- a/pyteal/compiler/constants.py
+++ b/pyteal/compiler/constants.py
@@ -7,11 +7,7 @@ from algosdk import encoding
from ..ir import (
Op,
TealOp,
- TealLabel,
TealComponent,
- TealBlock,
- TealSimpleBlock,
- TealConditionalBlock,
)
from ..util import unescapeStr, correctBase32Padding
from ..errors import TealInternalError
@@ -94,6 +90,28 @@ def extractAddrValue(op: TealOp) -> Union[str, bytes]:
return value
+def extractMethodSigValue(op: TealOp) -> bytes:
+ """Extract the constant value being loaded by a TealOp whose op is Op.method.
+
+ Returns:
+ The bytes of method selector computed from the method signature that the op is loading.
+ """
+ if len(op.args) != 1 or type(op.args[0]) != str:
+ raise TealInternalError("Unexpected args in method opcode: {}".format(op.args))
+
+ methodSignature = cast(str, op.args[0])
+ if methodSignature[0] == methodSignature[-1] and methodSignature.startswith('"'):
+ methodSignature = methodSignature[1:-1]
+ else:
+ raise TealInternalError(
+ "Method signature opcode error: signatue {} not wrapped with double-quotes".format(
+ methodSignature
+ )
+ )
+ methodSelector = encoding.checksum(bytes(methodSignature, "utf-8"))[:4]
+ return methodSelector
+
+
def createConstantBlocks(ops: List[TealComponent]) -> List[TealComponent]:
"""Convert TEAL code from using pseudo-ops for constants to using assembled constant blocks.
@@ -124,6 +142,9 @@ def createConstantBlocks(ops: List[TealComponent]) -> List[TealComponent]:
elif basicOp == Op.addr:
addrValue = extractAddrValue(op)
byteFreqs[addrValue] = byteFreqs.get(addrValue, 0) + 1
+ elif basicOp == Op.method_signature:
+ methodValue = extractMethodSigValue(op)
+ byteFreqs[methodValue] = byteFreqs.get(methodValue, 0) + 1
assembled: List[TealComponent] = []
@@ -177,12 +198,22 @@ def createConstantBlocks(ops: List[TealComponent]) -> List[TealComponent]:
assembled.append(TealOp(op.expr, Op.intc, index, "//", *op.args))
continue
- if basicOp == Op.byte or basicOp == Op.addr:
- byteValue = (
- extractBytesValue(op)
- if basicOp == Op.byte
- else extractAddrValue(op)
- )
+ if (
+ basicOp == Op.byte
+ or basicOp == Op.addr
+ or basicOp == Op.method_signature
+ ):
+ if basicOp == Op.byte:
+ byteValue = extractBytesValue(op)
+ elif basicOp == Op.addr:
+ byteValue = extractAddrValue(op)
+ elif basicOp == Op.method_signature:
+ byteValue = extractMethodSigValue(op)
+ else:
+ raise TealInternalError(
+ "Expect a byte-like constant opcode, get {}".format(op)
+ )
+
if byteFreqs[byteValue] == 1:
encodedValue = (
("0x" + byteValue.hex())
diff --git a/pyteal/ir/ops.py b/pyteal/ir/ops.py
index 19f3b54..bb5d404 100644
--- a/pyteal/ir/ops.py
+++ b/pyteal/ir/ops.py
@@ -74,6 +74,7 @@ class Op(Enum):
bytec_3 = OpType("bytec_3", Mode.Signature | Mode.Application, 2)
byte = OpType("byte", Mode.Signature | Mode.Application, 2)
addr = OpType("addr", Mode.Signature | Mode.Application, 2)
+ method_signature = OpType("method", Mode.Signature | Mode.Application, 2)
arg = OpType("arg", Mode.Signature, 2)
txn = OpType("txn", Mode.Signature | Mode.Application, 2)
global_ = OpType("global", Mode.Signature | Mode.Application, 2)
diff --git a/pyteal/ir/tealop.py b/pyteal/ir/tealop.py
index 9207231..b136023 100644
--- a/pyteal/ir/tealop.py
+++ b/pyteal/ir/tealop.py
@@ -1,4 +1,4 @@
-from typing import cast, Union, List, Optional, TYPE_CHECKING
+from typing import Union, List, Optional, TYPE_CHECKING
from .tealcomponent import TealComponent
from .labelref import LabelReference
| algorand/pyteal | a9c42e307e548cc2e6a1fa8b38149e240ec07ceb | diff --git a/pyteal/ast/methodsig_test.py b/pyteal/ast/methodsig_test.py
new file mode 100644
index 0000000..6a941ea
--- /dev/null
+++ b/pyteal/ast/methodsig_test.py
@@ -0,0 +1,27 @@
+import pytest
+
+from pyteal.ast.methodsig import MethodSignature
+
+from .. import *
+
+
+def test_method():
+ expr = MethodSignature("add(uint64,uint64)uint64")
+ assert expr.type_of() == TealType.bytes
+
+ expected = TealSimpleBlock(
+ [TealOp(expr, Op.method_signature, '"add(uint64,uint64)uint64"')]
+ )
+ actual, _ = expr.__teal__(CompileOptions())
+ assert expected == actual
+
+
+def test_method_invalid():
+ with pytest.raises(TealInputError):
+ MethodSignature(114514)
+
+ with pytest.raises(TealInputError):
+ MethodSignature(['"m0()void"', '"m1()uint64"'])
+
+ with pytest.raises(TealInputError):
+ MethodSignature("")
diff --git a/pyteal/compiler/constants_test.py b/pyteal/compiler/constants_test.py
index 12d97b3..bd69d46 100644
--- a/pyteal/compiler/constants_test.py
+++ b/pyteal/compiler/constants_test.py
@@ -5,6 +5,7 @@ from .constants import (
extractBytesValue,
extractAddrValue,
createConstantBlocks,
+ extractMethodSigValue,
)
@@ -63,6 +64,44 @@ def test_extractAddrValue():
assert actual == expected
+# test case came from: https://gist.github.com/jasonpaulos/99e4f8a75f2fc2ec9b8073c064530359
+def test_extractMethodValue():
+ tests = [
+ (
+ TealOp(None, Op.method_signature, '"create(uint64)uint64"'),
+ b"\x43\x46\x41\x01",
+ ),
+ (TealOp(None, Op.method_signature, '"update()void"'), b"\xa0\xe8\x18\x72"),
+ (
+ TealOp(None, Op.method_signature, '"optIn(string)string"'),
+ b"\xcf\xa6\x8e\x36",
+ ),
+ (TealOp(None, Op.method_signature, '"closeOut()string"'), b"\xa9\xf4\x2b\x3d"),
+ (TealOp(None, Op.method_signature, '"delete()void"'), b"\x24\x37\x8d\x3c"),
+ (
+ TealOp(None, Op.method_signature, '"add(uint64,uint64)uint64"'),
+ b"\xfe\x6b\xdf\x69",
+ ),
+ (TealOp(None, Op.method_signature, '"empty()void"'), b"\xa8\x8c\x26\xa5"),
+ (
+ TealOp(None, Op.method_signature, '"payment(pay,uint64)bool"'),
+ b"\x3e\x3b\x3d\x28",
+ ),
+ (
+ TealOp(
+ None,
+ Op.method_signature,
+ '"referenceTest(account,application,account,asset,account,asset,asset,application,application)uint8[9]"',
+ ),
+ b"\x0d\xf0\x05\x0f",
+ ),
+ ]
+
+ for op, expected in tests:
+ actual = extractMethodSigValue(op)
+ assert actual == expected
+
+
def test_createConstantBlocks_empty():
ops = []
@@ -184,12 +223,14 @@ def test_createConstantBlocks_pushbytes():
ops = [
TealOp(None, Op.byte, "0x0102"),
TealOp(None, Op.byte, "0x0103"),
+ TealOp(None, Op.method_signature, '"empty()void"'),
TealOp(None, Op.concat),
]
expected = [
TealOp(None, Op.pushbytes, "0x0102", "//", "0x0102"),
TealOp(None, Op.pushbytes, "0x0103", "//", "0x0103"),
+ TealOp(None, Op.pushbytes, "0xa88c26a5", "//", '"empty()void"'),
TealOp(None, Op.concat),
]
@@ -240,6 +281,9 @@ def test_createConstantBlocks_byteblock_multiple():
None, Op.addr, "WSJHNPJ6YCLX5K4GUMQ4ISPK3ABMS3AL3F6CSVQTCUI5F4I65PWEMCWT3M"
),
TealOp(None, Op.concat),
+ TealOp(None, Op.method_signature, '"closeOut()string"'),
+ TealOp(None, Op.concat),
+ TealOp(None, Op.byte, "base64(qfQrPQ==)"),
]
expected = [
@@ -249,6 +293,7 @@ def test_createConstantBlocks_byteblock_multiple():
"0x0102",
"0x74657374",
"0xb49276bd3ec0977eab86a321c449ead802c96c0bd97c2956131511d2f11eebec",
+ "0xa9f42b3d",
),
TealOp(None, Op.bytec_0, "//", "0x0102"),
TealOp(None, Op.bytec_0, "//", "base64(AQI=)"),
@@ -273,6 +318,9 @@ def test_createConstantBlocks_byteblock_multiple():
"WSJHNPJ6YCLX5K4GUMQ4ISPK3ABMS3AL3F6CSVQTCUI5F4I65PWEMCWT3M",
),
TealOp(None, Op.concat),
+ TealOp(None, Op.bytec_3, "//", '"closeOut()string"'),
+ TealOp(None, Op.concat),
+ TealOp(None, Op.bytec_3, "//", "base64(qfQrPQ==)"),
]
actual = createConstantBlocks(ops)
| Use the `method` pseudo-op when routing to ABI methods
## Problem
Currently the `method` pseudo-op, which converts an ABI method's signature to its selector, is not available in PyTEAL. Making this pseudo-op available should make it easier to understand and debug ABI apps that are written in PyTEAL.
## Solution (at 30,000 feet)
Introduce `Op.method(a_method)`, which takes the ABI method `a_method` with signature `THE_SIGNATURE` and, when converted to TEAL, produces the following snippet:
```
method "THE_SIGNATURE"
```
That's just a shot in the dark; there are probably better ways to achieve this.
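For reference, the 4-byte selector that `method "THE_SIGNATURE"` assembles to can be reproduced off-chain. A minimal sketch (assuming your Python's OpenSSL build exposes `sha512_256` through hashlib; the test vector matches the test patch below):
```python
import hashlib

def method_selector(signature: str) -> bytes:
    # ARC-4 method selector: first 4 bytes of the SHA-512/256 hash of the signature
    return hashlib.new("sha512_256", signature.encode("utf-8")).digest()[:4]

assert method_selector("add(uint64,uint64)uint64") == bytes.fromhex("fe6bdf69")
```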
## Dependencies
<!-- Does the solution have any team or design dependencies? -->
## Urgency
Moderate - the bang for the buck on this one is pretty high. | 0.0 | a9c42e307e548cc2e6a1fa8b38149e240ec07ceb | [
"pyteal/ast/methodsig_test.py::test_method",
"pyteal/ast/methodsig_test.py::test_method_invalid",
"pyteal/compiler/constants_test.py::test_extractIntValue",
"pyteal/compiler/constants_test.py::test_extractBytesValue",
"pyteal/compiler/constants_test.py::test_extractAddrValue",
"pyteal/compiler/constants_test.py::test_extractMethodValue",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_empty",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_no_consts",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_pushint",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_intblock_single",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_intblock_multiple",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_intblock_pushint",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_pushbytes",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_byteblock_single",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_byteblock_multiple",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_byteblock_pushbytes",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_all",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_tmpl_int",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_tmpl_int_mixed",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_tmpl_bytes",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_tmpl_bytes_mixed",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_tmpl_all",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_intc",
"pyteal/compiler/constants_test.py::test_createConstantBlocks_small_constant"
]
| []
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-12-30 17:36:20+00:00 | mit | 1,038 |
|
algorand__pyteal-234 | diff --git a/pyteal/compiler/subroutines.py b/pyteal/compiler/subroutines.py
index 1b5e63e..78bfa7a 100644
--- a/pyteal/compiler/subroutines.py
+++ b/pyteal/compiler/subroutines.py
@@ -94,7 +94,6 @@ def spillLocalSlotsDuringRecursion(
for subroutine, reentryPoints in recursivePoints.items():
slots = list(sorted(slot for slot in localSlots[subroutine]))
- numArgs = subroutine.argumentCount()
if len(reentryPoints) == 0 or len(slots) == 0:
# no need to spill slots
@@ -107,13 +106,26 @@ def spillLocalSlotsDuringRecursion(
before: List[TealComponent] = []
after: List[TealComponent] = []
- if len(reentryPoints.intersection(stmt.getSubroutines())) != 0:
+ calledSubroutines = stmt.getSubroutines()
+ # the only opcode that references subroutines is callsub, and it should only ever
+ # reference one subroutine at a time
+ assert (
+ len(calledSubroutines) <= 1
+ ), "Multiple subroutines are called from the same TealComponent"
+
+ reentrySubroutineCalls = list(reentryPoints.intersection(calledSubroutines))
+ if len(reentrySubroutineCalls) != 0:
# A subroutine is being called which may reenter the current subroutine, so insert
# ops to spill local slots to the stack before calling the subroutine and also to
# restore the local slots after returning from the subroutine. This prevents a
# reentry into the current subroutine from modifying variables we are currently
# using.
+ # reentrySubroutineCalls should have a length of 1, since calledSubroutines has a
+ # maximum length of 1
+ reentrySubroutineCall = reentrySubroutineCalls[0]
+ numArgs = reentrySubroutineCall.argumentCount()
+
digArgs = True
coverSpilledSlots = False
uncoverArgs = False
| algorand/pyteal | f2598da3dec6041051df4442cd9cc7e3c373674d | diff --git a/pyteal/compiler/compiler_test.py b/pyteal/compiler/compiler_test.py
index 7a36fcf..74b5799 100644
--- a/pyteal/compiler/compiler_test.py
+++ b/pyteal/compiler/compiler_test.py
@@ -1153,7 +1153,7 @@ retsub
assert actual == expected
-def test_compile_subroutine_mutually_recursive():
+def test_compile_subroutine_mutually_recursive_4():
@Subroutine(TealType.uint64)
def isEven(i: Expr) -> Expr:
return If(i == Int(0), Int(1), Not(isOdd(i - Int(1))))
@@ -1285,6 +1285,147 @@ retsub
assert actual == expected
+def test_compile_subroutine_mutually_recursive_different_arg_count_4():
+ @Subroutine(TealType.uint64)
+ def factorial(i: Expr) -> Expr:
+ return If(
+ i <= Int(1),
+ Int(1),
+ factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i,
+ )
+
+ @Subroutine(TealType.uint64)
+ def factorial_intermediate(i: Expr, j: Expr) -> Expr:
+ return Seq(Pop(j), factorial(i))
+
+ program = Return(factorial(Int(4)) == Int(24))
+
+ expected = """#pragma version 4
+int 4
+callsub factorial_0
+int 24
+==
+return
+
+// factorial
+factorial_0:
+store 0
+load 0
+int 1
+<=
+bnz factorial_0_l2
+load 0
+int 1
+-
+byte "inconsequential"
+load 0
+dig 2
+dig 2
+callsub factorialintermediate_1
+swap
+store 0
+swap
+pop
+swap
+pop
+load 0
+*
+b factorial_0_l3
+factorial_0_l2:
+int 1
+factorial_0_l3:
+retsub
+
+// factorial_intermediate
+factorialintermediate_1:
+store 2
+store 1
+load 2
+pop
+load 1
+load 1
+load 2
+dig 2
+callsub factorial_0
+store 1
+store 2
+load 1
+swap
+store 1
+swap
+pop
+retsub
+ """.strip()
+ actual = compileTeal(program, Mode.Application, version=4, assembleConstants=False)
+ assert actual == expected
+
+
+def test_compile_subroutine_mutually_recursive_different_arg_count_5():
+ @Subroutine(TealType.uint64)
+ def factorial(i: Expr) -> Expr:
+ return If(
+ i <= Int(1),
+ Int(1),
+ factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i,
+ )
+
+ @Subroutine(TealType.uint64)
+ def factorial_intermediate(i: Expr, j: Expr) -> Expr:
+ return Seq(Log(j), factorial(i))
+
+ program = Return(factorial(Int(4)) == Int(24))
+
+ expected = """#pragma version 5
+int 4
+callsub factorial_0
+int 24
+==
+return
+
+// factorial
+factorial_0:
+store 0
+load 0
+int 1
+<=
+bnz factorial_0_l2
+load 0
+int 1
+-
+byte "inconsequential"
+load 0
+cover 2
+callsub factorialintermediate_1
+swap
+store 0
+load 0
+*
+b factorial_0_l3
+factorial_0_l2:
+int 1
+factorial_0_l3:
+retsub
+
+// factorial_intermediate
+factorialintermediate_1:
+store 2
+store 1
+load 2
+log
+load 1
+load 1
+load 2
+uncover 2
+callsub factorial_0
+cover 2
+store 2
+store 1
+retsub
+ """.strip()
+ actual = compileTeal(program, Mode.Application, version=5, assembleConstants=False)
+ assert actual == expected
+
+
def test_compile_loop_in_subroutine():
@Subroutine(TealType.none)
def setState(value: Expr) -> Expr:
| Incorrect TEAL code produced for mutually recursive subroutines with different argument counts
## Summary
There is a bug in the implementation of [`spillLocalSlotsDuringRecursion`](https://github.com/algorand/pyteal/blob/f2598da3dec6041051df4442cd9cc7e3c373674d/pyteal/compiler/subroutines.py#L65-L204) that causes incorrect TEAL code to be generated for mutually recursive subroutines with different argument counts.
The problem is that the variable `numArgs` is the number of arguments of the calling subroutine, **NOT** the number of arguments in the called subroutine. This will cause incorrect slot spilling and restoring code to be generated, in some cases producing an infinite recursive loop.
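To make the mismatch concrete, here is a tiny runnable model (the `Sub` class is a stand-in, not PyTeal's; the actual fix, visible in the patch above, replaces `subroutine.argumentCount()` with the called subroutine's `argumentCount()`):
```python
from typing import NamedTuple

class Sub(NamedTuple):          # stand-in for a PyTeal subroutine definition
    name: str
    num_args: int

caller = Sub("factorial", num_args=1)
callee = Sub("factorial_intermediate", num_args=2)

buggy_num_args = caller.num_args   # what the compiler used for every callsub
fixed_num_args = callee.num_args   # what each callsub actually needs

# whenever the two differ, the emitted cover/uncover depths are wrong
assert buggy_num_args != fixed_num_args
```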
## Example
Below is an example of a PyTeal program that's affected by this bug:
```python
from pyteal import *
@Subroutine(TealType.uint64)
def factorial(i: Expr) -> Expr:
return If(i <= Int(1), Int(1), factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i)
@Subroutine(TealType.uint64)
def factorial_intermediate(i: Expr, j: Expr) -> Expr:
return Seq(Log(j), factorial(i))
program = Return(factorial(Int(4)) == Int(24))
```
It currently produces the following **incorrect** TEAL code (with PyTeal v0.10.0), with my commentary:
```
#pragma version 5
int 4
callsub factorial_0
int 24
==
return
// factorial
factorial_0:
store 0
load 0
int 1
<=
bnz factorial_0_l2
load 0
int 1
-
byte "inconsequential"
load 0
swap // the compiler thinks factorialintermediate_1 only takes 1 argument, but it takes 2. Only one value is uncovered
callsub factorialintermediate_1
swap
store 0
load 0
*
b factorial_0_l3
factorial_0_l2:
int 1
factorial_0_l3:
retsub
// factorial_intermediate
factorialintermediate_1:
store 2
store 1
load 2
log
load 1
load 1
load 2
uncover 3
uncover 3 // the compiler thinks factorial_0 takes 2 arguments, but it only takes 1. More values are uncovered than needed
callsub factorial_0
cover 2
store 2
store 1
retsub
``` | 0.0 | f2598da3dec6041051df4442cd9cc7e3c373674d | [
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_different_arg_count_4",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_different_arg_count_5"
]
| [
"pyteal/compiler/compiler_test.py::test_compile_single",
"pyteal/compiler/compiler_test.py::test_compile_sequence",
"pyteal/compiler/compiler_test.py::test_compile_branch",
"pyteal/compiler/compiler_test.py::test_compile_branch_multiple",
"pyteal/compiler/compiler_test.py::test_empty_branch",
"pyteal/compiler/compiler_test.py::test_compile_mode",
"pyteal/compiler/compiler_test.py::test_compile_version_invalid",
"pyteal/compiler/compiler_test.py::test_compile_version_2",
"pyteal/compiler/compiler_test.py::test_compile_version_default",
"pyteal/compiler/compiler_test.py::test_compile_version_3",
"pyteal/compiler/compiler_test.py::test_compile_version_4",
"pyteal/compiler/compiler_test.py::test_compile_version_5",
"pyteal/compiler/compiler_test.py::test_compile_version_6",
"pyteal/compiler/compiler_test.py::test_slot_load_before_store",
"pyteal/compiler/compiler_test.py::test_assign_scratch_slots",
"pyteal/compiler/compiler_test.py::test_scratchvar_double_assign_invalid",
"pyteal/compiler/compiler_test.py::test_assembleConstants",
"pyteal/compiler/compiler_test.py::test_compile_while",
"pyteal/compiler/compiler_test.py::test_compile_for",
"pyteal/compiler/compiler_test.py::test_compile_break",
"pyteal/compiler/compiler_test.py::test_compile_continue",
"pyteal/compiler/compiler_test.py::test_compile_continue_break_nested",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_unsupported",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_no_return",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_with_return",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_many_args",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_5",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_multiple_args",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_multiple_args_5",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_4",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_5",
"pyteal/compiler/compiler_test.py::test_compile_loop_in_subroutine",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_invalid_name",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_assemble_constants",
"pyteal/compiler/compiler_test.py::test_compile_wide_ratio"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-03-05 00:18:42+00:00 | mit | 1,039 |
|
algorand__pyteal-29 | diff --git a/pyteal/__init__.py b/pyteal/__init__.py
index b72a01d..ff1f2fb 100644
--- a/pyteal/__init__.py
+++ b/pyteal/__init__.py
@@ -1,7 +1,19 @@
from .ast import *
+from .ast import __all__ as ast_all
from .ir import *
+from .ir import __all__ as ir_all
from .compiler import compileTeal
from .types import TealType
from .errors import TealInternalError, TealTypeError, TealInputError
from .util import execute
from .config import MAX_GROUP_SIZE
+
+__all__ = ast_all + ir_all + [
+ "compileTeal",
+ "TealType",
+ "TealInternalError",
+ "TealTypeError",
+ "TealInputError",
+ "execute",
+ "MAX_GROUP_SIZE",
+]
diff --git a/pyteal/ast/__init__.py b/pyteal/ast/__init__.py
index 9f635c9..362d006 100644
--- a/pyteal/ast/__init__.py
+++ b/pyteal/ast/__init__.py
@@ -41,3 +41,76 @@ from .assert_ import Assert
# misc
from .scratch import ScratchSlot, ScratchLoad, ScratchStore
from .maybe import MaybeValue
+
+__all__ = [
+ "Expr",
+ "LeafExpr",
+ "Addr",
+ "Bytes",
+ "Err",
+ "Int",
+ "EnumInt",
+ "Arg",
+ "TxnType",
+ "TxnField",
+ "TxnExpr",
+ "TxnaExpr",
+ "TxnArray",
+ "TxnObject",
+ "Txn",
+ "GtxnExpr",
+ "GtxnaExpr",
+ "TxnGroup",
+ "Gtxn",
+ "Global",
+ "GlobalField",
+ "App",
+ "AppField",
+ "OnComplete",
+ "AssetHolding",
+ "AssetParam",
+ "Array",
+ "Tmpl",
+ "Nonce",
+ "UnaryExpr",
+ "Btoi",
+ "Itob",
+ "Len",
+ "Sha256",
+ "Sha512_256",
+ "Keccak256",
+ "Not",
+ "BitwiseNot",
+ "Pop",
+ "Return",
+ "Balance",
+ "BinaryExpr",
+ "Add",
+ "Minus",
+ "Mul",
+ "Div",
+ "BitwiseAnd",
+ "BitwiseOr",
+ "BitwiseXor",
+ "Mod",
+ "Eq",
+ "Neq",
+ "Lt",
+ "Le",
+ "Gt",
+ "Ge",
+ "Ed25519Verify",
+ "Substring",
+ "NaryExpr",
+ "And",
+ "Or",
+ "Concat",
+ "If",
+ "Cond",
+ "Seq",
+ "Assert",
+ "ScratchSlot",
+ "ScratchLoad",
+ "ScratchStore",
+ "MaybeValue",
+]
diff --git a/pyteal/ir/__init__.py b/pyteal/ir/__init__.py
index e6aa604..ce498a9 100644
--- a/pyteal/ir/__init__.py
+++ b/pyteal/ir/__init__.py
@@ -3,3 +3,11 @@ from .ops import Op, Mode
from .tealcomponent import TealComponent
from .tealop import TealOp
from .teallabel import TealLabel
+
+__all__ = [
+ "Op",
+ "Mode",
+ "TealComponent",
+ "TealOp",
+ "TealLabel",
+]
| algorand/pyteal | 9e1ba1bb4ab32f5a1a43bd8084b31aee6e714853 | diff --git a/pyteal/ast/gtxn_test.py b/pyteal/ast/gtxn_test.py
index 33cbdf0..e27f5d9 100644
--- a/pyteal/ast/gtxn_test.py
+++ b/pyteal/ast/gtxn_test.py
@@ -1,6 +1,8 @@
import pytest
from .. import *
+# this is not necessary but mypy complains if it's not included
+from .. import MAX_GROUP_SIZE
GTXN_RANGE = range(MAX_GROUP_SIZE)
diff --git a/tests/module_test.py b/tests/module_test.py
new file mode 100644
index 0000000..3894542
--- /dev/null
+++ b/tests/module_test.py
@@ -0,0 +1,6 @@
+from pyteal import *
+
+def test_export_int():
+ from pyteal import ast
+
+ assert int != ast.int
| The module `pyteal.ast.int` shadows the Python built-in `int`
When importing `pyteal` with `from pyteal import *` (as in the documentation examples), the module `pyteal.ast.int` shadows the Python built-in `int`.
In particular, we have
```
$ python3 -c "from pyteal import *; print(int(5))"
Traceback (most recent call last):
File "<string>", line 1, in <module>
TypeError: 'module' object is not callable
```
while:
```
$ python3 -c "print(int(5))"
5
```
## Requirements
Stop submodules from being exported; it's a small change, and they shouldn't be exported in the first place.
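A minimal sketch of the requested change, mirroring the shape of the patch above: enumerate the public API in `__all__` so `from pyteal import *` stops re-exporting submodules such as `pyteal.ast.int`:
```python
# pyteal/__init__.py (sketch)
from .ast import *
from .ast import __all__ as ast_all
from .compiler import compileTeal
from .types import TealType

# star imports now pull only these names, not submodules like `int`
__all__ = ast_all + ["compileTeal", "TealType"]
```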
## Urgency
Could be a major blocker for some people and it's a simple change that we can just knock out. | 0.0 | 9e1ba1bb4ab32f5a1a43bd8084b31aee6e714853 | [
"tests/module_test.py::test_export_int"
]
| [
"pyteal/ast/gtxn_test.py::test_gtxn_invalid",
"pyteal/ast/gtxn_test.py::test_gtxn_sender",
"pyteal/ast/gtxn_test.py::test_gtxn_fee",
"pyteal/ast/gtxn_test.py::test_gtxn_first_valid",
"pyteal/ast/gtxn_test.py::test_gtxn_last_valid",
"pyteal/ast/gtxn_test.py::test_gtxn_note",
"pyteal/ast/gtxn_test.py::test_gtxn_lease",
"pyteal/ast/gtxn_test.py::test_gtxn_receiver",
"pyteal/ast/gtxn_test.py::test_gtxn_amount",
"pyteal/ast/gtxn_test.py::test_gtxn_close_remainder_to",
"pyteal/ast/gtxn_test.py::test_gtxn_vote_pk",
"pyteal/ast/gtxn_test.py::test_gtxn_selection_pk",
"pyteal/ast/gtxn_test.py::test_gtxn_vote_first",
"pyteal/ast/gtxn_test.py::test_gtxn_vote_last",
"pyteal/ast/gtxn_test.py::test_gtxn_vote_key_dilution",
"pyteal/ast/gtxn_test.py::test_gtxn_type",
"pyteal/ast/gtxn_test.py::test_gtxn_type_enum",
"pyteal/ast/gtxn_test.py::test_gtxn_xfer_asset",
"pyteal/ast/gtxn_test.py::test_gtxn_asset_amount",
"pyteal/ast/gtxn_test.py::test_gtxn_asset_sender",
"pyteal/ast/gtxn_test.py::test_gtxn_asset_receiver",
"pyteal/ast/gtxn_test.py::test_gtxn_asset_close_to",
"pyteal/ast/gtxn_test.py::test_gtxn_group_index",
"pyteal/ast/gtxn_test.py::test_gtxn_id",
"pyteal/ast/gtxn_test.py::test_txn_application_id",
"pyteal/ast/gtxn_test.py::test_txn_on_completion",
"pyteal/ast/gtxn_test.py::test_txn_application_args",
"pyteal/ast/gtxn_test.py::test_txn_application_args_length",
"pyteal/ast/gtxn_test.py::test_txn_accounts",
"pyteal/ast/gtxn_test.py::test_txn_accounts_length",
"pyteal/ast/gtxn_test.py::test_txn_approval_program",
"pyteal/ast/gtxn_test.py::test_txn_clear_state_program",
"pyteal/ast/gtxn_test.py::test_txn_rekey_to",
"pyteal/ast/gtxn_test.py::test_txn_config_asset",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_total",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_decimals",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_default_frozen",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_unit_name",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_name",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_url",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_metadata_hash",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_manager",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_reserve",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_freeze",
"pyteal/ast/gtxn_test.py::test_txn_config_asset_clawback",
"pyteal/ast/gtxn_test.py::test_txn_freeze_asset",
"pyteal/ast/gtxn_test.py::test_txn_freeze_asset_account",
"pyteal/ast/gtxn_test.py::test_txn_freeze_asset_frozen"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-10-27 15:28:03+00:00 | mit | 1,040 |
|
algorand__pyteal-33 | diff --git a/.gitignore b/.gitignore
index be31d1f..daf823c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -125,3 +125,6 @@ dmypy.json
# Pyre type checker
.pyre/
+
+# IDE
+.idea
diff --git a/docs/index.rst b/docs/index.rst
index 9853a51..c5f1a36 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -39,6 +39,7 @@ PyTeal **hasn't been security audited**. Use it at your own risk.
byte_expression
accessing_transaction_field
crypto
+ scratch
control_structures
state
diff --git a/docs/scratch.rst b/docs/scratch.rst
new file mode 100644
index 0000000..752f138
--- /dev/null
+++ b/docs/scratch.rst
@@ -0,0 +1,33 @@
+.. _scratch:
+
+Scratch Space
+========================
+
+`Scratch space <https://developer.algorand.org/docs/reference/teal/specification/#scratch-space>`_
+is a temporary place to store values for later use in your program. It is temporary because any
+changes to scratch space do not persist beyond the current transaction. Scratch space can be used
+in both Application and Signature mode.
+
+Scratch space consists of 256 scratch slots, each capable of storing one integer or byte slice. When
+using the :any:`ScratchVar` class to work with scratch space, a slot is automatically assigned to
+each variable.
+
+Writing and Reading
+~~~~~~~~~~~~~~~~~~~~~~
+
+To write to scratch space, first create a :any:`ScratchVar` object and pass in the :any:`TealType`
+of the values that you will store there. It is possible to create a :any:`ScratchVar` that can store
+both integers and byte slices by passing no arguments to the :any:`ScratchVar` constructor, but note
+that no type checking takes place in this situation.
+
+To write or read values, use the corresponding :any:`ScratchVar.store` or :any:`ScratchVar.load` methods.
+
+For example:
+
+.. code-block:: python
+
+ myvar = ScratchVar(TealType.uint64)
+ program = Seq([
+ myvar.store(Int(5)),
+ Assert(myvar.load() == Int(5))
+ ])
diff --git a/pyteal/ast/__init__.py b/pyteal/ast/__init__.py
index 362d006..122e92a 100644
--- a/pyteal/ast/__init__.py
+++ b/pyteal/ast/__init__.py
@@ -39,7 +39,8 @@ from .seq import Seq
from .assert_ import Assert
# misc
-from .scratch import ScratchSlot, ScratchLoad, ScratchStore
+from .scratch import ScratchSlot, ScratchLoad, ScratchStore, ScratchStackStore
+from .scratchvar import ScratchVar
from .maybe import MaybeValue
__all__ = [
@@ -112,5 +113,7 @@ __all__ = [
"ScratchSlot",
"ScratchLoad",
"ScratchStore",
+ "ScratchStackStore",
+ "ScratchVar",
"MaybeValue",
]
diff --git a/pyteal/ast/scratch.py b/pyteal/ast/scratch.py
index e12077c..6b8795c 100644
--- a/pyteal/ast/scratch.py
+++ b/pyteal/ast/scratch.py
@@ -10,11 +10,19 @@ class ScratchSlot:
self.id = ScratchSlot.slotId
ScratchSlot.slotId += 1
- def store(self):
- """Get an expression to store a value in this slot."""
- return ScratchStore(self)
+ def store(self, value: Expr = None) -> Expr:
+ """Get an expression to store a value in this slot.
+
+ Args:
+ value (optional): The value to store in this slot. If not included, the last value on
+ the stack will be stored. NOTE: storing the last value on the stack breaks the typical
+ semantics of PyTeal, only use if you know what you're doing.
+ """
+ if value is not None:
+ return ScratchStore(self, value)
+ return ScratchStackStore(self)
- def load(self, type: TealType = TealType.anytype):
+ def load(self, type: TealType = TealType.anytype) -> 'ScratchLoad':
"""Get an expression to load a value from this slot.
Args:
@@ -66,23 +74,53 @@ ScratchLoad.__module__ = "pyteal"
class ScratchStore(Expr):
"""Expression to store a value in scratch space."""
- def __init__(self, slot: ScratchSlot):
+ def __init__(self, slot: ScratchSlot, value: Expr):
"""Create a new ScratchStore expression.
Args:
slot: The slot to store the value in.
+ value: The value to store.
"""
self.slot = slot
+ self.value = value
def __str__(self):
- return "(Store {})".format(self.slot.__str__())
+ return "(Store {} {})".format(str(self.slot), str(self.value))
def __teal__(self):
from ..ir import TealOp, Op, TealBlock
op = TealOp(Op.store, self.slot)
- return TealBlock.FromOp(op)
+ return TealBlock.FromOp(op, self.value)
def type_of(self):
return TealType.none
ScratchStore.__module__ = "pyteal"
+
+class ScratchStackStore(Expr):
+ """Expression to store a value from the stack in scratch space.
+
+ NOTE: This expression breaks the typical semantics of PyTeal, only use if you know what you're
+ doing.
+ """
+
+ def __init__(self, slot: ScratchSlot):
+ """Create a new ScratchStackStore expression.
+
+ Args:
+ slot: The slot to store the value in.
+ """
+ self.slot = slot
+
+ def __str__(self):
+ return "(StackStore {})".format(str(self.slot))
+
+ def __teal__(self):
+ from ..ir import TealOp, Op, TealBlock
+ op = TealOp(Op.store, self.slot)
+ return TealBlock.FromOp(op)
+
+ def type_of(self):
+ return TealType.none
+
+ScratchStackStore.__module__ = "pyteal"
diff --git a/pyteal/ast/scratchvar.py b/pyteal/ast/scratchvar.py
new file mode 100644
index 0000000..888bded
--- /dev/null
+++ b/pyteal/ast/scratchvar.py
@@ -0,0 +1,47 @@
+from ..types import TealType, require_type
+from .expr import Expr
+from .scratch import ScratchSlot, ScratchLoad
+
+class ScratchVar:
+ """
+    Interface around Scratch space, similar to get/put local/global state
+
+ Example:
+ .. code-block:: python
+
+ myvar = ScratchVar(TealType.uint64)
+ Seq([
+ myvar.store(Int(5)),
+ Assert(myvar.load() == Int(5))
+ ])
+ """
+
+ def __init__(self, type: TealType = TealType.anytype):
+ """Create a new ScratchVar with an optional type.
+
+ Args:
+ type (optional): The type that this variable can hold. An error will be thrown if an
+                expression with an incompatible type is stored in this variable. Defaults to
+ TealType.anytype.
+ """
+ self.slot = ScratchSlot()
+ self.type = type
+
+ def storage_type(self) -> TealType:
+ """Get the type of expressions that can be stored in this ScratchVar."""
+ return self.type
+
+ def store(self, value: Expr) -> Expr:
+ """Store value in Scratch Space
+
+ Args:
+ value: The value to store. Must conform to this ScratchVar's type.
+ """
+ require_type(value.type_of(), self.type)
+ return self.slot.store(value)
+
+ def load(self) -> ScratchLoad:
+ """Load value from Scratch Space"""
+ return self.slot.load(self.type)
+
+ScratchVar.__module__ = "pyteal"
| algorand/pyteal | eb2e689c48292571ee10bc334558a4b339e50667 | diff --git a/pyteal/ast/scratch_test.py b/pyteal/ast/scratch_test.py
index 8baa280..10a65ce 100644
--- a/pyteal/ast/scratch_test.py
+++ b/pyteal/ast/scratch_test.py
@@ -8,7 +8,8 @@ def test_scratch_slot():
assert slot.__hash__() == slot.__hash__()
assert slot != ScratchSlot()
- assert slot.store().__teal__()[0] == ScratchStore(slot).__teal__()[0]
+ assert slot.store().__teal__()[0] == ScratchStackStore(slot).__teal__()[0]
+ assert slot.store(Int(1)).__teal__()[0] == ScratchStore(slot, Int(1)).__teal__()[0]
assert slot.load().type_of() == TealType.anytype
assert slot.load(TealType.uint64).type_of() == TealType.uint64
@@ -42,8 +43,24 @@ def test_scratch_load_type():
assert actual == expected
def test_scratch_store():
+ for value in (Int(1), Bytes("test"), App.globalGet(Bytes("key")), If(Int(1), Int(2), Int(3))):
+ slot = ScratchSlot()
+ expr = ScratchStore(slot, value)
+ assert expr.type_of() == TealType.none
+
+ expected, valueEnd = value.__teal__()
+ storeBlock = TealSimpleBlock([
+ TealOp(Op.store, slot)
+ ])
+ valueEnd.setNextBlock(storeBlock)
+
+ actual, _ = expr.__teal__()
+
+ assert actual == expected
+
+def test_scratch_stack_store():
slot = ScratchSlot()
- expr = ScratchStore(slot)
+ expr = ScratchStackStore(slot)
assert expr.type_of() == TealType.none
expected = TealSimpleBlock([
diff --git a/pyteal/ast/scratchvar_test.py b/pyteal/ast/scratchvar_test.py
new file mode 100644
index 0000000..f4fa03d
--- /dev/null
+++ b/pyteal/ast/scratchvar_test.py
@@ -0,0 +1,64 @@
+import pytest
+
+from .. import *
+
+
+def test_scratchvar_type():
+ myvar_default = ScratchVar()
+ assert myvar_default.storage_type() == TealType.anytype
+ assert myvar_default.store(Bytes("value")).type_of() == TealType.none
+ assert myvar_default.load().type_of() == TealType.anytype
+
+ with pytest.raises(TealTypeError):
+ myvar_default.store(Pop(Int(1)))
+
+ myvar_int = ScratchVar(TealType.uint64)
+ assert myvar_int.storage_type() == TealType.uint64
+ assert myvar_int.store(Int(1)).type_of() == TealType.none
+ assert myvar_int.load().type_of() == TealType.uint64
+
+ with pytest.raises(TealTypeError):
+ myvar_int.store(Bytes("value"))
+
+ with pytest.raises(TealTypeError):
+ myvar_int.store(Pop(Int(1)))
+
+ myvar_bytes = ScratchVar(TealType.bytes)
+ assert myvar_bytes.storage_type() == TealType.bytes
+ assert myvar_bytes.store(Bytes("value")).type_of() == TealType.none
+ assert myvar_bytes.load().type_of() == TealType.bytes
+
+ with pytest.raises(TealTypeError):
+ myvar_bytes.store(Int(0))
+
+ with pytest.raises(TealTypeError):
+ myvar_bytes.store(Pop(Int(1)))
+
+def test_scratchvar_store():
+ myvar = ScratchVar(TealType.bytes)
+ expr = myvar.store(Bytes("value"))
+
+ expected = TealSimpleBlock([
+ TealOp(Op.byte, "\"value\""),
+ TealOp(Op.store, myvar.slot),
+ ])
+
+ actual, _ = expr.__teal__()
+ actual.addIncoming()
+ actual = TealBlock.NormalizeBlocks(actual)
+
+ assert actual == expected
+
+def test_scratchvar_load():
+ myvar = ScratchVar()
+ expr = myvar.load()
+
+ expected = TealSimpleBlock([
+ TealOp(Op.load, myvar.slot)
+ ])
+
+ actual, _ = expr.__teal__()
+ actual.addIncoming()
+ actual = TealBlock.NormalizeBlocks(actual)
+
+ assert actual == expected
| Allow programs to use scratch space
Hey PyTEAL maintainers,
First of all, thank you for this amazing library.
Can I ask you to add `ScratchSlot` examples to the documentation? Or at least post one here as a comment, so I can prepare a PR with a documentation update. Thank you!
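A minimal sketch of the kind of example requested, using the `ScratchVar` wrapper introduced by this issue's patch (it allocates a `ScratchSlot` under the hood):
```python
from pyteal import *

# store a uint64 in scratch space, then read it back
myvar = ScratchVar(TealType.uint64)   # wraps an auto-allocated ScratchSlot
program = Seq([
    myvar.store(Int(5)),              # write 5 into the slot
    Assert(myvar.load() == Int(5)),   # load it and verify
])
``` | 0.0 | eb2e689c48292571ee10bc334558a4b339e50667 | [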
"pyteal/ast/scratch_test.py::test_scratch_slot",
"pyteal/ast/scratch_test.py::test_scratch_store",
"pyteal/ast/scratch_test.py::test_scratch_stack_store",
"pyteal/ast/scratchvar_test.py::test_scratchvar_type",
"pyteal/ast/scratchvar_test.py::test_scratchvar_store",
"pyteal/ast/scratchvar_test.py::test_scratchvar_load"
]
| [
"pyteal/ast/scratch_test.py::test_scratch_load_default",
"pyteal/ast/scratch_test.py::test_scratch_load_type"
]
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-11-19 16:30:25+00:00 | mit | 1,041 |
|
algorand__pyteal-45 | diff --git a/pyteal/ast/binaryexpr.py b/pyteal/ast/binaryexpr.py
index 5e2e01e..7664a4f 100644
--- a/pyteal/ast/binaryexpr.py
+++ b/pyteal/ast/binaryexpr.py
@@ -17,7 +17,7 @@ class BinaryExpr(Expr):
return TealBlock.FromOp(TealOp(self.op), self.argLeft, self.argRight)
def __str__(self):
- return "({} {} {})".format(self.op.value, self.argLeft, self.argRight)
+ return "({} {} {})".format(self.op, self.argLeft, self.argRight)
def type_of(self):
return self.outputType
diff --git a/pyteal/ast/naryexpr.py b/pyteal/ast/naryexpr.py
index 50da97a..1a4bb92 100644
--- a/pyteal/ast/naryexpr.py
+++ b/pyteal/ast/naryexpr.py
@@ -39,7 +39,7 @@ class NaryExpr(Expr):
return start, end
def __str__(self):
- ret_str = "(" + self.op.value,
+ ret_str = "(" + str(self.op),
for a in self.args:
ret_str += " " + a.__str__()
ret_str += ")"
diff --git a/pyteal/ast/tmpl.py b/pyteal/ast/tmpl.py
index 91f956e..635be0d 100644
--- a/pyteal/ast/tmpl.py
+++ b/pyteal/ast/tmpl.py
@@ -13,7 +13,7 @@ class Tmpl(LeafExpr):
self.name = name
def __str__(self):
- return "(Tmpl {} {})".format(self.op.value, self.name)
+ return "(Tmpl {} {})".format(self.op, self.name)
def __teal__(self):
op = TealOp(self.op, self.name)
diff --git a/pyteal/ast/unaryexpr.py b/pyteal/ast/unaryexpr.py
index e9879f8..faa171b 100644
--- a/pyteal/ast/unaryexpr.py
+++ b/pyteal/ast/unaryexpr.py
@@ -15,7 +15,7 @@ class UnaryExpr(Expr):
return TealBlock.FromOp(TealOp(self.op), self.arg)
def __str__(self):
- return "({} {})".format(self.op.value, self.arg)
+ return "({} {})".format(self.op, self.arg)
def type_of(self):
return self.outputType
diff --git a/pyteal/compiler.py b/pyteal/compiler.py
index 91643d0..12cb64b 100644
--- a/pyteal/compiler.py
+++ b/pyteal/compiler.py
@@ -6,6 +6,10 @@ from .ir import Op, Mode, TealComponent, TealOp, TealLabel, TealBlock, TealSimpl
from .errors import TealInputError, TealInternalError
from .config import NUM_SLOTS
+MAX_TEAL_VERSION = 2
+MIN_TEAL_VERSION = 2
+DEFAULT_TEAL_VERSION = 2
+
def sortBlocks(start: TealBlock) -> List[TealBlock]:
"""Topologically sort the graph which starts with the input TealBlock.
@@ -95,6 +99,22 @@ def flattenBlocks(blocks: List[TealBlock]) -> List[TealComponent]:
return teal
+def verifyOpsForVersion(teal: List[TealComponent], version: int):
+ """Verify that all TEAL operations are allowed in the specified version.
+
+ Args:
+ teal: Code to check.
+        version: The version to check against.
+
+ Raises:
+ TealInputError: if teal contains an operation not allowed in version.
+ """
+ for stmt in teal:
+ if isinstance(stmt, TealOp):
+ op = stmt.getOp()
+ if op.min_version > version:
+ raise TealInputError("Op not supported in TEAL version {}: {}".format(version, op))
+
def verifyOpsForMode(teal: List[TealComponent], mode: Mode):
"""Verify that all TEAL operations are allowed in mode.
@@ -109,14 +129,17 @@ def verifyOpsForMode(teal: List[TealComponent], mode: Mode):
if isinstance(stmt, TealOp):
op = stmt.getOp()
if not op.mode & mode:
- raise TealInputError("Op not supported in {} mode: {}".format(mode.name, op.value))
+ raise TealInputError("Op not supported in {} mode: {}".format(mode.name, op))
-def compileTeal(ast: Expr, mode: Mode) -> str:
+def compileTeal(ast: Expr, mode: Mode, version: int = DEFAULT_TEAL_VERSION) -> str:
"""Compile a PyTeal expression into TEAL assembly.
Args:
ast: The PyTeal expression to assemble.
mode: The mode of the program to assemble. Must be Signature or Application.
+ version (optional): The TEAL version used to assemble the program. This will determine which
+ expressions and fields are able to be used in the program and how expressions compile to
+ TEAL opcodes. Defaults to 2 if not included.
Returns:
A TEAL assembly program compiled from the input expression.
@@ -124,6 +147,9 @@ def compileTeal(ast: Expr, mode: Mode) -> str:
Raises:
TealInputError: if an operation in ast is not supported by the supplied mode.
"""
+ if not (MIN_TEAL_VERSION <= version <= MAX_TEAL_VERSION):
+        raise TealInputError("Unsupported TEAL version: {}. Expected a number in the range [{}, {}]".format(version, MIN_TEAL_VERSION, MAX_TEAL_VERSION))
+
start, _ = ast.__teal__()
start.addIncoming()
start.validate()
@@ -134,6 +160,7 @@ def compileTeal(ast: Expr, mode: Mode) -> str:
order = sortBlocks(start)
teal = flattenBlocks(order)
+ verifyOpsForVersion(teal, version)
verifyOpsForMode(teal, mode)
slots = set()
diff --git a/pyteal/ir/ops.py b/pyteal/ir/ops.py
index 241eab1..cf26d77 100644
--- a/pyteal/ir/ops.py
+++ b/pyteal/ir/ops.py
@@ -1,3 +1,4 @@
+from typing import NamedTuple
from enum import Enum, Flag, auto
class Mode(Flag):
@@ -8,77 +9,84 @@ class Mode(Flag):
Mode.__module__ = "pyteal"
+OpType = NamedTuple('OpType', [('value', str), ('mode', Mode), ('min_version', int)])
+
class Op(Enum):
"""Enum of program opcodes."""
- err = "err", Mode.Signature | Mode.Application
- sha256 = "sha256", Mode.Signature | Mode.Application
- keccak256 = "keccak256", Mode.Signature | Mode.Application
- sha512_256 = "sha512_256", Mode.Signature | Mode.Application
- ed25519verify = "ed25519verify", Mode.Signature
- add = "+", Mode.Signature | Mode.Application
- minus = "-", Mode.Signature | Mode.Application
- div = "/", Mode.Signature | Mode.Application
- mul = "*", Mode.Signature | Mode.Application
- lt = "<", Mode.Signature | Mode.Application
- gt = ">", Mode.Signature | Mode.Application
- le = "<=", Mode.Signature | Mode.Application
- ge = ">=", Mode.Signature | Mode.Application
- logic_and = "&&", Mode.Signature | Mode.Application
- logic_or = "||", Mode.Signature | Mode.Application
- eq = "==", Mode.Signature | Mode.Application
- neq = "!=", Mode.Signature | Mode.Application
- logic_not = "!", Mode.Signature | Mode.Application
- len = "len", Mode.Signature | Mode.Application
- itob = "itob", Mode.Signature | Mode.Application
- btoi = "btoi", Mode.Signature | Mode.Application
- mod = "%", Mode.Signature | Mode.Application
- bitwise_or = "|", Mode.Signature | Mode.Application
- bitwise_and = "&", Mode.Signature | Mode.Application
- bitwise_xor = "^", Mode.Signature | Mode.Application
- bitwise_not = "~", Mode.Signature | Mode.Application
- mulw = "mulw", Mode.Signature | Mode.Application
- addw = "addw", Mode.Signature | Mode.Application
- int = "int", Mode.Signature | Mode.Application
- byte = "byte", Mode.Signature | Mode.Application
- addr = "addr", Mode.Signature | Mode.Application
- arg = "arg", Mode.Signature
- txn = "txn", Mode.Signature | Mode.Application
- global_ = "global", Mode.Signature | Mode.Application
- gtxn = "gtxn", Mode.Signature | Mode.Application
- load = "load", Mode.Signature | Mode.Application
- store = "store", Mode.Signature | Mode.Application
- txna = "txna", Mode.Signature | Mode.Application
- gtxna = "gtxna", Mode.Signature | Mode.Application
- bnz = "bnz", Mode.Signature | Mode.Application
- bz = "bz", Mode.Signature | Mode.Application
- b = "b", Mode.Signature | Mode.Application
- return_ = "return", Mode.Signature | Mode.Application
- pop = "pop", Mode.Signature | Mode.Application
- dup = "dup", Mode.Signature | Mode.Application
- dup2 = "dup2", Mode.Signature | Mode.Application
- concat = "concat", Mode.Signature | Mode.Application
- substring = "substring", Mode.Signature | Mode.Application
- substring3 = "substring3", Mode.Signature | Mode.Application
- balance = "balance", Mode.Application
- app_opted_in = "app_opted_in", Mode.Application
- app_local_get = "app_local_get", Mode.Application
- app_local_get_ex = "app_local_get_ex", Mode.Application
- app_global_get = "app_global_get", Mode.Application
- app_global_get_ex = "app_global_get_ex", Mode.Application
- app_local_put = "app_local_put", Mode.Application
- app_global_put = "app_global_put", Mode.Application
- app_local_del = "app_local_del", Mode.Application
- app_global_del = "app_global_del", Mode.Application
- asset_holding_get = "asset_holding_get", Mode.Application
- asset_params_get = "asset_params_get", Mode.Application
+ def __str__(self) -> str:
+ return self.value.value
+
+ @property
+ def mode(self) -> Mode:
+ """Get the modes where this op is available."""
+ return self.value.mode
- def __new__(cls, value: str, mode: Mode):
- obj = object.__new__(cls)
- obj._value_ = value
- return obj
+ @property
+ def min_version(self) -> int:
+ """Get the minimum version where this op is available."""
+ return self.value.min_version
- def __init__(self, value: str, mode: Mode):
- self.mode = mode
+ err = OpType("err", Mode.Signature | Mode.Application, 2)
+ sha256 = OpType("sha256", Mode.Signature | Mode.Application, 2)
+ keccak256 = OpType("keccak256", Mode.Signature | Mode.Application, 2)
+ sha512_256 = OpType("sha512_256", Mode.Signature | Mode.Application, 2)
+ ed25519verify = OpType("ed25519verify", Mode.Signature, 2)
+ add = OpType("+", Mode.Signature | Mode.Application, 2)
+ minus = OpType("-", Mode.Signature | Mode.Application, 2)
+ div = OpType("/", Mode.Signature | Mode.Application, 2)
+ mul = OpType("*", Mode.Signature | Mode.Application, 2)
+ lt = OpType("<", Mode.Signature | Mode.Application, 2)
+ gt = OpType(">", Mode.Signature | Mode.Application, 2)
+ le = OpType("<=", Mode.Signature | Mode.Application, 2)
+ ge = OpType(">=", Mode.Signature | Mode.Application, 2)
+ logic_and = OpType("&&", Mode.Signature | Mode.Application, 2)
+ logic_or = OpType("||", Mode.Signature | Mode.Application, 2)
+ eq = OpType("==", Mode.Signature | Mode.Application, 2)
+ neq = OpType("!=", Mode.Signature | Mode.Application, 2)
+ logic_not = OpType("!", Mode.Signature | Mode.Application, 2)
+ len = OpType("len", Mode.Signature | Mode.Application, 2)
+ itob = OpType("itob", Mode.Signature | Mode.Application, 2)
+ btoi = OpType("btoi", Mode.Signature | Mode.Application, 2)
+ mod = OpType("%", Mode.Signature | Mode.Application, 2)
+ bitwise_or = OpType("|", Mode.Signature | Mode.Application, 2)
+ bitwise_and = OpType("&", Mode.Signature | Mode.Application, 2)
+ bitwise_xor = OpType("^", Mode.Signature | Mode.Application, 2)
+ bitwise_not = OpType("~", Mode.Signature | Mode.Application, 2)
+ mulw = OpType("mulw", Mode.Signature | Mode.Application, 2)
+ addw = OpType("addw", Mode.Signature | Mode.Application, 2)
+ int = OpType("int", Mode.Signature | Mode.Application, 2)
+ byte = OpType("byte", Mode.Signature | Mode.Application, 2)
+ addr = OpType("addr", Mode.Signature | Mode.Application, 2)
+ arg = OpType("arg", Mode.Signature, 2)
+ txn = OpType("txn", Mode.Signature | Mode.Application, 2)
+ global_ = OpType("global", Mode.Signature | Mode.Application, 2)
+ gtxn = OpType("gtxn", Mode.Signature | Mode.Application, 2)
+ load = OpType("load", Mode.Signature | Mode.Application, 2)
+ store = OpType("store", Mode.Signature | Mode.Application, 2)
+ txna = OpType("txna", Mode.Signature | Mode.Application, 2)
+ gtxna = OpType("gtxna", Mode.Signature | Mode.Application, 2)
+ bnz = OpType("bnz", Mode.Signature | Mode.Application, 2)
+ bz = OpType("bz", Mode.Signature | Mode.Application, 2)
+ b = OpType("b", Mode.Signature | Mode.Application, 2)
+ return_ = OpType("return", Mode.Signature | Mode.Application, 2)
+ pop = OpType("pop", Mode.Signature | Mode.Application, 2)
+ dup = OpType("dup", Mode.Signature | Mode.Application, 2)
+ dup2 = OpType("dup2", Mode.Signature | Mode.Application, 2)
+ concat = OpType("concat", Mode.Signature | Mode.Application, 2)
+ substring = OpType("substring", Mode.Signature | Mode.Application, 2)
+ substring3 = OpType("substring3", Mode.Signature | Mode.Application, 2)
+ balance = OpType("balance", Mode.Application, 2)
+ app_opted_in = OpType("app_opted_in", Mode.Application, 2)
+ app_local_get = OpType("app_local_get", Mode.Application, 2)
+ app_local_get_ex = OpType("app_local_get_ex", Mode.Application, 2)
+ app_global_get = OpType("app_global_get", Mode.Application, 2)
+ app_global_get_ex = OpType("app_global_get_ex", Mode.Application, 2)
+ app_local_put = OpType("app_local_put", Mode.Application, 2)
+ app_global_put = OpType("app_global_put", Mode.Application, 2)
+ app_local_del = OpType("app_local_del", Mode.Application, 2)
+ app_global_del = OpType("app_global_del", Mode.Application, 2)
+ asset_holding_get = OpType("asset_holding_get", Mode.Application, 2)
+ asset_params_get = OpType("asset_params_get", Mode.Application, 2)
Op.__module__ = "pyteal"
diff --git a/pyteal/ir/tealop.py b/pyteal/ir/tealop.py
index 83aa8cf..27c062d 100644
--- a/pyteal/ir/tealop.py
+++ b/pyteal/ir/tealop.py
@@ -26,7 +26,7 @@ class TealOp(TealComponent):
def assemble(self) -> str:
from ..ast import ScratchSlot
- parts = [self.op.value]
+ parts = [str(self.op)]
for arg in self.args:
if isinstance(arg, ScratchSlot):
raise TealInternalError("Slot not assigned: {}".format(arg))
@@ -39,7 +39,7 @@ class TealOp(TealComponent):
return " ".join(parts)
def __repr__(self) -> str:
- args = [self.op.__str__()]
+ args = [str(self.op)]
for a in self.args:
args.append(repr(a))
| algorand/pyteal | 9c0e68bb3a223983a3f41a565487c98776cfbcdb | diff --git a/pyteal/compiler_test.py b/pyteal/compiler_test.py
index cbc6780..52b6c1a 100644
--- a/pyteal/compiler_test.py
+++ b/pyteal/compiler_test.py
@@ -344,3 +344,22 @@ app_global_get
with pytest.raises(TealInputError):
compileTeal(expr, Mode.Signature)
+
+def test_compile_version():
+ expr = Int(1)
+
+ with pytest.raises(TealInputError):
+ compileTeal(expr, Mode.Signature, 1)
+
+ expected_version_2 = """
+#pragma version 2
+int 1
+""".strip()
+ actual_version_2 = compileTeal(expr, Mode.Signature, 2)
+ assert actual_version_2 == expected_version_2
+
+ actual_default = compileTeal(expr, Mode.Signature)
+ assert actual_default == expected_version_2
+
+ with pytest.raises(TealInputError):
+ compileTeal(expr, Mode.Signature, 3)
| Add a way to version PyTeal programs
## Summary
For security reasons, PyTeal programs should be written for a specific version of TEAL. This is because new TEAL versions can introduce new fields and transaction types that are not accessible from older versions and that can be dangerous. For example, TEAL v2 introduced rekeying, so to secure all TEAL v1 programs, the network automatically rejects transactions that involve both rekeying and TEAL v1 programs.
When future versions of TEAL are released, PyTeal will be updated to support them, so the package should provide some way to version programs. This will guard against a program being compiled to a later version of TEAL than the program author intended.
Thanks to @fabrice102 for bringing this up.
## Scope
Some ideas about how this could be achieved:
* Add a version parameter to the `compileTeal` method (a sketch follows this list).
* Add a `Version` object to the AST to declare support versions inside of a program's AST.
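A minimal sketch of the first idea, matching the `compileTeal(ast, mode, version)` signature the patch above settles on:
```python
from pyteal import *

expr = Int(1)
teal = compileTeal(expr, Mode.Signature, 2)
# -> "#pragma version 2\nint 1"; an unsupported version raises TealInputError
```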
## Urgency
This should be implemented before PyTeal supports TEAL v3. | 0.0 | 9c0e68bb3a223983a3f41a565487c98776cfbcdb | [
"pyteal/compiler_test.py::test_compile_version"
]
| [
"pyteal/compiler_test.py::test_sort_single",
"pyteal/compiler_test.py::test_sort_sequence",
"pyteal/compiler_test.py::test_sort_branch",
"pyteal/compiler_test.py::test_sort_multiple_branch",
"pyteal/compiler_test.py::test_sort_branch_converge",
"pyteal/compiler_test.py::test_flatten_none",
"pyteal/compiler_test.py::test_flatten_single_empty",
"pyteal/compiler_test.py::test_flatten_single_one",
"pyteal/compiler_test.py::test_flatten_single_many",
"pyteal/compiler_test.py::test_flatten_sequence",
"pyteal/compiler_test.py::test_flatten_branch",
"pyteal/compiler_test.py::test_flatten_branch_converge",
"pyteal/compiler_test.py::test_flatten_multiple_branch",
"pyteal/compiler_test.py::test_flatten_multiple_branch_converge",
"pyteal/compiler_test.py::test_compile_single",
"pyteal/compiler_test.py::test_compile_sequence",
"pyteal/compiler_test.py::test_compile_branch",
"pyteal/compiler_test.py::test_compile_mode"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-03-16 21:25:54+00:00 | mit | 1,042 |
|
algorand__pyteal-79 | diff --git a/pyteal/__init__.py b/pyteal/__init__.py
index 95dd111..879ecea 100644
--- a/pyteal/__init__.py
+++ b/pyteal/__init__.py
@@ -5,7 +5,7 @@ from .ir import __all__ as ir_all
from .compiler import MAX_TEAL_VERSION, MIN_TEAL_VERSION, DEFAULT_TEAL_VERSION, CompileOptions, compileTeal
from .types import TealType
from .errors import TealInternalError, TealTypeError, TealInputError, TealCompileError
-from .config import MAX_GROUP_SIZE
+from .config import MAX_GROUP_SIZE, NUM_SLOTS
__all__ = ast_all + ir_all + [
"MAX_TEAL_VERSION",
@@ -19,4 +19,5 @@ __all__ = ast_all + ir_all + [
"TealInputError",
"TealCompileError",
"MAX_GROUP_SIZE",
+ "NUM_SLOTS",
]
diff --git a/pyteal/ast/__init__.py b/pyteal/ast/__init__.py
index 08a29e8..ed7e0b5 100644
--- a/pyteal/ast/__init__.py
+++ b/pyteal/ast/__init__.py
@@ -13,6 +13,7 @@ from .arg import Arg
from .txn import TxnType, TxnField, TxnExpr, TxnaExpr, TxnArray, TxnObject, Txn
from .gtxn import GtxnExpr, GtxnaExpr, TxnGroup, Gtxn
from .gaid import GeneratedID
+from .gload import ImportScratchValue
from .global_ import Global, GlobalField
from .app import App, AppField, OnComplete
from .asset import AssetHolding, AssetParam
@@ -66,6 +67,7 @@ __all__ = [
"TxnGroup",
"Gtxn",
"GeneratedID",
+ "ImportScratchValue",
"Global",
"GlobalField",
"App",
diff --git a/pyteal/ast/gload.py b/pyteal/ast/gload.py
new file mode 100644
index 0000000..f8774ab
--- /dev/null
+++ b/pyteal/ast/gload.py
@@ -0,0 +1,58 @@
+from typing import cast, Union, TYPE_CHECKING
+
+from ..types import TealType, require_type
+from ..ir import TealOp, Op, TealBlock
+from ..errors import TealInputError, verifyTealVersion
+from ..config import MAX_GROUP_SIZE, NUM_SLOTS
+from .expr import Expr
+from .int import Int
+from .leafexpr import LeafExpr
+
+if TYPE_CHECKING:
+ from ..compiler import CompileOptions
+
+class ImportScratchValue(LeafExpr):
+ """An expression to load a scratch value created by another transaction in the current group"""
+
+ def __init__(self, txnIndex: Union[int, Expr], slotId: int) -> None:
+ """ Create an expression to load a scratch space slot from a transaction in the current group.
+
+ Requires TEAL version 4 or higher. This operation is only permitted in application mode.
+
+ Args:
+            txnIndex: The index of the transaction from which the scratch value should be loaded.
+ This index may be a Python int, or it may be a PyTeal expression that evaluates at
+ runtime. If it's an expression, it must evaluate to a uint64. In all cases, the index
+ must be less than the index of the current transaction.
+ slotId: The index of the scratch slot that should be loaded. The index must be a Python int
+ in the range [0-256).
+ """
+ super().__init__()
+ if type(txnIndex) == int:
+ if txnIndex < 0 or txnIndex >= MAX_GROUP_SIZE:
+                raise TealInputError("Invalid transaction index {}, should be in [0, {})".format(txnIndex, MAX_GROUP_SIZE))
+ else:
+ require_type(cast(Expr, txnIndex).type_of(), TealType.uint64)
+ if slotId < 0 or slotId >= NUM_SLOTS:
+            raise TealInputError("Invalid slot ID {}, should be in [0, {})".format(slotId, NUM_SLOTS))
+
+ self.txnIndex = txnIndex
+ self.slotId = slotId
+
+ def __str__(self) -> str:
+ return "(Gload {} {})".format(self.txnIndex, self.slotId)
+
+ def __teal__(self, options: 'CompileOptions'):
+ verifyTealVersion(Op.gload.min_version, options.version, "TEAL version too low to use Gload expression")
+
+ if type(self.txnIndex) == int:
+ op = TealOp(self, Op.gload, cast(int, self.txnIndex), cast(int, self.slotId))
+ return TealBlock.FromOp(options, op)
+
+ op = TealOp(self, Op.gloads, self.slotId)
+ return TealBlock.FromOp(options, op, cast(Expr, self.txnIndex))
+
+ def type_of(self):
+ return TealType.anytype
+
+ImportScratchValue.__module__ = "pyteal"
diff --git a/pyteal/ast/scratch.py b/pyteal/ast/scratch.py
index 38de0f5..cd342fc 100644
--- a/pyteal/ast/scratch.py
+++ b/pyteal/ast/scratch.py
@@ -1,6 +1,8 @@
from typing import TYPE_CHECKING
from ..types import TealType
+from ..config import NUM_SLOTS
+from ..errors import TealInputError
from .expr import Expr
if TYPE_CHECKING:
@@ -9,12 +11,28 @@ if TYPE_CHECKING:
class ScratchSlot:
"""Represents the allocation of a scratch space slot."""
- slotId = 0
+ # Unique identifier for the compiler to automatically assign slots
+ # The id field is used by the compiler to map to an actual slot in the source code
+ # Slot ids under 256 are manually reserved slots
+ nextSlotId = NUM_SLOTS
- def __init__(self):
- self.id = ScratchSlot.slotId
- ScratchSlot.slotId += 1
+ def __init__(self, requestedSlotId: int = None):
+ """Initializes a scratch slot with a particular id
+ Args:
+            requestedSlotId (optional): A scratch slot ID in which the compiler must store the value.
+ This id may be a Python int in the range [0-256).
+ """
+ if requestedSlotId is None:
+ self.id = ScratchSlot.nextSlotId
+ ScratchSlot.nextSlotId += 1
+ self.isReservedSlot = False
+ else:
+ if requestedSlotId < 0 or requestedSlotId >= NUM_SLOTS:
+                raise TealInputError("Invalid slot ID {}, should be in [0, {})".format(requestedSlotId, NUM_SLOTS))
+ self.id = requestedSlotId
+ self.isReservedSlot = True
+
def store(self, value: Expr = None) -> Expr:
"""Get an expression to store a value in this slot.
diff --git a/pyteal/ast/scratchvar.py b/pyteal/ast/scratchvar.py
index 888bded..53398e1 100644
--- a/pyteal/ast/scratchvar.py
+++ b/pyteal/ast/scratchvar.py
@@ -16,15 +16,17 @@ class ScratchVar:
])
"""
- def __init__(self, type: TealType = TealType.anytype):
+ def __init__(self, type: TealType = TealType.anytype, slotId: int = None):
"""Create a new ScratchVar with an optional type.
Args:
type (optional): The type that this variable can hold. An error will be thrown if an
                 expression with an incompatible type is stored in this variable. Defaults to
TealType.anytype.
+            slotId (optional): A scratch slot ID in which the compiler must store the value.
+ This id may be a Python int in the range [0-256).
"""
- self.slot = ScratchSlot()
+ self.slot = ScratchSlot(requestedSlotId=slotId)
self.type = type
def storage_type(self) -> TealType:
diff --git a/pyteal/compiler/compiler.py b/pyteal/compiler/compiler.py
index 37635ee..d57f169 100644
--- a/pyteal/compiler/compiler.py
+++ b/pyteal/compiler/compiler.py
@@ -1,6 +1,6 @@
-from typing import List
+from typing import List, Set
-from ..ast import Expr
+from ..ast import Expr, ScratchSlot
from ..ir import Mode, TealComponent, TealOp, TealBlock
from ..errors import TealInputError, TealInternalError
from ..config import NUM_SLOTS
@@ -96,18 +96,32 @@ def compileTeal(ast: Expr, mode: Mode, *, version: int = DEFAULT_TEAL_VERSION, a
verifyOpsForVersion(teal, version)
verifyOpsForMode(teal, mode)
- slots = set()
+ slots: Set[ScratchSlot] = set()
+ slotIds: Set[int] = set()
+ nextSlotIndex = 0
for stmt in teal:
for slot in stmt.getSlots():
+ # If there are two unique slots with same IDs, raise an error
+ if slot.id in slotIds and id(slot) not in [id(s) for s in slots]:
+ raise TealInternalError("Slot ID {} has been assigned multiple times".format(slot.id))
+ slotIds.add(slot.id)
slots.add(slot)
if len(slots) > NUM_SLOTS:
# TODO: identify which slots can be reused
raise TealInternalError("Too many slots in use: {}, maximum is {}".format(len(slots), NUM_SLOTS))
- for index, slot in enumerate(sorted(slots, key=lambda slot: slot.id)):
+ for slot in sorted(slots, key=lambda slot: slot.id):
+ # Find next vacant slot that compiler can assign to
+ while nextSlotIndex in slotIds:
+ nextSlotIndex += 1
for stmt in teal:
- stmt.assignSlot(slot, index)
+ if slot.isReservedSlot:
+ # Slot ids under 256 are manually reserved slots
+ stmt.assignSlot(slot, slot.id)
+ else:
+ stmt.assignSlot(slot, nextSlotIndex)
+ slotIds.add(nextSlotIndex)
if assembleConstants:
if version < 3:
| algorand/pyteal | 6675d82fbfff626552f09141b39841d612ea7c11 | diff --git a/pyteal/ast/gload_test.py b/pyteal/ast/gload_test.py
new file mode 100644
index 0000000..7d58037
--- /dev/null
+++ b/pyteal/ast/gload_test.py
@@ -0,0 +1,57 @@
+import pytest
+
+from .. import *
+# this is not necessary but mypy complains if it's not included
+from .. import MAX_GROUP_SIZE, NUM_SLOTS, CompileOptions
+
+teal3Options = CompileOptions(version=3)
+teal4Options = CompileOptions(version=4)
+
+def test_gload_teal_3():
+ with pytest.raises(TealInputError):
+ ImportScratchValue(0, 1).__teal__(teal3Options)
+
+ with pytest.raises(TealInputError):
+ ImportScratchValue(Int(0), 1).__teal__(teal3Options)
+
+def test_gload():
+ expr = ImportScratchValue(0, 1)
+ assert expr.type_of() == TealType.anytype
+
+ expected = TealSimpleBlock([
+ TealOp(expr, Op.gload, 0, 1)
+ ])
+
+ actual, _ = expr.__teal__(teal4Options)
+
+ assert actual == expected
+
+def test_gload_dynamic():
+ arg = Int(1)
+ expr = ImportScratchValue(arg, 0)
+ assert expr.type_of() == TealType.anytype
+
+ expected = TealSimpleBlock([
+ TealOp(arg, Op.int, 1),
+ TealOp(expr, Op.gloads, 0)
+ ])
+
+ actual, _ = expr.__teal__(teal4Options)
+ actual.addIncoming()
+ actual = TealBlock.NormalizeBlocks(actual)
+
+ assert actual == expected
+
+def test_gload_invalid():
+ with pytest.raises(TealInputError):
+ ImportScratchValue(-1, 0)
+
+ with pytest.raises(TealInputError):
+ ImportScratchValue(MAX_GROUP_SIZE, 0)
+
+ with pytest.raises(TealInputError):
+ ImportScratchValue(0, -1)
+
+ with pytest.raises(TealInputError):
+ ImportScratchValue(0, NUM_SLOTS)
+
\ No newline at end of file
diff --git a/pyteal/ast/scratch_test.py b/pyteal/ast/scratch_test.py
index 9236182..dd78053 100644
--- a/pyteal/ast/scratch_test.py
+++ b/pyteal/ast/scratch_test.py
@@ -75,3 +75,23 @@ def test_scratch_stack_store():
actual, _ = expr.__teal__(options)
assert actual == expected
+
+def test_scratch_assign_id():
+ slot = ScratchSlot(255)
+ expr = ScratchStackStore(slot)
+ assert expr.type_of() == TealType.none
+
+ expected = TealSimpleBlock([
+ TealOp(expr, Op.store, slot)
+ ])
+
+ actual, _ = expr.__teal__(options)
+
+ assert actual == expected
+
+def test_scratch_assign_id_invalid():
+ with pytest.raises(TealInputError):
+ slot = ScratchSlot(-1)
+
+ with pytest.raises(TealInputError):
+ slot = ScratchSlot(NUM_SLOTS)
diff --git a/pyteal/ast/scratchvar_test.py b/pyteal/ast/scratchvar_test.py
index 519202c..0f9b0c0 100644
--- a/pyteal/ast/scratchvar_test.py
+++ b/pyteal/ast/scratchvar_test.py
@@ -66,3 +66,35 @@ def test_scratchvar_load():
actual = TealBlock.NormalizeBlocks(actual)
assert actual == expected
+
+def test_scratchvar_assign_store():
+ slotId = 2
+ myvar = ScratchVar(TealType.uint64, slotId)
+ arg = Int(10)
+ expr = myvar.store(arg)
+
+ expected = TealSimpleBlock([
+ TealOp(arg, Op.int, 10),
+ TealOp(expr, Op.store, myvar.slot),
+ ])
+
+ actual, _ = expr.__teal__(options)
+ actual.addIncoming()
+ actual = TealBlock.NormalizeBlocks(actual)
+
+ assert actual == expected
+
+def test_scratchvar_assign_load():
+ slotId = 5
+ myvar = ScratchVar(slotId=slotId)
+ expr = myvar.load()
+
+ expected = TealSimpleBlock([
+ TealOp(expr, Op.load, myvar.slot)
+ ])
+
+ actual, _ = expr.__teal__(options)
+ actual.addIncoming()
+ actual = TealBlock.NormalizeBlocks(actual)
+
+ assert actual == expected
diff --git a/pyteal/compiler/compiler_test.py b/pyteal/compiler/compiler_test.py
index b68eb67..f10e8c9 100644
--- a/pyteal/compiler/compiler_test.py
+++ b/pyteal/compiler/compiler_test.py
@@ -139,6 +139,42 @@ def test_slot_load_before_store():
with pytest.raises(TealInternalError):
compileTeal(program, Mode.Application, version=2)
+def test_assign_scratch_slots():
+ myScratch = ScratchVar(TealType.uint64)
+ otherScratch = ScratchVar(TealType.uint64, 1)
+ anotherScratch = ScratchVar(TealType.uint64, 0)
+ lastScratch = ScratchVar(TealType.uint64)
+ prog = Seq([
+ myScratch.store(Int(5)), # Slot 2
+ otherScratch.store(Int(0)), # Slot 1
+ anotherScratch.store(Int(7)), # Slot 0
+ lastScratch.store(Int(9)), # Slot 3
+ ])
+
+ expected = """
+#pragma version 4
+int 5
+store 2
+int 0
+store 1
+int 7
+store 0
+int 9
+store 3
+""".strip()
+ actual = compileTeal(prog, mode=Mode.Signature, version=4)
+ assert actual == expected
+
+def test_scratchvar_double_assign_invalid():
+ myvar = ScratchVar(TealType.uint64, 10)
+ otherVar = ScratchVar(TealType.uint64, 10)
+ prog = Seq([
+ myvar.store(Int(5)),
+ otherVar.store(Int(0))
+ ])
+ with pytest.raises(TealInternalError):
+ compileTeal(prog, mode=Mode.Signature, version=4)
+
def test_assembleConstants():
program = Itob(Int(1) + Int(1) + Tmpl.Int("TMPL_VAR")) == Concat(Bytes("test"), Bytes("test"), Bytes("test2"))
| Support importing/exporting values between contracts
# Summary
TEAL v4 introduces the `gload(s)` ops, which can load values from the scratch space of prior app call transactions in the same group. PyTeal should support this feature as well.
# Scope
edit: commenting out the initial idea in favor of a simpler approach inspired by @StylishTriangles's suggestion.
This issue requires the following changes:
1. Allow `ScratchSlot` to take an additional optional parameter `requestedSlotId`, an integer from 0-255 that specifies the slot ID the compiler **must** assign to this slot. This also requires modifications to `compileTeal` to actually assign the correct ID to any `ScratchSlot`s with this parameter, and if multiple unique `ScratchSlot`s request the same slot ID, compilation must fail.
2. Also add an optional `requestedSlotId` parameter to `ScratchVar` which would be passed directly to its underlying `ScratchSlot`.
3. Expose the `gload` op in a PyTeal expression (maybe as `ImportScratchValue(txnIndex: Expr, slotId: int)`); see the sketch after this list.
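
To make the proposal concrete, here is a rough sketch of how the pieces could fit together. It is illustrative only: the names mirror the tests added in this record (`ImportScratchValue` and the `slotId` argument of `ScratchVar`), but the API was still under discussion at this point.

```python
from pyteal import *

# Exporting contract: pin a value to a compiler-enforced slot ID.
exported = ScratchVar(TealType.uint64, slotId=5)  # compiler must use slot 5
write_value = exported.store(Int(42))

# Importing contract (a later app call in the same group):
# read slot 5 from the scratch space of group transaction 0.
imported = ImportScratchValue(0, 5)  # assembles to `gload 0 5`
```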
~Ideally we can have a system where one contract can export many values (perhaps with helpful names that don't make it onto the chain) with something like `Export("bidAmount", computedBidAmount)`. Then, another contract can import this exported value with something like `computedBidAmount = ImportFrom(txnIndex, contract1.exports("bidAmount"))`.~
~This would require programs to be compiled in a specific order to make dependencies work, or all at once and the compiler can sort out dependencies.~
~Additionally, the compile method would need to return more information, perhaps a Python object that contains a dict of all exports from the contract and their assigned slots -- it's important to make this information accessible by the user so that they can properly debug, and it also lets other non-PyTeal contracts import their exports. This object then needs to be obtained by the importing contract in order to know which slots to actually load. Again, here it will be important to allow imports from non-PyTeal contracts (meaning essentially direct access to gload(s)).~ | 0.0 | 6675d82fbfff626552f09141b39841d612ea7c11 | [
"pyteal/ast/gload_test.py::test_gload_teal_3",
"pyteal/ast/gload_test.py::test_gload",
"pyteal/ast/gload_test.py::test_gload_dynamic",
"pyteal/ast/gload_test.py::test_gload_invalid",
"pyteal/ast/scratch_test.py::test_scratch_slot",
"pyteal/ast/scratch_test.py::test_scratch_load_default",
"pyteal/ast/scratch_test.py::test_scratch_load_type",
"pyteal/ast/scratch_test.py::test_scratch_store",
"pyteal/ast/scratch_test.py::test_scratch_stack_store",
"pyteal/ast/scratch_test.py::test_scratch_assign_id",
"pyteal/ast/scratch_test.py::test_scratch_assign_id_invalid",
"pyteal/ast/scratchvar_test.py::test_scratchvar_type",
"pyteal/ast/scratchvar_test.py::test_scratchvar_store",
"pyteal/ast/scratchvar_test.py::test_scratchvar_load",
"pyteal/ast/scratchvar_test.py::test_scratchvar_assign_store",
"pyteal/ast/scratchvar_test.py::test_scratchvar_assign_load",
"pyteal/compiler/compiler_test.py::test_compile_single",
"pyteal/compiler/compiler_test.py::test_compile_sequence",
"pyteal/compiler/compiler_test.py::test_compile_branch",
"pyteal/compiler/compiler_test.py::test_compile_mode",
"pyteal/compiler/compiler_test.py::test_compile_version_invalid",
"pyteal/compiler/compiler_test.py::test_compile_version_2",
"pyteal/compiler/compiler_test.py::test_compile_version_default",
"pyteal/compiler/compiler_test.py::test_compile_version_3",
"pyteal/compiler/compiler_test.py::test_compile_version_4",
"pyteal/compiler/compiler_test.py::test_slot_load_before_store",
"pyteal/compiler/compiler_test.py::test_assign_scratch_slots",
"pyteal/compiler/compiler_test.py::test_scratchvar_double_assign_invalid",
"pyteal/compiler/compiler_test.py::test_assembleConstants"
]
| []
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-07-23 19:29:14+00:00 | mit | 1,043 |
|
alife-data-standards__alife-std-dev-python-17 | diff --git a/ALifeStdDev/ALifeStdDev/__init__.py b/ALifeStdDev/ALifeStdDev/__init__.py
new file mode 100644
index 0000000..09a5400
--- /dev/null
+++ b/ALifeStdDev/ALifeStdDev/__init__.py
@@ -0,0 +1,9 @@
+"""Convenience namespace for ALife data standards dev tools."""
+
+from .. import phylogeny
+
+from ..phylogeny import *
+
+__all__ = [
+ "phylogeny",
+]
diff --git a/ALifeStdDev/__init__.py b/ALifeStdDev/__init__.py
index d31c31e..dddea34 100644
--- a/ALifeStdDev/__init__.py
+++ b/ALifeStdDev/__init__.py
@@ -1,1 +1,3 @@
+"""Top level package for ALife data standards dev tools."""
+
__version__ = "0.2.3"
diff --git a/README.md b/README.md
index e16b28d..c18d2de 100644
--- a/README.md
+++ b/README.md
@@ -16,3 +16,19 @@ ALifeStdDev can be installed using pip:
```
pip install ALifeStdDev
```
+
+# Usage Instructions
+
+To load a single submodule,
+
+```python3
+from ALifeStdDev import phylogeny as asd_phylo
+asd_phylo.load_phylogeny_to_pandas_df("myfile.csv")
+```
+
+To load the library as a flat namespace,
+
+```python3
+from ALifeStdDev import ALifeStdDev as asd
+asd.load_phylogeny_to_pandas_df("myfile.csv")
+```
diff --git a/setup.py b/setup.py
index 7588023..275a6e0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-from setuptools import setup
+from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
@@ -30,7 +30,7 @@ setup(name='ALifeStdDev',
include_package_data=True,
keywords='artificial life',
test_suite='tests',
- packages=['ALifeStdDev'],
+ packages=find_packages(include=['ALifeStdDev', 'ALifeStdDev.*']),
install_requires=['networkx', 'pandas'],
tests_require=['pytest'],
zip_safe=False,
| alife-data-standards/alife-std-dev-python | 8e5bd4f30b1dce0594ef8e672e262600c88214ad | diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_flat_namespace.py b/tests/test_flat_namespace.py
new file mode 100644
index 0000000..4818cb0
--- /dev/null
+++ b/tests/test_flat_namespace.py
@@ -0,0 +1,8 @@
+import typing
+
+from ALifeStdDev import ALifeStdDev as asd
+
+
+def test_access():
+ assert hasattr(asd, "load_phylogeny_to_networkx")
+ assert isinstance(asd.load_phylogeny_to_networkx, typing.Callable)
| Publish on PyPI?
Would there be any interest in publishing this software via pypi? | 0.0 | 8e5bd4f30b1dce0594ef8e672e262600c88214ad | [
"tests/test_flat_namespace.py::test_access"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-03-04 20:17:53+00:00 | mit | 1,044 |
|
allisson__python-simple-rest-client-9 | diff --git a/.gitignore b/.gitignore
index 95ea3fd..ed03ab1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,7 +39,7 @@ htmlcov/
.tox/
.coverage
.coverage.*
-.cache
+.pytest_cache
nosetests.xml
coverage.xml
*,cover
diff --git a/CHANGES.rst b/CHANGES.rst
index 13d44c2..229f54b 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,11 @@
Changelog
---------
+0.5.2
+~~~~~
+
+* Fix JSONDecodeError when processing empty server responses (thanks @zmbbb).
+
0.5.1
~~~~~
diff --git a/simple_rest_client/request.py b/simple_rest_client/request.py
index b412489..05713a9 100644
--- a/simple_rest_client/request.py
+++ b/simple_rest_client/request.py
@@ -3,7 +3,7 @@ import logging
import async_timeout
from json_encoder import json
-from .decorators import handle_request_error, handle_async_request_error
+from .decorators import handle_async_request_error, handle_request_error
from .models import Response
logger = logging.getLogger(__name__)
@@ -26,7 +26,9 @@ def make_request(session, request):
if 'text' in content_type:
body = client_response.text
elif 'json' in content_type:
- body = json.loads(client_response.text)
+ body = client_response.text
+ if body:
+ body = json.loads(body)
else:
body = client_response.content
@@ -58,7 +60,9 @@ async def make_async_request(session, request):
if 'text' in content_type:
body = await client_response.text()
elif 'json' in content_type:
- body = json.loads(await client_response.text())
+ body = await client_response.text()
+ if body:
+ body = json.loads(body)
else:
body = await client_response.read()
| allisson/python-simple-rest-client | 2b7a0b84f9ba93ab0223da50b28365f61433e73c | diff --git a/tests/test_resource.py b/tests/test_resource.py
index 5f8fc0b..f3a70bd 100644
--- a/tests/test_resource.py
+++ b/tests/test_resource.py
@@ -83,24 +83,25 @@ def test_resource_actions(url, method, status, action, args, kwargs, reqres_reso
assert response.body == {'success': True}
[email protected]('content_type,response_body', [
- ('application/json', {'success': True}),
- ('text/plain', '{"success": true}'),
- ('application/octet-stream', b'{"success": true}'),
[email protected]('content_type,response_body,expected_response_body', [
+ ('application/json', '{"success": true}', {'success': True}),
+ ('application/json', '', ''),
+ ('text/plain', '{"success": true}', '{"success": true}'),
+ ('application/octet-stream', '{"success": true}', b'{"success": true}'),
])
@responses.activate
-def test_resource_response_body(content_type, response_body, reqres_resource):
+def test_resource_response_body(content_type, response_body, expected_response_body, reqres_resource):
url = 'https://reqres.in/api/users'
responses.add(
responses.GET,
url,
- body=b'{"success": true}',
+ body=response_body,
status=200,
content_type=content_type
)
response = reqres_resource.list()
- assert response.body == response_body
+ assert response.body == expected_response_body
@pytest.mark.asyncio
@@ -124,14 +125,15 @@ async def test_async_resource_actions(url, method, status, action, args, kwargs,
@pytest.mark.asyncio
[email protected]('content_type,response_body', [
- ('application/json', {'success': True}),
- ('text/plain', '{"success": true}'),
- ('application/octet-stream', b'{"success": true}'),
[email protected]('content_type,response_body,expected_response_body', [
+ ('application/json', '{"success": true}', {'success': True}),
+ ('application/json', '', ''),
+ ('text/plain', '{"success": true}', '{"success": true}'),
+ ('application/octet-stream', '{"success": true}', b'{"success": true}'),
])
-async def test_asyncresource_response_body(content_type, response_body, reqres_async_resource):
+async def test_asyncresource_response_body(content_type, response_body, expected_response_body, reqres_async_resource):
url = 'https://reqres.in/api/users'
with aioresponses() as mock_response:
- mock_response.get(url, status=200, body=b'{"success": true}', headers={'Content-Type': content_type})
+ mock_response.get(url, status=200, body=response_body, headers={'Content-Type': content_type})
response = await reqres_async_resource.list()
- assert response.body == response_body
+ assert response.body == expected_response_body
| json.decoder.JSONDecodeError when processing empty server responses
@zmbbb report:
Hey, really like your lib and the elegant interface.
I encountered an exception (see below) when I received an empty response from the server, in this case a "204 NO CONTENT". The lib forwarded the empty response to JSON for decoding, which raised the exception. As a quick fix, I added a check that `client_response.text` is truthy. This works for me for both empty and non-empty responses.
The exception:
```
[...]
  File "/usr/local/lib/python3.5/dist-packages/simple_rest_client/resource.py", line 107, in action_method
    return make_request(self.session, request)
  File "/usr/local/lib/python3.5/dist-packages/simple_rest_client/decorators.py", line 39, in wrapper
    response = f(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/simple_rest_client/request.py", line 30, in make_request
    body = json.loads(client_response.text)
  File "/usr/local/lib/python3.5/dist-packages/json_encoder/json/__init__.py", line 229, in loads
    **kw
  File "/usr/lib/python3.5/json/__init__.py", line 332, in loads
    return cls(**kw).decode(s)
  File "/usr/lib/python3.5/json/decoder.py", line 339, in decode
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
  File "/usr/lib/python3.5/json/decoder.py", line 357, in raw_decode
    raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
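
In code, the reporter's guard amounts to something like the following minimal sketch; the shipped fix in `simple_rest_client/request.py` (see the patch above) applies the same check before calling `json.loads`:

```python
import json

def decode_json_body(text: str):
    """Parse JSON only for non-empty bodies; pass empty ones (e.g. 204) through."""
    if not text:
        return text
    return json.loads(text)

assert decode_json_body("") == ""
assert decode_json_body('{"success": true}') == {"success": True}
```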
| 0.0 | 2b7a0b84f9ba93ab0223da50b28365f61433e73c | [
"tests/test_resource.py::test_asyncresource_response_body[application/json--]",
"tests/test_resource.py::test_resource_response_body[application/json--]"
]
| [
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users/2-GET-200-retrieve-2-kwargs2]",
"tests/test_resource.py::test_custom_resource_get_action_full_url",
"tests/test_resource.py::test_base_resource_actions",
"tests/test_resource.py::test_base_resource_get_action_full_url_with_action_url_match_error",
"tests/test_resource.py::test_asyncresource_response_body[application/json-{\"success\":",
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users/2-PUT-200-update-2-kwargs3]",
"tests/test_resource.py::test_resource_actions[https://reqres.in/api/users/2-PUT-200-update-2-kwargs3]",
"tests/test_resource.py::test_resource_response_body[text/plain-{\"success\":",
"tests/test_resource.py::test_base_resource_get_action_full_url_with_append_slash",
"tests/test_resource.py::test_resource_actions[https://reqres.in/api/users-POST-201-create-None-kwargs1]",
"tests/test_resource.py::test_resource_actions[https://reqres.in/api/users/2-GET-200-retrieve-2-kwargs2]",
"tests/test_resource.py::test_base_resource_get_action_full_url",
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users/2-PATCH-200-partial_update-2-kwargs4]",
"tests/test_resource.py::test_custom_resource_actions",
"tests/test_resource.py::test_base_resource_get_action_full_url_with_action_not_found",
"tests/test_resource.py::test_asyncresource_response_body[application/octet-stream-{\"success\":",
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users-GET-200-list-None-kwargs0]",
"tests/test_resource.py::test_resource_actions[https://reqres.in/api/users/2-PATCH-200-partial_update-2-kwargs4]",
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users/2-DELETE-204-destroy-2-kwargs5]",
"tests/test_resource.py::test_resource_actions[https://reqres.in/api/users-GET-200-list-None-kwargs0]",
"tests/test_resource.py::test_asyncresource_response_body[text/plain-{\"success\":",
"tests/test_resource.py::test_async_resource_actions[https://reqres.in/api/users-POST-201-create-None-kwargs1]",
"tests/test_resource.py::test_resource_response_body[application/octet-stream-{\"success\":",
"tests/test_resource.py::test_resource_response_body[application/json-{\"success\":"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-04-24 18:06:42+00:00 | mit | 1,045 |
|
allo-media__text2num-107 | diff --git a/text_to_num/lang/base.py b/text_to_num/lang/base.py
index b2a8cc9..e349c1c 100644
--- a/text_to_num/lang/base.py
+++ b/text_to_num/lang/base.py
@@ -70,7 +70,7 @@ class Language:
return NotImplemented
def not_numeric_word(self, word: Optional[str]) -> bool:
- return word is None or word != self.DECIMAL_SEP and word not in self.NUMBERS
+ return word is None or word != self.DECIMAL_SEP and word not in self.NUMBERS and word not in self.ZERO
def split_number_word(self, word: str) -> str: # maybe use: List[str]
"""In some languages numbers are written as one word, e.g. German
diff --git a/text_to_num/lang/english.py b/text_to_num/lang/english.py
index e155a2e..4286476 100644
--- a/text_to_num/lang/english.py
+++ b/text_to_num/lang/english.py
@@ -117,7 +117,7 @@ class English(Language):
AND_NUMS: Set[str] = set()
AND = "and"
- NEVER_IF_ALONE = {"one"}
+ NEVER_IF_ALONE = {"one", "o"}
# Relaxed composed numbers (two-words only)
# start => (next, target)
diff --git a/text_to_num/parsers.py b/text_to_num/parsers.py
index d66d9ed..72c3add 100644
--- a/text_to_num/parsers.py
+++ b/text_to_num/parsers.py
@@ -655,21 +655,21 @@ class WordToDigitParser:
elif (
word in self.lang.ZERO
and self.at_start_of_seq()
- and (
- look_ahead is None
- or look_ahead in self.lang.NUMBERS
- or look_ahead in self.lang.ZERO
- or look_ahead in self.lang.DECIMAL_SEP
- )
+ and look_ahead is not None
+ and look_ahead in self.lang.DECIMAL_SEP
):
- self._value.append("0")
+ pass
elif (
word in self.lang.ZERO
and self.at_start_of_seq()
- and look_ahead is not None
- and look_ahead in self.lang.DECIMAL_SEP
+ # and (
+ # look_ahead is None
+ # or look_ahead in self.lang.NUMBERS
+ # or look_ahead in self.lang.ZERO
+ # or look_ahead in self.lang.DECIMAL_SEP
+ # )
):
- pass
+ self._value.append("0")
elif self._push(self.lang.ord2card(word) or "", look_ahead):
self._value.append(
self.lang.num_ord(
| allo-media/text2num | 95b983d5999391e5af9395f2003c7ba391d762ad | diff --git a/tests/test_text_to_num_en.py b/tests/test_text_to_num_en.py
index b6a71ad..d12341b 100644
--- a/tests/test_text_to_num_en.py
+++ b/tests/test_text_to_num_en.py
@@ -126,6 +126,7 @@ class TestTextToNumEN(TestCase):
self.assertEqual(alpha2digit(source, "en"), expected)
self.assertEqual(alpha2digit("zero", "en"), "0")
+ self.assertEqual(alpha2digit("zero love", "en"), "0 love")
def test_alpha2digit_ordinals(self):
source = (
diff --git a/tests/test_text_to_num_fr.py b/tests/test_text_to_num_fr.py
index accd0ef..c9ba038 100644
--- a/tests/test_text_to_num_fr.py
+++ b/tests/test_text_to_num_fr.py
@@ -143,6 +143,8 @@ class TestTextToNumFR(TestCase):
# self.assertEqual(alpha2digit(source, "fr"), source)
self.assertEqual(alpha2digit("zéro", "fr"), "0")
+ self.assertEqual(alpha2digit("a a un trois sept trois trois sept cinq quatre zéro c c", "fr"), "a a 1 3 7 3 3 7 5 4 0 c c")
+ self.assertEqual(alpha2digit("sept un zéro", "fr"), "7 1 0")
def test_alpha2digit_ordinals(self):
source = (
| Issue when there is 'zéro' before other letters
Ex OK:
```
data_zero = 'zéro'
print(alpha2digit(data_zero, 'fr'))
0
```
Ex NOK:
```
data1 = 'a a un trois sept trois trois sept cinq quatre zéro c c'
data2 = 'b b un trois sept trois trois sept cinq quatre zéro d d'
print(alpha2digit(data1, 'fr'))
print(alpha2digit(data2, 'fr'))
a a 1 3 7 3 3 7 5 4 zéro c c
b b 1 3 7 3 3 7 5 4 zéro d d
```
We can see that `zéro` is not transformed to the digit 0.
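(Per the fix in this record: the zero-handling branch of `WordToDigitParser` previously emitted "0" only when the *next* word was itself numeric, a zero, or a decimal separator; the patch drops that look-ahead requirement, and the old condition is still visible as the commented-out block in `parsers.py` above.)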
thanks in advance for your help
have a good day | 0.0 | 95b983d5999391e5af9395f2003c7ba391d762ad | [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_zero",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_zero"
]
| [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_decimals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_formal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_integers",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals_force",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_signed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_and",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_one_as_noun_or_article",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_relaxed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_second_as_time_unit_vs_ordinal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_centuries",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_exc",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_zeroes",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_uppercase",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_all_ordinals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_decimals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_formal",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_integers",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_newline",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_ordinals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_signed",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_article",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_relaxed",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_centuries",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_exc",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_variants",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_zeroes",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_trente_et_onze",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_un_pronoun"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2024-03-19 15:02:09+00:00 | mit | 1,046 |
|
allo-media__text2num-113 | diff --git a/text_to_num/lang/english.py b/text_to_num/lang/english.py
index 4286476..9332a37 100644
--- a/text_to_num/lang/english.py
+++ b/text_to_num/lang/english.py
@@ -32,6 +32,8 @@ from .base import Language
# Those words multiplies lesser numbers (see Rules)
# Special case: "hundred" is processed apart.
MULTIPLIERS = {
+ "hundred": 100,
+ "hundreds": 100,
"thousand": 1_000,
"thousands": 1_000,
"million": 1_000_000,
diff --git a/text_to_num/parsers.py b/text_to_num/parsers.py
index 72c3add..b406b81 100644
--- a/text_to_num/parsers.py
+++ b/text_to_num/parsers.py
@@ -120,11 +120,11 @@ class WordStreamValueParser(WordStreamValueParserInterface):
return coef != self.value
"""Is this multiplier expected?"""
- if coef > self.value and (self.value > 0 or coef >= 1000):
+ if coef > self.value and (self.value > 0 or coef >= 100):
# a multiplier can be applied to anything lesser than itself,
# as long as it not zero (special case for 1000 which then implies 1)
return True
- if coef * 1000 <= self.n000_val:
+ if coef * 1000 <= self.n000_val or coef == 100 and 100 > self.grp_val:
# a multiplier can not be applied to a value bigger than itself,
# so it must be applied to the current group only.
# ex. for "mille": "deux millions cent cinquante mille"
@@ -133,7 +133,7 @@ class WordStreamValueParser(WordStreamValueParserInterface):
# we test the 1000 × ``coef`` (as the multipliers above 100,
# are a geometric progression of ratio 1000)
return (
- self.grp_val > 0 or coef == 1000
+ self.grp_val > 0 or coef == 1000 or coef == 100
) # "mille" without unit is additive
# TODO: There is one exception to the above rule: "de milliard"
# ex. : "mille milliards de milliards"
@@ -175,6 +175,10 @@ class WordStreamValueParser(WordStreamValueParserInterface):
return False
# a multiplier can not be applied to a value bigger than itself,
# so it must be applied to the current group
+ if coef < 1000:
+ self.grp_val = (self.grp_val or 1) * coef
+ self.last_word = None
+ return True
if coef < self.n000_val:
self.n000_val = self.n000_val + coef * (
self.grp_val or 1
| allo-media/text2num | 79659e54d4bdf5f4fd0c295747a2b1ccdcedbe4a | diff --git a/tests/test_text_to_num_en.py b/tests/test_text_to_num_en.py
index d12341b..640d7c4 100644
--- a/tests/test_text_to_num_en.py
+++ b/tests/test_text_to_num_en.py
@@ -51,13 +51,10 @@ class TestTextToNumEN(TestCase):
self.assertEqual(text2num("thousand nine hundred twenty", "en"), 1920)
self.assertEqual(text2num("one billion twenty-five millions", "en"), 1_025_000_000)
- def test_text2num_centuries(self):
- self.assertEqual(text2num("nineteen hundred seventy-three", "en"), 1973)
-
def test_text2num_exc(self):
self.assertRaises(ValueError, text2num, "thousand thousand two hundreds", "en")
self.assertRaises(ValueError, text2num, "sixty fifteen", "en")
- self.assertRaises(ValueError, text2num, "sixty hundred", "en")
+ self.assertRaises(ValueError, text2num, "hundred hundreds", "en")
def test_text2num_zeroes(self):
self.assertEqual(text2num("zero", "en"), 0)
@@ -67,6 +64,13 @@ class TestTextToNumEN(TestCase):
self.assertRaises(ValueError, text2num, "fifty zero three", "en")
self.assertRaises(ValueError, text2num, "fifty three zero", "en")
+ def test_text2num_hundreds(self):
+ source = "forty five hundred thirty eight"
+ expected = 4538
+ self.assertEqual(text2num(source, "en"), expected)
+ self.assertEqual(text2num("nineteen hundred seventy-three", "en"), 1973)
+ self.assertEqual(text2num("sixty hundred", "en"), 6000)
+
def test_alpha2digit_integers(self):
source = "twenty-five cows, twelve chickens and one hundred twenty five kg of potatoes."
expected = "25 cows, 12 chickens and 125 kg of potatoes."
@@ -97,6 +101,11 @@ class TestTextToNumEN(TestCase):
expected = "34 = 34"
self.assertEqual(alpha2digit(source, "en", relaxed=True), expected)
+ def test_alpha2digit_hundreds(self):
+ source = "forty five hundred thirty eight dollars and eighteen cents"
+ expected = "4538 dollars and 18 cents"
+ self.assertEqual(alpha2digit(source, "en"), expected)
+
def test_alpha2digit_formal(self):
source = "plus thirty-three nine sixty zero six twelve twenty-one"
expected = "+33 9 60 06 12 21"
| forty five hundred thirty => 45 130 instead of 4530
Hello,
I'm using your package and the alpha2digit method on English text.
The following text returns a wrong value:
`forty five hundred thirty dollars and ninety cents` => `45 130 dollars and 90 cents`
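
A minimal reproduction in code; `alpha2digit` is the public entry point exercised by the repository's own tests:

```python
from text_to_num import alpha2digit

print(alpha2digit("forty five hundred thirty dollars and ninety cents", "en"))
# prints: 45 130 dollars and 90 cents
```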
I'm expecting the following: `4530 dollars and 90 cents` | 0.0 | 79659e54d4bdf5f4fd0c295747a2b1ccdcedbe4a | [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_hundreds",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_hundreds"
]
| [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_decimals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_formal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_integers",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals_force",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_signed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_zero",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_and",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_one_as_noun_or_article",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_relaxed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_second_as_time_unit_vs_ordinal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_exc",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_zeroes",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_uppercase"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2024-04-16 16:57:36+00:00 | mit | 1,047 |
|
allo-media__text2num-77 | diff --git a/text_to_num/lang/spanish.py b/text_to_num/lang/spanish.py
index cecbf8d..679ccde 100644
--- a/text_to_num/lang/spanish.py
+++ b/text_to_num/lang/spanish.py
@@ -61,6 +61,10 @@ STENS: Dict[str, int] = {
)
}
+STENS["veintitrés"] = 23
+STENS["veintidós"] = 22
+
+
# Ten multiples
# Ten multiples may be followed by a unit only;
MTENS: Dict[str, int] = {
| allo-media/text2num | b3c763288ff4e49b42c2c9ab8a6b71b553d18526 | diff --git a/tests/test_text_to_num_es.py b/tests/test_text_to_num_es.py
index 7633952..e6974ae 100644
--- a/tests/test_text_to_num_es.py
+++ b/tests/test_text_to_num_es.py
@@ -38,6 +38,8 @@ class TestTextToNumES(TestCase):
self.assertEqual(text2num("diecinueve", "es"), 19)
self.assertEqual(text2num("veinte", "es"), 20)
self.assertEqual(text2num("veintiuno", "es"), 21)
+ self.assertEqual(text2num("veintitres", "es"), 23)
+ self.assertEqual(text2num("veintitrés", "es"), 23)
self.assertEqual(text2num("treinta", "es"), 30)
self.assertEqual(text2num("treinta y uno", "es"), 31)
self.assertEqual(text2num("treinta y dos", "es"), 32)
| Spanish numbers like ciento veintitrés not correctly converted
The number "ciento veintitrés" in Spanish is converted to 100 instead of 123. It seems "veintitrés" is not recognized. | 0.0 | b3c763288ff4e49b42c2c9ab8a6b71b553d18526 | [
"tests/test_text_to_num_es.py::TestTextToNumES::test_text2num"
]
| [
"tests/test_text_to_num_es.py::TestTextToNumES::test_accent",
"tests/test_text_to_num_es.py::TestTextToNumES::test_alpha2digit_decimals",
"tests/test_text_to_num_es.py::TestTextToNumES::test_alpha2digit_formal",
"tests/test_text_to_num_es.py::TestTextToNumES::test_alpha2digit_integers",
"tests/test_text_to_num_es.py::TestTextToNumES::test_alpha2digit_signed",
"tests/test_text_to_num_es.py::TestTextToNumES::test_alpha2digit_zero",
"tests/test_text_to_num_es.py::TestTextToNumES::test_and",
"tests/test_text_to_num_es.py::TestTextToNumES::test_one_as_noun_or_article",
"tests/test_text_to_num_es.py::TestTextToNumES::test_relaxed",
"tests/test_text_to_num_es.py::TestTextToNumES::test_text2num_exc",
"tests/test_text_to_num_es.py::TestTextToNumES::test_text2num_zeroes"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2022-07-05 07:23:53+00:00 | mit | 1,048 |
|
allo-media__text2num-96 | diff --git a/text_to_num/parsers.py b/text_to_num/parsers.py
index 5573375..d90ebb7 100644
--- a/text_to_num/parsers.py
+++ b/text_to_num/parsers.py
@@ -124,13 +124,14 @@ class WordStreamValueParser(WordStreamValueParserInterface):
# a multiplier can be applied to anything lesser than itself,
# as long as it not zero (special case for 1000 which then implies 1)
return True
- if coef * coef <= self.n000_val:
+ if coef * 1000 <= self.n000_val:
# a multiplier can not be applied to a value bigger than itself,
# so it must be applied to the current group only.
# ex. for "mille": "deux millions cent cinquante mille"
# ex. for "millions": "trois milliard deux cent millions"
# But not twice: "dix mille cinq mille" is invalid for example. Therefore,
- # we test the square of ``coef``.
+ # we test the 1000 × ``coef`` (as the multipliers above 100,
+ # are a geometric progression of ratio 1000)
return (
self.grp_val > 0 or coef == 1000
) # "mille" without unit is additive
| allo-media/text2num | 03165958242c33b2770cde1701a39ba3436b8103 | diff --git a/tests/test_text_to_num_en.py b/tests/test_text_to_num_en.py
index 86344c6..6d21928 100644
--- a/tests/test_text_to_num_en.py
+++ b/tests/test_text_to_num_en.py
@@ -49,6 +49,7 @@ class TestTextToNumEN(TestCase):
self.assertEqual(text2num("one hundred fifteen", "en"), 115)
self.assertEqual(text2num("seventy-five thousands", "en"), 75000)
self.assertEqual(text2num("thousand nine hundred twenty", "en"), 1920)
+ self.assertEqual(text2num("one billion twenty-five millions", "en"), 1_025_000_000)
def test_text2num_centuries(self):
self.assertEqual(text2num("nineteen hundred seventy-three", "en"), 1973)
diff --git a/tests/test_text_to_num_fr.py b/tests/test_text_to_num_fr.py
index e212881..956153f 100644
--- a/tests/test_text_to_num_fr.py
+++ b/tests/test_text_to_num_fr.py
@@ -46,6 +46,7 @@ class TestTextToNumFR(TestCase):
self.assertEqual(text2num("quinze", "fr"), 15)
self.assertEqual(text2num("soixante quinze mille", "fr"), 75000)
+ self.assertEqual(text2num("un milliard vingt-cinq millions", "fr"), 1_025_000_000)
def test_text2num_variants(self):
self.assertEqual(text2num("quatre-vingt dix-huit", "fr"), 98)
| Billions followed by millions (e.g. 1,025,000,000) not converted for English, French
When the next multiplier is a million following billions, it is not handled correctly: it is not detected as an expected valid multiplier. There is a "TODO" comment in the related code; I suspect it is about this issue.
It doesn't happen with Spanish because in Spanish a billion is basically "thousand millions".
If the next multiplier following billions is a thousand, it works fine, because 1000 × 1000 = 1,000,000 < 1 billion.
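
A minimal reproduction; the expected values below are the ones encoded in the regression tests added with this fix:

```python
from text_to_num import text2num

# Both calls fail before the fix; expected values taken from the tests:
assert text2num("one billion twenty-five millions", "en") == 1_025_000_000
assert text2num("un milliard vingt-cinq millions", "fr") == 1_025_000_000
```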
For now, I added the following check to the end of the `is_coef_appliable()` method, just before it returns False; it works fine for me now:
```python
if coef > self.grp_val and coef * self.grp_val < self.n000_val:
    return True
```
It seems reasonable to me, but I may not have considered all possible cases across different languages. Just a suggestion for the time being. | 0.0 | 03165958242c33b2770cde1701a39ba3436b8103 | [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num"
]
| [
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_decimals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_formal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_integers",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_ordinals_force",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_signed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_alpha2digit_zero",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_and",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_one_as_noun_or_article",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_relaxed",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_second_as_time_unit_vs_ordinal",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_centuries",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_exc",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_text2num_zeroes",
"tests/test_text_to_num_en.py::TestTextToNumEN::test_uppercase",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_all_ordinals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_decimals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_formal",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_integers",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_ordinals",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_signed",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_alpha2digit_zero",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_article",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_relaxed",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_centuries",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_exc",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_variants",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_text2num_zeroes",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_trente_et_onze",
"tests/test_text_to_num_fr.py::TestTextToNumFR::test_un_pronoun"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-08-14 14:52:31+00:00 | mit | 1,049 |
|
allrod5__parameters-validation-12 | diff --git a/README.md b/README.md
index 49dd10f..c44f8e9 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,51 @@ def foo(df: log_to_debug(str)):
# do something
```
+## Skipping validations
+
+For whatever reason, if one wants to skip validations a method `skip_validations` is
+appended to the decorated method. When called it will return the original method as if
+it wasn't decorated with `@validate_parameters`:
+
+```python
+from parameters_validation import no_whitespaces, validate_parameters
+
+@validate_parameters
+def foo(arg: no_whitespaces(str)):
+ print(arg)
+
+foo.skip_validations()("white spaces")
+# prints: white spaces
+```
+
+Note that, in the example, `foo.skip_validations()` does not changes `foo` itself but
+actually returns another function without the validation behaviour.
+
+## Testing
+
+In general, unit and integration tests should be fine with parameters validation
+validating input parameters though it might be the case one wants to mock some or all
+of the validations.
+
+Functions decorated with `@validate_parameters` are appended with a `mock_validations`
+method that accepts a dictionary mapping parameters to mock validations:
+
+```python
+from parameters_validation import no_whitespaces, validate_parameters
+
+@validate_parameters
+def foo(arg: no_whitespaces(str)):
+ print(arg)
+
+foo.mock_validations({"arg": lambda *_, **__: print("mocked")})("white spaces")
+# prints: mocked
+# prints: white spaces
+```
+
+Note that mock functions **must not** be decorated with `@parameter_validation`.
+Also, note that, in the example, `foo.mock_validations(...)` does not changes `foo`
+itself but actually returns another function with mocked behaviour.
+
## When to validate parameters
It is a pythonic convention follow the [EAFP](https://docs.python.org/3/glossary.html#term-eafp) principle whenever possible. There are cases however that skipping validations leads to silent errors and big headaches. Let's use an illustrative example:
diff --git a/parameters_validation/builtin_validations.py b/parameters_validation/builtin_validations.py
index c56476e..7c8a52b 100644
--- a/parameters_validation/builtin_validations.py
+++ b/parameters_validation/builtin_validations.py
@@ -210,4 +210,4 @@ def _build_arg(arg_name, arg_type):
arg += " <{t}>".format(t=arg_type.__name__)
except AttributeError:
arg += " <{t}>".format(t=arg_type._name)
- return arg
\ No newline at end of file
+ return arg
diff --git a/parameters_validation/validate_parameters_decorator.py b/parameters_validation/validate_parameters_decorator.py
index fa843a6..b59ed56 100644
--- a/parameters_validation/validate_parameters_decorator.py
+++ b/parameters_validation/validate_parameters_decorator.py
@@ -1,7 +1,55 @@
import inspect
+from copy import deepcopy
from functools import wraps
+def _get_parameter_value_dict(specs, args, kwargs):
+ parameters = kwargs.copy()
+ for arg_value, parameter in zip(args, specs.args):
+ parameters[parameter] = arg_value
+ if specs.defaults:
+ for default_parameter, default_value in zip(
+ specs.args[len(specs.args) - len(specs.defaults):], specs.defaults
+ ):
+ if default_parameter in parameters:
+ continue
+ parameters[default_parameter] = default_value
+ if specs.kwonlydefaults:
+ for default_parameter, default_value in specs.kwonlydefaults.items():
+ if default_parameter in parameters:
+ continue
+ parameters[default_parameter] = default_value
+ return parameters
+
+
+def _get_wrapper(f: callable, specs: inspect.FullArgSpec, validations: dict = None):
+ if validations is None:
+ validations = specs.annotations
+
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ parameters = _get_parameter_value_dict(specs, args, kwargs)
+ for parameter, annotation in validations.items():
+ if not hasattr(annotation, "_parameter_validation"):
+ continue
+ annotation(parameters[parameter], parameter)
+
+ return f(*args, **kwargs)
+
+ def parameter_validation_mock(pseudo_validation_function: callable):
+ mock = deepcopy(pseudo_validation_function)
+ mock._parameter_validation = True
+ return mock
+
+ def mock_validations(mocks: dict):
+ valid_mocks = {p: parameter_validation_mock(v) for p, v in mocks.items()}
+ return _get_wrapper(f, specs, {**validations, **valid_mocks})
+ wrapper.mock_validations = mock_validations
+ wrapper.skip_validations = lambda: f
+
+ return wrapper
+
+
def validate_parameters(func):
"""
Decorator to apply validations in the parameters type hints before executing the
@@ -18,36 +66,28 @@ def validate_parameters(func):
... foo("") # invalid, empty
... foo(None) # invalid, none
+ Validations can be skipped with `.skip_validations()`:
+
+ >>> from parameters_validation import non_blank
+ ...
+ ... @validate_parameters
+ ... def foo(s: non_blank(str)):
+ ... pass
+ ...
+ ... foo.skip_validations()("") # does not throw since validations are skipped
+
+ Validations can be mocked for testing purposes with `.mock_validations({...})`:
+
+ >>> from parameters_validation import non_blank
+ ...
+ ... @validate_parameters
+ ... def foo(s: non_blank(str)):
+ ... pass
+ ...
+ ... foo.mock_validations({"s": lambda *_: print("mocked")})("") # prints "mocked"
+
:param func: decorated function
:return: wrapped function
"""
specs = inspect.getfullargspec(func)
- @wraps(func)
- def wrapper(*args, **kwargs):
- parameters = get_parameter_value_dict(args, kwargs)
- for parameter, annotation in specs.annotations.items():
- if not hasattr(annotation, "_parameter_validation"):
- continue
- annotation(parameters[parameter], parameter)
-
- return func(*args, **kwargs)
-
- def get_parameter_value_dict(args, kwargs):
- parameters = kwargs.copy()
- for arg_value, parameter in zip(args, specs.args):
- parameters[parameter] = arg_value
- if specs.defaults:
- for default_parameter, default_value in zip(
- specs.args[len(specs.args)-len(specs.defaults):], specs.defaults
- ):
- if default_parameter in parameters:
- continue
- parameters[default_parameter] = default_value
- if specs.kwonlydefaults:
- for default_parameter, default_value in specs.kwonlydefaults.items():
- if default_parameter in parameters:
- continue
- parameters[default_parameter] = default_value
- return parameters
-
- return wrapper
+ return _get_wrapper(func, specs)
diff --git a/setup.py b/setup.py
index 128adc3..99d95d4 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ extras = {
setup(
name='parameters-validation',
- version='1.1.5',
+ version='1.2.0',
packages=['parameters_validation'],
url='https://github.com/allrod5/parameters-validation',
license='MIT',
| allrod5/parameters-validation | 42d116873d426360b5e2f26726c02c5044968714 | diff --git a/tests/unit/test_bultin_validations.py b/tests/unit/test_bultin_validations.py
index 8d5fb55..674c7e4 100644
--- a/tests/unit/test_bultin_validations.py
+++ b/tests/unit/test_bultin_validations.py
@@ -52,3 +52,19 @@ class TestBuiltinValidations:
def test_strongly_typed_incorrect_usage(self):
with pytest.raises(RuntimeError):
bar("")
+
+ def test_unable_to_validate_non_blank(self):
+ with pytest.raises(RuntimeError):
+ foo(42, "", [None], "", 42, [1])
+
+ def test_unable_to_validate_non_empty(self):
+ with pytest.raises(RuntimeError):
+ foo("non-blank", "", None, "", 42, [1])
+
+ def test_unable_to_validate_no_whitespaces(self):
+ with pytest.raises(RuntimeError):
+ foo("non-blank", "", [None], None, 42, [1])
+
+ def test_unable_to_validate_non_negative(self):
+ with pytest.raises(RuntimeError):
+ foo("non-blank", "", [None], "", None, [1])
diff --git a/tests/unit/test_mock_validations.py b/tests/unit/test_mock_validations.py
new file mode 100644
index 0000000..d3eb050
--- /dev/null
+++ b/tests/unit/test_mock_validations.py
@@ -0,0 +1,36 @@
+import pytest
+
+from parameters_validation import validate_parameters, parameter_validation, non_blank
+
+
+@parameter_validation
+def type_error(*args, **kwargs):
+ raise TypeError
+
+
+def value_error(*args, **kwargs):
+ raise ValueError
+
+
+@validate_parameters
+def foo(arg: type_error(str)):
+ pass
+
+
+@validate_parameters
+def bar(arg: non_blank(str)):
+ pass
+
+
+class TestValidateParametersDecoratorMock:
+ def test_setup(self):
+ with pytest.raises(TypeError):
+ foo("anything")
+
+ def test_mock_validation_replaces_original(self):
+ with pytest.raises(ValueError):
+ foo.mock_validations({"arg": value_error})("anything")
+
+ def test_unmatched_mock_raises_key_error(self):
+ with pytest.raises(KeyError):
+ bar.mock_validations({"unmatched": value_error})("non_blank")
diff --git a/tests/unit/test_skip_validations.py b/tests/unit/test_skip_validations.py
new file mode 100644
index 0000000..da7d697
--- /dev/null
+++ b/tests/unit/test_skip_validations.py
@@ -0,0 +1,16 @@
+from parameters_validation import validate_parameters, parameter_validation
+
+
+@parameter_validation
+def custom_validation(param, arg_name, arg_type):
+ raise Exception
+
+
+@validate_parameters
+def foo(arg: custom_validation(str)):
+ pass
+
+
+class TestValidateParametersDecoratorSkip:
+ def test_skip_validation(self):
+ foo.skip_validations()("anything")
| Patching the validate_parameters decorator
### Problem description
With the current state of the library it is not intuitive to write tests for methods that use the `validate_parameters` decorator. Because decorators are applied the moment the Python module is imported, patching the decorator forces us to break the usual import conventions and import the module in the middle of the code, after patching it.
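
A sketch of that workaround, with a hypothetical `my_module` standing in for the module under test:

```python
from unittest.mock import patch

def test_foo():
    # The decorator runs at import time, so it must be patched first and the
    # module under test imported afterwards, inside the test body.
    with patch("parameters_validation.validate_parameters", new=lambda f: f):
        import my_module  # hypothetical module; normally imported at the top
```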
### Possible solution strategy
#### Disable the type checking
Since the decorator doesn't change the value of the inputs, all the testing needed for the validation part can be done separately. So probably just having a way to deactivate `validate_parameters` when running tests is good enough for most cases.
#### Postpone the evaluation
I took a brief look at postponing the decorator evaluation so we could patch the method the usual way. It looks like the Python community is aware of the problems that evaluating annotations at function definition time can cause, since I found a [PEP for this](https://www.python.org/dev/peps/pep-0563/) discussing exactly that.
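The change shipped in this record takes the first route: the wrapper produced by `@validate_parameters` gains `skip_validations()` and `mock_validations()` hooks (see the README section above), so tests can bypass or mock validations without patching imports.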
| 0.0 | 42d116873d426360b5e2f26726c02c5044968714 | [
"tests/unit/test_mock_validations.py::TestValidateParametersDecoratorMock::test_mock_validation_replaces_original",
"tests/unit/test_mock_validations.py::TestValidateParametersDecoratorMock::test_unmatched_mock_raises_key_error",
"tests/unit/test_skip_validations.py::TestValidateParametersDecoratorSkip::test_skip_validation"
]
| [
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_success",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_non_blank",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_non_null",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_non_empty",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_no_whitespaces",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_non_negative",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_strongly_typed",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_strongly_typed_incorrect_usage",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_unable_to_validate_non_blank",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_unable_to_validate_non_empty",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_unable_to_validate_no_whitespaces",
"tests/unit/test_bultin_validations.py::TestBuiltinValidations::test_unable_to_validate_non_negative",
"tests/unit/test_mock_validations.py::TestValidateParametersDecoratorMock::test_setup"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-03-24 22:29:03+00:00 | mit | 1,050 |
|
allrod5__parameters-validation-3 | diff --git a/parameters_validation/validate_parameters_decorator.py b/parameters_validation/validate_parameters_decorator.py
index 877f496..7aea24b 100644
--- a/parameters_validation/validate_parameters_decorator.py
+++ b/parameters_validation/validate_parameters_decorator.py
@@ -33,6 +33,6 @@ def validate_parameters(func):
continue
annotation(parameters[parameter], parameter)
- func(*args, **kwargs)
+ return func(*args, **kwargs)
return wrapper
diff --git a/setup.py b/setup.py
index 884467f..efdb614 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ extras = {
setup(
name='parameters-validation',
- version='1.1.0',
+ version='1.1.1',
packages=['parameters_validation'],
url='https://github.com/allrod5/parameters-validation',
license='MIT',
| allrod5/parameters-validation | 8a36fbaa01780960d8963fba35aae3f06efcc6a5 | diff --git a/test/__init__.py b/tests/__init__.py
similarity index 100%
rename from test/__init__.py
rename to tests/__init__.py
diff --git a/tests/bugfixes/__init__.py b/tests/bugfixes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/bugfixes/test_return_value_is_preserved.py b/tests/bugfixes/test_return_value_is_preserved.py
new file mode 100644
index 0000000..af96bc4
--- /dev/null
+++ b/tests/bugfixes/test_return_value_is_preserved.py
@@ -0,0 +1,27 @@
+"""
+This test covers a fix for a bug first reported in
+https://github.com/allrod5/parameters-validation/issues/2
+
+In version 1.1.0 a function annotated with decorator
+@validate_parameters would return None regardless of it's actual
+return value
+
+This bug was fixed in version 1.1.1
+"""
+from parameters_validation import non_null, validate_parameters
+
+
+def test_return_value_is_preserved():
+ # given
+ @validate_parameters
+ def guinea_pig(front: str, back: non_null(str)):
+ result = None
+ if front:
+ result = front + '-' + back
+ return result
+
+ # when
+ return_value = guinea_pig("one", "two")
+
+ # then
+ assert return_value == "one-two"
| Decorated function cannot return the right value.
I think I found a bug.
**How to reproduce:**
```python
from parameters_validation import non_null, validate_parameters
@validate_parameters
def concat(front: str, back: non_null(str)):
    result = None
    if front:
        result = front + '-' + back
    print('inner_result = {}'.format(result))
    return result

result_str = concat('one', 'two')
print('outer_result = {}'.format(result_str))
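# Cause, per the one-line fix in this record: the decorator's wrapper called
# func(*args, **kwargs) without `return`, so every decorated function
# returned None and outer_result printed None.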
```
step 1: Run the above code
step 2: 'inner_result' should be the same as 'outer_result', but they are different
step 3: If you comment out the '@validate_parameters' line, the output is correct.
| 0.0 | 8a36fbaa01780960d8963fba35aae3f06efcc6a5 | [
"tests/bugfixes/test_return_value_is_preserved.py::test_return_value_is_preserved"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-06-04 11:41:27+00:00 | mit | 1,051 |
|
allrod5__parameters-validation-6 | diff --git a/parameters_validation/validate_parameters_decorator.py b/parameters_validation/validate_parameters_decorator.py
index 7aea24b..4cc3d7c 100644
--- a/parameters_validation/validate_parameters_decorator.py
+++ b/parameters_validation/validate_parameters_decorator.py
@@ -24,10 +24,7 @@ def validate_parameters(func):
specs = inspect.getfullargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
- parameters = kwargs.copy()
- for arg_value, parameter in zip(args, specs.args):
- parameters[parameter] = arg_value
-
+ parameters = get_parameter_value_dict(args, kwargs)
for parameter, annotation in specs.annotations.items():
if not hasattr(annotation, "_parameter_validation"):
continue
@@ -35,4 +32,22 @@ def validate_parameters(func):
return func(*args, **kwargs)
+ def get_parameter_value_dict(args, kwargs):
+ parameters = kwargs.copy()
+ for arg_value, parameter in zip(args, specs.args):
+ parameters[parameter] = arg_value
+ if specs.defaults:
+ for default_parameter, default_value in zip(specs.args, specs.defaults):
+ if default_parameter in parameters:
+ pass
+ parameters[default_parameter] = default_value
+ if specs.kwonlydefaults:
+ for default_parameter, default_value in zip(
+ specs.kwonlyargs, specs.kwonlydefaults
+ ):
+ if default_parameter in parameters:
+ pass
+ parameters[default_parameter] = default_value
+ return parameters
+
return wrapper
| allrod5/parameters-validation | 611c5d4744dde3354ee7e8460d199bc79ee4af94 | diff --git a/tests/bugfixes/test_strongly_typed_on_default_parameters.py b/tests/bugfixes/test_strongly_typed_on_default_parameters.py
new file mode 100644
index 0000000..aa6d8a2
--- /dev/null
+++ b/tests/bugfixes/test_strongly_typed_on_default_parameters.py
@@ -0,0 +1,26 @@
+"""
+This test covers a fix for a bug first reported in
+https://github.com/allrod5/parameters-validation/issues/5
+
+In version 1.1.1 a function annotated with decorator
+@validate_parameters would crash if the builtin validation
+`strongly_typed` is used for a parameter with default value
+and there is a call to this function that uses the default value
+
+This bug was fixed in version 1.1.2
+"""
+from parameters_validation import non_null, validate_parameters, strongly_typed
+
+
+def test_strongly_typed_on_default_parameters():
+ # given
+ default_value = "default value"
+ @validate_parameters
+ def guinea_pig(a: strongly_typed(str) = default_value):
+ return a
+
+ # when
+ return_value = guinea_pig()
+
+ # then
+ assert return_value == default_value
| `strongly_typed` validation does not work with default parameters
Attempting to use `strongly_typed` validation on default parameters will break the code.
```python
from parameters_validation import validate_parameters, strongly_typed
@validate_parameters
def foo(a: strongly_typed(str) = "default value"):
    print(a)
# Breaks and raises an obscure error
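# Cause, per the fix in this record: default values were never merged into
# the parameter dict the validations read from, so a call that relies on the
# default (foo()) failed with a KeyError when looking up the missing value.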
``` | 0.0 | 611c5d4744dde3354ee7e8460d199bc79ee4af94 | [
"tests/bugfixes/test_strongly_typed_on_default_parameters.py::test_strongly_typed_on_default_parameters"
]
| []
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2019-06-10 19:33:07+00:00 | mit | 1,052 |
|
allrod5__parameters-validation-9 | diff --git a/parameters_validation/validate_parameters_decorator.py b/parameters_validation/validate_parameters_decorator.py
index 2fa2cf3..fa843a6 100644
--- a/parameters_validation/validate_parameters_decorator.py
+++ b/parameters_validation/validate_parameters_decorator.py
@@ -41,12 +41,12 @@ def validate_parameters(func):
specs.args[len(specs.args)-len(specs.defaults):], specs.defaults
):
if default_parameter in parameters:
- pass
+ continue
parameters[default_parameter] = default_value
if specs.kwonlydefaults:
for default_parameter, default_value in specs.kwonlydefaults.items():
if default_parameter in parameters:
- pass
+ continue
parameters[default_parameter] = default_value
return parameters
diff --git a/setup.py b/setup.py
index 014293a..a82c97a 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ extras = {
setup(
name='parameters-validation',
- version='1.1.3',
+ version='1.1.4',
packages=['parameters_validation'],
url='https://github.com/allrod5/parameters-validation',
license='MIT',
| allrod5/parameters-validation | 900a9aff357d7e931a7aabdbe3a68fe00a29c526 | diff --git a/tests/bugfixes/test_validations_on_parameters_with_default_value.py b/tests/bugfixes/test_validations_on_parameters_with_default_value.py
new file mode 100644
index 0000000..72104e3
--- /dev/null
+++ b/tests/bugfixes/test_validations_on_parameters_with_default_value.py
@@ -0,0 +1,84 @@
+"""
+This test covers a fix for a bug first reported in
+https://github.com/allrod5/parameters-validation/issues/8
+
+In version 1.1.3 validations defined for a parameter with default a
+value are always applied to the default value and not to explicit
+passed values.
+
+This bug was fixed in version 1.1.4
+"""
+import pytest
+
+from parameters_validation import validate_parameters, non_blank, no_whitespaces
+
+
+class TestValidationsOnParameterWithDefaultValue:
+ def test_default_value_success(self):
+ # given
+ default_value = "default_value"
+
+ @validate_parameters
+ def guinea_pig(s: no_whitespaces(non_blank(str)) = default_value):
+ return s
+
+ # when
+ return_value = guinea_pig()
+
+ # then
+ assert return_value == default_value
+
+ def test_bad_default_value(self):
+ # given
+ default_value = "default value"
+
+ @validate_parameters
+ def guinea_pig(s: no_whitespaces(non_blank(str)) = default_value):
+ return s
+
+ # then
+ with pytest.raises(ValueError):
+ guinea_pig()
+
+ def test_custom_value_success(self):
+ # given
+ default_value = "default_value"
+ custom_value = "custom_value"
+
+ @validate_parameters
+ def guinea_pig(s: no_whitespaces(non_blank(str)) = default_value):
+ return s
+
+ # when
+ return_value = guinea_pig(custom_value)
+
+ # then
+ assert return_value == custom_value
+
+ def test_bad_custom_value(self):
+ # given
+ default_value = "default_value"
+ whitespaced_string = "whitespaced string"
+ blank_string = " "
+ empty_string = ""
+ null_string = None
+
+ @validate_parameters
+ def guinea_pig(s: no_whitespaces(non_blank(str)) = default_value):
+ return s
+
+ # then
+ with pytest.raises(ValueError):
+ guinea_pig(whitespaced_string)
+
+ # then
+ with pytest.raises(ValueError):
+ guinea_pig(blank_string)
+
+ # then
+ with pytest.raises(ValueError):
+ guinea_pig(empty_string)
+
+ # then
+ with pytest.raises(ValueError):
+ guinea_pig(null_string)
| Validations on parameters with default values aren't performed
Using validations on parameters with default values doesn't work: apparently, only the default value is validated, not the value that is actually passed.
This bug was introduced after #7.
```python
from parameters_validation import non_blank, validate_parameters
@validate_parameters
def foo(s: non_blank(str) = "default"):
    return s
foo("") # won't raise errors
``` | 0.0 | 900a9aff357d7e931a7aabdbe3a68fe00a29c526 | [
"tests/bugfixes/test_validations_on_parameters_with_default_value.py::TestValidationsOnParameterWithDefaultValue::test_bad_custom_value"
]
| [
"tests/bugfixes/test_validations_on_parameters_with_default_value.py::TestValidationsOnParameterWithDefaultValue::test_default_value_success",
"tests/bugfixes/test_validations_on_parameters_with_default_value.py::TestValidationsOnParameterWithDefaultValue::test_bad_default_value",
"tests/bugfixes/test_validations_on_parameters_with_default_value.py::TestValidationsOnParameterWithDefaultValue::test_custom_value_success"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-06-11 11:54:55+00:00 | mit | 1,053 |
|
alltheplaces__alltheplaces-949 | diff --git a/locations/hours.py b/locations/hours.py
index 305f600e..6603b0ec 100644
--- a/locations/hours.py
+++ b/locations/hours.py
@@ -55,7 +55,9 @@ class OpeningHours(object):
opening_hours = '24/7'
else:
for day_group in day_groups:
- if day_group['from_day'] == day_group['to_day']:
+ if not day_group['hours']:
+ continue
+ elif day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
diff --git a/locations/spiders/banfield_pet_hospital.py b/locations/spiders/banfield_pet_hospital.py
new file mode 100644
index 00000000..217c2be8
--- /dev/null
+++ b/locations/spiders/banfield_pet_hospital.py
@@ -0,0 +1,71 @@
+import scrapy
+import re
+import urllib.parse
+
+from locations.items import GeojsonPointItem
+from locations.hours import OpeningHours
+
+
+class BanfieldPetHospitalScraper(scrapy.Spider):
+ name = "banfield_pet_hospital"
+ allowed_domains = ["www.banfield.com"]
+ download_delay = 0.5
+ start_urls = (
+ 'https://www.banfield.com/our-hospitals/hospital-locations/all-locations',
+ )
+
+ def parse_store(self, response):
+
+ elem = response.xpath('//div[contains(@class, "our-hospitals-location")]')
+
+ script_body = ' '.join(elem.xpath('.//script/text()').extract())
+ match = re.search(r'.*google.maps.LatLng\(([0-9.-]+),\s([0-9.-]+)\)', script_body)
+
+ lat, lon = match.groups()
+
+ # use last 3 elements of the store url as unique identifier (store number does not appear to be unique)
+ ref = "_".join(urllib.parse.urlsplit(response.url).path.split('/')[-3:])
+
+ number = elem.xpath('//div[@class="vcard"]/p[@id="hospitalAddressHospitalNumber"]/text()').extract_first()
+ number = re.search(r'Hospital\sNumber:\s+(\d+)', number).group(1)
+
+ properties = {
+ 'name': elem.xpath('//div[@class="vcard"]/p[@class="fn"]/text()').extract_first(),
+ 'addr_full': elem.xpath('//div[@class="vcard"]/span[@class="street-address"]/text()').extract_first(),
+ 'phone': elem.xpath('//div[@class="vcard"]/p[@id="hospitalAddressPhone"]/text()').extract_first(),
+ 'city': elem.xpath('//div[@class="vcard"]/span[@class="region"]/text()').extract_first(),
+ 'state': elem.xpath('//div[@class="vcard"]/span[@class="state"]/text()').extract_first(),
+ 'postcode': elem.xpath('//div[@class="vcard"]/span[@class="postal-code"]/text()').extract_first(),
+ 'ref': ref,
+ 'website': response.url,
+ 'lat': lat,
+ 'lon': lon,
+ 'extras': {
+ 'number': number
+ }
+ }
+
+ days = elem.xpath('//div[@class="hours"]/div[contains(@class, "day")]/@content').extract()
+ opening_hours = OpeningHours()
+
+ for d in days:
+ match = re.search(r'([A-Za-z]{2})\s([\d:]+)-([\d:]+)', d)
+ if match:
+ day, open, close = match.groups()
+ opening_hours.add_range(day=day, open_time=open, close_time=close)
+
+ hours = opening_hours.as_opening_hours()
+
+ if hours and hours != 'Mo-Su ':
+ properties['opening_hours'] = hours
+
+ yield GeojsonPointItem(**properties)
+
+ def parse(self, response):
+ stores = response.xpath('//li/table')
+ for store in stores:
+ elem = store.xpath('.//td[@class="hospname"]/a')
+ path = elem.xpath('.//@href').extract_first()
+ name = elem.xpath('.//text()').extract_first()
+
+ yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
| alltheplaces/alltheplaces | 4faa631b73de26eab29eeff58d4e5ecad963aa1b | diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_opening_hours.py b/tests/test_opening_hours.py
new file mode 100644
index 00000000..8a8ebfbc
--- /dev/null
+++ b/tests/test_opening_hours.py
@@ -0,0 +1,67 @@
+from locations.hours import OpeningHours
+
+
+def test_two_ranges():
+ o = OpeningHours()
+ o.add_range('Mo', '07:00', '17:00')
+ o.add_range('Tu', '07:00', '17:00')
+ o.add_range('We', '07:00', '17:00')
+
+ o.add_range('Fr', '08:00', '17:00')
+ o.add_range('Sa', '08:00', '17:00')
+
+ assert o.as_opening_hours() == "Mo-We 07:00-17:00; Fr-Sa 08:00-17:00"
+
+
+def test_mixed_ranges():
+ o = OpeningHours()
+ o.add_range('Mo', '08:00', '17:00')
+ o.add_range('Tu', '08:00', '17:00')
+ o.add_range('We', '09:00', '18:00')
+ o.add_range('Th', '09:00', '18:00')
+ o.add_range('Fr', '07:00', '17:00')
+ o.add_range('Su', '09:00', '17:00')
+
+ assert o.as_opening_hours() == "Mo-Tu 08:00-17:00; We-Th 09:00-18:00; Fr 07:00-17:00; Su 09:00-17:00"
+
+
+def test_closed_sunday():
+ o = OpeningHours()
+ o.add_range('Mo', '07:00', '17:00')
+ o.add_range('Tu', '07:00', '17:00')
+ o.add_range('We', '07:00', '17:00')
+ o.add_range('Th', '07:00', '17:00')
+ o.add_range('Fr', '07:00', '17:00')
+ o.add_range('Sa', '07:00', '17:00')
+
+ assert o.as_opening_hours() == "Mo-Sa 07:00-17:00"
+
+
+def test_closed_tuesday():
+ o = OpeningHours()
+ o.add_range('Mo', '07:00', '17:00')
+ o.add_range('We', '07:00', '17:00')
+ o.add_range('Th', '07:00', '17:00')
+ o.add_range('Fr', '07:00', '17:00')
+ o.add_range('Sa', '07:00', '17:00')
+ o.add_range('Su', '07:00', '17:00')
+
+ assert o.as_opening_hours() == "Mo 07:00-17:00; We-Su 07:00-17:00"
+
+
+def test_twentyfour_seven():
+ o = OpeningHours()
+ o.add_range('Mo', '0:00', '23:59')
+ o.add_range('Tu', '0:00', '23:59')
+ o.add_range('We', '0:00', '23:59')
+ o.add_range('Th', '0:00', '23:59')
+ o.add_range('Fr', '0:00', '23:59')
+ o.add_range('Sa', '0:00', '23:59')
+ o.add_range('Su', '0:00', '23:59')
+
+ assert o.as_opening_hours() == '24/7'
+
+
+def test_no_opening_hours():
+ o = OpeningHours()
+ assert o.as_opening_hours() == ''
| Banfield Pet Hospital
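(Alongside the new spider, the patch above also fixes `OpeningHours.as_opening_hours` to skip day groups with no hours; a minimal sketch mirroring the new `test_closed_tuesday` test:)
```python
from locations.hours import OpeningHours

o = OpeningHours()
for day in ['Mo', 'We', 'Th', 'Fr', 'Sa', 'Su']:  # Tuesday omitted: closed that day
    o.add_range(day, '07:00', '17:00')
print(o.as_opening_hours())  # with the fix: "Mo 07:00-17:00; We-Su 07:00-17:00"
```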
https://www.banfield.com/our-hospitals/hospital-locations | 0.0 | 4faa631b73de26eab29eeff58d4e5ecad963aa1b | [
"tests/test_opening_hours.py::test_two_ranges",
"tests/test_opening_hours.py::test_mixed_ranges",
"tests/test_opening_hours.py::test_closed_sunday",
"tests/test_opening_hours.py::test_closed_tuesday",
"tests/test_opening_hours.py::test_no_opening_hours"
]
| [
"tests/test_opening_hours.py::test_twentyfour_seven"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2018-12-10 16:37:27+00:00 | mit | 1,054 |
|
altair-viz__altair-1539 | diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py
index 54fc6e58..9d7b67b7 100644
--- a/altair/vegalite/v3/theme.py
+++ b/altair/vegalite/v3/theme.py
@@ -2,6 +2,23 @@
from ...utils.theme import ThemeRegistry
+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']
+
+
+class VegaTheme(object):
+ """Implementation of a builtin vega theme."""
+ def __init__(self, theme):
+ self.theme = theme
+
+ def __call__(self):
+ return {"usermeta": {"embedOptions": {"theme": self.theme}},
+ "config": {"view": {"width": 400, "height": 300},
+ "mark": {"tooltip": None}}}
+
+ def __repr__(self):
+ return "VegaTheme({!r})".format(self.theme)
+
+
# The entry point group that can be used by other packages to declare other
# renderers that will be auto-detected. Explicit registration is also
# allowed by the PluginRegistery API.
@@ -14,4 +31,8 @@ themes.register('opaque', lambda: {"config": {"background": "white",
"view": {"width": 400, "height": 300},
"mark": {"tooltip": None}}})
themes.register('none', lambda: {})
+
+for theme in VEGA_THEMES:
+ themes.register(theme, VegaTheme(theme))
+
themes.enable('default')
| altair-viz/altair | 2e74eb40bf7a832747436b2c18d696371c84df01 | diff --git a/altair/vegalite/v3/tests/test_theme.py b/altair/vegalite/v3/tests/test_theme.py
new file mode 100644
index 00000000..5e1fa361
--- /dev/null
+++ b/altair/vegalite/v3/tests/test_theme.py
@@ -0,0 +1,17 @@
+import pytest
+
+import altair.vegalite.v3 as alt
+from altair.vegalite.v3.theme import VEGA_THEMES
+
+
[email protected]
+def chart():
+ return alt.Chart('data.csv').mark_bar().encode(x='x:Q')
+
+def test_vega_themes(chart):
+ for theme in VEGA_THEMES:
+ with alt.themes.enable(theme):
+ dct = chart.to_dict()
+ assert dct['usermeta'] == {'embedOptions': {'theme': theme}}
+ assert dct['config'] == {"view": {"width": 400, "height": 300},
+ "mark": {"tooltip": None}}
| Support built-in vega themes
See [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:
```python
alt.themes.enable('vega.themes.dark')
```
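For comparison, the approach ultimately taken in the patch above registers each built-in theme under its plain name; a minimal usage sketch (theme names taken from the patch's `VEGA_THEMES` list):
```python
import altair as alt

# any of: ggplot2, quartz, vox, fivethirtyeight, dark, latimes
alt.themes.enable('dark')
# charts rendered while this theme is enabled carry
# {"usermeta": {"embedOptions": {"theme": "dark"}}} in their spec
```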
We'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair. | 0.0 | 2e74eb40bf7a832747436b2c18d696371c84df01 | [
"altair/vegalite/v3/tests/test_theme.py::test_vega_themes"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-05-30 03:52:50+00:00 | bsd-3-clause | 1,055 |
|
altair-viz__altair-1852 | diff --git a/altair/vega/v5/schema/core.py b/altair/vega/v5/schema/core.py
index fd4aeb23..5e2a668d 100644
--- a/altair/vega/v5/schema/core.py
+++ b/altair/vega/v5/schema/core.py
@@ -774,7 +774,7 @@ class projection(VegaSchema):
extent : oneOf(List(oneOf(List(:class:`numberOrSignal`), :class:`signal`)), :class:`signal`)
- fit : oneOf(Mapping(required=[]), List(Mapping(required=[])))
+ fit : oneOf(Mapping(required=[]), List(Any))
parallels : oneOf(List(:class:`numberOrSignal`), :class:`signal`)
@@ -868,7 +868,7 @@ class scope(VegaSchema):
class signalName(VegaSchema):
"""signalName schema wrapper
- not Mapping(required=[])
+ not enum('parent', 'datum', 'event', 'item')
"""
_schema = {'$ref': '#/defs/signalName'}
_rootschema = Root._schema
@@ -977,7 +977,7 @@ class crossfilterTransform(VegaSchema):
fields : oneOf(List(oneOf(:class:`scaleField`, :class:`paramField`, :class:`expr`)),
:class:`signal`)
- query : oneOf(List(Mapping(required=[])), :class:`signal`)
+ query : oneOf(List(Any), :class:`signal`)
type : enum('crossfilter')
@@ -1000,7 +1000,7 @@ class resolvefilterTransform(VegaSchema):
Attributes
----------
- filter : Mapping(required=[])
+ filter : Any
ignore : anyOf(float, :class:`signal`)
@@ -1339,11 +1339,11 @@ class graticuleTransform(VegaSchema):
type : enum('graticule')
- extent : oneOf(List(Mapping(required=[])), :class:`signal`)
+ extent : oneOf(List(Any), :class:`signal`)
- extentMajor : oneOf(List(Mapping(required=[])), :class:`signal`)
+ extentMajor : oneOf(List(Any), :class:`signal`)
- extentMinor : oneOf(List(Mapping(required=[])), :class:`signal`)
+ extentMinor : oneOf(List(Any), :class:`signal`)
precision : anyOf(float, :class:`signal`)
@@ -2182,13 +2182,13 @@ class imputeTransform(VegaSchema):
groupby : oneOf(List(oneOf(:class:`scaleField`, :class:`paramField`, :class:`expr`)),
:class:`signal`)
- keyvals : oneOf(List(Mapping(required=[])), :class:`signal`)
+ keyvals : oneOf(List(Any), :class:`signal`)
method : anyOf(enum('value', 'mean', 'median', 'max', 'min'), :class:`signal`)
signal : string
- value : Mapping(required=[])
+ value : Any
"""
_schema = {'$ref': '#/defs/imputeTransform'}
@@ -2301,7 +2301,7 @@ class lookupTransform(VegaSchema):
type : enum('lookup')
- default : Mapping(required=[])
+ default : Any
signal : string
@@ -2586,7 +2586,7 @@ class voronoiTransform(VegaSchema):
y : oneOf(:class:`scaleField`, :class:`paramField`, :class:`expr`)
- extent : oneOf(List(Mapping(required=[])), :class:`signal`)
+ extent : oneOf(List(Any), :class:`signal`)
signal : string
@@ -3458,7 +3458,7 @@ class signal(VegaSchema):
class arrayOrSignal(VegaSchema):
"""arrayOrSignal schema wrapper
- oneOf(List(Mapping(required=[])), :class:`signal`)
+ oneOf(List(Any), :class:`signal`)
"""
_schema = {'$ref': '#/refs/arrayOrSignal'}
_rootschema = Root._schema
diff --git a/altair/vegalite/v3/schema/core.py b/altair/vegalite/v3/schema/core.py
index 52997ac1..14b19534 100644
--- a/altair/vegalite/v3/schema/core.py
+++ b/altair/vegalite/v3/schema/core.py
@@ -1321,7 +1321,7 @@ class BaseMarkConfig(VegaLiteSchema):
the ``x`` and ``y`` properties. Values for ``theta`` follow the same convention of
``arc`` mark ``startAngle`` and ``endAngle`` properties: angles are measured in
radians, with ``0`` indicating "north".
- tooltip : Mapping(required=[])
+ tooltip : Any
The tooltip text to show upon mouse hover.
width : float
Width of the marks.
@@ -1550,7 +1550,7 @@ class BindRadioSelect(Binding):
input : enum('radio', 'select')
- options : List(Mapping(required=[]))
+ options : List(Any)
debounce : float
@@ -4059,13 +4059,13 @@ class ErrorBarExtent(VegaLiteSchema):
class EventStream(VegaLiteSchema):
"""EventStream schema wrapper
- Mapping(required=[])
+ Any
"""
_schema = {'$ref': '#/definitions/EventStream'}
_rootschema = Root._schema
- def __init__(self, **kwds):
- super(EventStream, self).__init__(**kwds)
+ def __init__(self, *args, **kwds):
+ super(EventStream, self).__init__(*args, **kwds)
class FacetFieldDef(VegaLiteSchema):
@@ -5927,7 +5927,7 @@ class ImputeParams(VegaLiteSchema):
**Default value:** : ``[null, null]`` indicating that the window includes all
objects.
- keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
+ keyvals : anyOf(List(Any), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
@@ -5943,7 +5943,7 @@ class ImputeParams(VegaLiteSchema):
One of ``value``, ``mean``, ``median``, ``max`` or ``min``.
**Default value:** ``"value"``
- value : Mapping(required=[])
+ value : Any
The field value to use when the imputation ``method`` is ``"value"``.
"""
_schema = {'$ref': '#/definitions/ImputeParams'}
@@ -15392,7 +15392,7 @@ class ImputeTransform(Transform):
groupby : List(:class:`FieldName`)
An optional array of fields by which to group the values.
Imputation will then be performed on a per-group basis.
- keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
+ keyvals : anyOf(List(Any), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
@@ -15408,7 +15408,7 @@ class ImputeTransform(Transform):
One of ``value``, ``mean``, ``median``, ``max`` or ``min``.
**Default value:** ``"value"``
- value : Mapping(required=[])
+ value : Any
The field value to use when the imputation ``method`` is ``"value"``.
"""
_schema = {'$ref': '#/definitions/ImputeTransform'}
diff --git a/altair/vegalite/v4/schema/core.py b/altair/vegalite/v4/schema/core.py
index 929815af..addb0b93 100644
--- a/altair/vegalite/v4/schema/core.py
+++ b/altair/vegalite/v4/schema/core.py
@@ -1414,7 +1414,7 @@ class BaseMarkConfig(VegaLiteSchema):
the ``x`` and ``y`` properties. Values for ``theta`` follow the same convention of
``arc`` mark ``startAngle`` and ``endAngle`` properties: angles are measured in
radians, with ``0`` indicating "north".
- tooltip : Mapping(required=[])
+ tooltip : Any
The tooltip text to show upon mouse hover.
width : float
Width of the marks.
@@ -1693,7 +1693,7 @@ class BindRadioSelect(Binding):
input : enum('radio', 'select')
- options : List(Mapping(required=[]))
+ options : List(Any)
debounce : float
@@ -7557,7 +7557,7 @@ class ImputeParams(VegaLiteSchema):
**Default value:** : ``[null, null]`` indicating that the window includes all
objects.
- keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
+ keyvals : anyOf(List(Any), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
@@ -7573,7 +7573,7 @@ class ImputeParams(VegaLiteSchema):
One of ``"value"``, ``"mean"``, ``"median"``, ``"max"`` or ``"min"``.
**Default value:** ``"value"``
- value : Mapping(required=[])
+ value : Any
The field value to use when the imputation ``method`` is ``"value"``.
"""
_schema = {'$ref': '#/definitions/ImputeParams'}
@@ -17546,7 +17546,7 @@ class ImputeTransform(Transform):
groupby : List(:class:`FieldName`)
An optional array of fields by which to group the values.
Imputation will then be performed on a per-group basis.
- keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
+ keyvals : anyOf(List(Any), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
@@ -17562,7 +17562,7 @@ class ImputeTransform(Transform):
One of ``"value"``, ``"mean"``, ``"median"``, ``"max"`` or ``"min"``.
**Default value:** ``"value"``
- value : Mapping(required=[])
+ value : Any
The field value to use when the imputation ``method`` is ``"value"``.
"""
_schema = {'$ref': '#/definitions/ImputeTransform'}
diff --git a/tools/schemapi/utils.py b/tools/schemapi/utils.py
index c367db92..d976ff4f 100644
--- a/tools/schemapi/utils.py
+++ b/tools/schemapi/utils.py
@@ -188,7 +188,7 @@ class SchemaInfo(object):
return '[{0}]'.format(', '.join(self.child(s).short_description
for s in self.schema))
elif self.is_empty():
- return 'any object'
+ return 'Any'
elif self.is_enum():
return 'enum({})'.format(', '.join(map(repr, self.enum)))
elif self.is_anyOf():
@@ -266,7 +266,7 @@ class SchemaInfo(object):
@property
def not_(self):
- return self.child(self.schema.get('not_', {}))
+ return self.child(self.schema.get('not', {}))
@property
def items(self):
@@ -299,7 +299,7 @@ class SchemaInfo(object):
return 'enum' in self.schema
def is_empty(self):
- return set(self.schema.keys()) - set(EXCLUDE_KEYS) == {}
+ return not (set(self.schema.keys()) - set(EXCLUDE_KEYS))
def is_compound(self):
return any(key in self.schema for key in ['anyOf', 'allOf', 'oneOf'])
| altair-viz/altair | c05a7caa26c2f592ed69a6d4d95276fdb1e80331 | diff --git a/tools/schemapi/tests/test_utils.py b/tools/schemapi/tests/test_utils.py
index f168b57b..769042f7 100644
--- a/tools/schemapi/tests/test_utils.py
+++ b/tools/schemapi/tests/test_utils.py
@@ -1,6 +1,6 @@
import pytest
-from ..utils import get_valid_identifier
+from ..utils import get_valid_identifier, SchemaInfo
from ..schemapi import _FromDict
@@ -31,3 +31,16 @@ def test_hash_schema(refschema, use_json):
copy['description'] = "A schema"
copy['title'] = "Schema to test"
assert _FromDict.hash_schema(refschema) == _FromDict.hash_schema(copy)
+
[email protected]('schema, expected', [
+ ({}, 'Any'),
+ ({'type': 'number'}, 'float'),
+ ({'enum': ['A', 'B', 'C']}, "enum('A', 'B', 'C')"),
+ ({'type': 'array'}, 'List(Any)'),
+ ({'type': 'object'}, 'Mapping(required=[])'),
+ ({"type": "string", "not": {'enum': ['A', 'B', 'C']}}, "not enum('A', 'B', 'C')"),
+])
+def test_medium_description(schema, expected):
+ description = SchemaInfo(schema).medium_description
+ assert description == expected
+
 | Code generator uses `Mapping(required=[])` in confusing ways
In particular:
```
>>> alt.Chart.transform_impute?
[...]
keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
[...]
```
This should instead be something like:
```
keyvals : anyOf(List(Any), :class:`ImputeSequence`)
```
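The `is_empty` change in the patch above hints at the root cause; a hedged sketch of the faulty comparison:
```python
# An "empty" schema has no keys left once EXCLUDE_KEYS are removed,
# but the old check compared a set against a dict literal:
leftover_keys = set()
print(leftover_keys == {})  # False: {} is an empty dict, never equal to a set
print(not leftover_keys)    # True: the truthiness test the fix switches to
```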
Somehow the schema generator is treating an empty ``items`` schema as an object. | 0.0 | c05a7caa26c2f592ed69a6d4d95276fdb1e80331 | [
"tools/schemapi/tests/test_utils.py::test_medium_description[schema0-Any]",
"tools/schemapi/tests/test_utils.py::test_medium_description[schema3-List(Any)]",
"tools/schemapi/tests/test_utils.py::test_medium_description[schema5-not"
]
| [
"tools/schemapi/tests/test_utils.py::test_get_valid_identifier",
"tools/schemapi/tests/test_utils.py::test_hash_schema[True]",
"tools/schemapi/tests/test_utils.py::test_hash_schema[False]",
"tools/schemapi/tests/test_utils.py::test_medium_description[schema1-float]",
"tools/schemapi/tests/test_utils.py::test_medium_description[schema2-enum('A',",
"tools/schemapi/tests/test_utils.py::test_medium_description[schema4-Mapping(required=[])]"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-12-07 17:11:08+00:00 | bsd-3-clause | 1,056 |
|
altair-viz__altair-1986 | diff --git a/altair/utils/deprecation.py b/altair/utils/deprecation.py
index 766ab212..099795e2 100644
--- a/altair/utils/deprecation.py
+++ b/altair/utils/deprecation.py
@@ -1,12 +1,25 @@
import warnings
-# import functools
+import functools
class AltairDeprecationWarning(UserWarning):
pass
-def _deprecated(obj, name=None, message=None):
+def deprecated(message=None):
+ """Decorator to deprecate a function or class.
+
+ Parameters
+ ----------
+ message : string (optional)
+ The deprecation message
+ """
+ def wrapper(obj):
+ return _deprecate(obj, message=message)
+ return wrapper
+
+
+def _deprecate(obj, name=None, message=None):
"""Return a version of a class or function that raises a deprecation warning.
Parameters
@@ -26,7 +39,7 @@ def _deprecated(obj, name=None, message=None):
Examples
--------
>>> class Foo(object): pass
- >>> OldFoo = _deprecated(Foo, "OldFoo")
+ >>> OldFoo = _deprecate(Foo, "OldFoo")
>>> f = OldFoo() # doctest: +SKIP
AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead.
"""
@@ -36,9 +49,9 @@ def _deprecated(obj, name=None, message=None):
if isinstance(obj, type):
return type(name, (obj,),
{'__doc__': obj.__doc__,
- '__init__': _deprecated(obj.__init__, "__init__", message)})
+ '__init__': _deprecate(obj.__init__, "__init__", message)})
elif callable(obj):
- # @functools.wraps(obj) # TODO: use this in Py3 only
+ @functools.wraps(obj)
def new_obj(*args, **kwargs):
warnings.warn(message, AltairDeprecationWarning)
return obj(*args, **kwargs)
diff --git a/altair/utils/display.py b/altair/utils/display.py
index 8d7f4370..63b5d1cc 100644
--- a/altair/utils/display.py
+++ b/altair/utils/display.py
@@ -26,10 +26,10 @@ class RendererRegistry(PluginRegistry[RendererType]):
See https://altair-viz.github.io/getting_started/installation.html
for more information.
"""),
- 'vegascope': textwrap.dedent(
+ 'altair_viewer': textwrap.dedent(
"""
- To use the 'vegascope' renderer, you must install the vegascope
- package; see http://github.com/diana-hep/vegascope/
+ To use the 'altair_viewer' renderer, you must install the altair_viewer
+ package; see http://github.com/altair-viz/altair_viewer/
for more information.
"""),
}
diff --git a/altair/utils/html.py b/altair/utils/html.py
index 39699c13..6848e8c5 100644
--- a/altair/utils/html.py
+++ b/altair/utils/html.py
@@ -85,7 +85,10 @@ HTML_TEMPLATE_UNIVERSAL = jinja2.Template("""
<div id="{{ output_div }}"></div>
<script type="text/javascript">
(function(spec, embedOpt){
- const outputDiv = document.getElementById("{{ output_div }}");
+ let outputDiv = document.currentScript.previousElementSibling;
+ if (outputDiv.id !== "{{ output_div }}") {
+ outputDiv = document.getElementById("{{ output_div }}");
+ }
const paths = {
"vega": "{{ base_url }}/vega@{{ vega_version }}?noext",
"vega-lib": "{{ base_url }}/vega-lib?noext",
diff --git a/altair/vegalite/v3/_deprecated.py b/altair/vegalite/v3/_deprecated.py
index 5241d1ed..f25f7fbd 100644
--- a/altair/vegalite/v3/_deprecated.py
+++ b/altair/vegalite/v3/_deprecated.py
@@ -1,19 +1,19 @@
-from ...utils.deprecation import _deprecated
+from ...utils.deprecation import _deprecate
from . import channels
# Deprecated classes (see https://github.com/altair-viz/altair/issues/1474).
# TODO: Remove these in Altair 3.2.
-Fillopacity = _deprecated(channels.FillOpacity, 'Fillopacity')
-FillopacityValue = _deprecated(channels.FillOpacityValue, 'FillopacityValue')
-Strokeopacity = _deprecated(channels.StrokeOpacity, 'Strokeopacity')
-StrokeopacityValue = _deprecated(channels.StrokeOpacityValue, 'StrokeopacityValue')
-Strokewidth = _deprecated(channels.StrokeWidth, 'Strokewidth')
-StrokewidthValue = _deprecated(channels.StrokeWidthValue, 'StrokewidthValue')
-Xerror = _deprecated(channels.XError, 'Xerror')
-XerrorValue = _deprecated(channels.XErrorValue, 'XerrorValue')
-Xerror2 = _deprecated(channels.XError2, 'Xerror2')
-Xerror2Value = _deprecated(channels.XError2Value, 'Xerror2Value')
-Yerror = _deprecated(channels.YError, 'Yerror')
-YerrorValue = _deprecated(channels.YErrorValue, 'YerrorValue')
-Yerror2 = _deprecated(channels.YError2, 'Yerror2')
-Yerror2Value = _deprecated(channels.YError2Value, 'Yerror2Value')
+Fillopacity = _deprecate(channels.FillOpacity, 'Fillopacity')
+FillopacityValue = _deprecate(channels.FillOpacityValue, 'FillopacityValue')
+Strokeopacity = _deprecate(channels.StrokeOpacity, 'Strokeopacity')
+StrokeopacityValue = _deprecate(channels.StrokeOpacityValue, 'StrokeopacityValue')
+Strokewidth = _deprecate(channels.StrokeWidth, 'Strokewidth')
+StrokewidthValue = _deprecate(channels.StrokeWidthValue, 'StrokewidthValue')
+Xerror = _deprecate(channels.XError, 'Xerror')
+XerrorValue = _deprecate(channels.XErrorValue, 'XerrorValue')
+Xerror2 = _deprecate(channels.XError2, 'Xerror2')
+Xerror2Value = _deprecate(channels.XError2Value, 'Xerror2Value')
+Yerror = _deprecate(channels.YError, 'Yerror')
+YerrorValue = _deprecate(channels.YErrorValue, 'YerrorValue')
+Yerror2 = _deprecate(channels.YError2, 'Yerror2')
+Yerror2Value = _deprecate(channels.YError2Value, 'Yerror2Value')
diff --git a/altair/vegalite/v4/api.py b/altair/vegalite/v4/api.py
index f99f605f..344ae16b 100644
--- a/altair/vegalite/v4/api.py
+++ b/altair/vegalite/v4/api.py
@@ -1498,6 +1498,7 @@ class TopLevelMixin(mixins.ConfigMethodMixin):
else:
display(self)
+ @utils.deprecation.deprecated(message="serve() is deprecated. Use show() instead.")
def serve(self, ip='127.0.0.1', port=8888, n_retries=50, files=None,
jupyter_warning=True, open_browser=True, http_server=None,
**kwargs):
@@ -1538,6 +1539,27 @@ class TopLevelMixin(mixins.ConfigMethodMixin):
files=files, jupyter_warning=jupyter_warning,
open_browser=open_browser, http_server=http_server)
+ def show(self, embed_opt=None, open_browser=None):
+ """Show the chart in an external browser window.
+
+ This requires a recent version of the altair_viewer package.
+
+ Parameters
+ ----------
+ embed_opt : dict (optional)
+ The Vega embed options that control the dispay of the chart.
+ open_browser : bool (optional)
+ Specify whether a browser window should be opened. If not specified,
+ a browser window will be opened only if the server is not already
+ connected to a browser.
+ """
+ try:
+ import altair_viewer
+ except ImportError:
+ raise ValueError("show() method requires the altair_viewer package. "
+ "See http://github.com/altair-viz/altair_viewer")
+ altair_viewer.show(self, embed_opt=embed_opt, open_browser=open_browser)
+
@utils.use_signature(core.Resolve)
def _set_resolve(self, **kwargs):
"""Copy the chart and update the resolve property with kwargs"""
diff --git a/doc/user_guide/display_frontends.rst b/doc/user_guide/display_frontends.rst
index 354aa5a8..364756de 100644
--- a/doc/user_guide/display_frontends.rst
+++ b/doc/user_guide/display_frontends.rst
@@ -141,46 +141,26 @@ Examples are:
- The Hydrogen_ project, which is built on nteract_ and renders Altair charts
via the ``mimebundle`` renderer.
-The Vegascope Renderer
-~~~~~~~~~~~~~~~~~~~~~~
-For other IDEs, a useful companion is the `VegaScope`_ project, which provides
-an Altair renderer that works directly from a Python terminal.
+Altair Viewer
+~~~~~~~~~~~~~
+For non-notebook IDEs, a useful companion is the `Altair Viewer`_ package,
+which provides an Altair renderer that works directly from any Python terminal.
Start by installing the package::
- $ pip install vegascope
+ $ pip install altair_viewer
-Now in your Python script you can enable the vegascope renderer::
+When enabled, this will serve charts via a local HTTP server and automatically open
+a browser window in which to view them, with subsequent charts displayed in the
+same window.
- import altair as alt
- alt.renderers.enable('vegascope')
-
- # load a simple dataset as a pandas DataFrame
- from vega_datasets import data
- cars = data.cars()
+If you are using an IPython-compatible terminal ``altair_viewer`` can be enabled via
+Altair's standard renderer framework::
- chart = alt.Chart(cars).mark_point().encode(
- x='Horsepower',
- y='Miles_per_Gallon',
- color='Origin',
- ).interactive()
-
-In an IPython environment, this will automatically trigger vegascope to serve
-the chart in a background process to your web browser, and unlike Altair's
-:meth:`Chart.serve` method, any subsequently created charts will use
-the same server.
-
-If you are in a non-IPython terminal, you can trigger the renderer manually
-using the :meth:`Chart.display` method::
-
- chart.display()
-
-Built-in ``serve()`` method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Altair includes a :meth:`Chart.serve` method which will seamlessly convert a
-chart to HTML, start a web server serving that HTML, and open your system's
-default web browser to view it.
+ import altair as alt
+ alt.renderers.enable('altair_viewer')
-For example, you can serve a chart to a web browser like this::
+If you prefer to manually trigger chart display, you can use the built-in :meth:`Chart.show`
+method to manually trigger chart display::
import altair as alt
@@ -194,10 +174,10 @@ For example, you can serve a chart to a web browser like this::
color='Origin',
).interactive()
- chart.serve()
+ chart.show()
-The command will block the Python interpreter, and will have to be canceled with
-``Ctrl-C`` to execute any further code.
+This command will block the Python interpreter until the browser window containing
+the chart is closed.
Manual ``save()`` and display
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -284,11 +264,11 @@ For an example, see `ipyvega`_.
.. _nteract: https://nteract.io
.. _nbconvert: https://nbconvert.readthedocs.io/
.. _nbviewer: https://nbviewer.jupyter.org/
+.. _Altair Viewer: https://github.com/altair-viz/altair_viewer/
.. _Colab: https://colab.research.google.com
.. _Hydrogen: https://github.com/nteract/hydrogen
.. _Jupyter Notebook: https://jupyter-notebook.readthedocs.io/en/stable/
.. _Vega-Lite: http://vega.github.io/vega-lite
.. _Vega: https://vega.github.io/vega/
-.. _VegaScope: https://github.com/scikit-hep/vegascope
.. _VSCode-Python: https://code.visualstudio.com/docs/python/python-tutorial
.. _Zeppelin: https://zeppelin.apache.org/
\ No newline at end of file
diff --git a/doc/user_guide/faq.rst b/doc/user_guide/faq.rst
index d8a54f20..3ce5224c 100644
--- a/doc/user_guide/faq.rst
+++ b/doc/user_guide/faq.rst
@@ -15,12 +15,12 @@ altair to an environment capable of executing the javascript code that
turns the JSON specification into a visual chart.
There are extensions included in JupyterLab, Jupyter Notebook, Colab,
-Kaggle kernels, Hydrogen, and nteract that know how to automatically perform
-this rendering (see :ref:`installation` for details).
+Kaggle kernels, VSCode, Hydrogen, and nteract that know how to automatically
+perform this rendering (see :ref:`installation` for details).
For other frontends that don't have vega-lite rendering built-in, it is
-possible to work with Altair charts using either the ``vegascope`` project,
-or the build-in :meth:`Chart.serve` or :meth:`Chart.save` methods.
+possible to work with Altair charts using the build-in :meth:`Chart.show`
+or :meth:`Chart.save` methods.
For more information on these, see :ref:`display-general`.
.. _faq-no-display:
| altair-viz/altair | 880837d9a9cd8a156a92bb3a738685dc308f64d4 | diff --git a/altair/utils/tests/test_deprecation.py b/altair/utils/tests/test_deprecation.py
index 035d6087..4fac9112 100644
--- a/altair/utils/tests/test_deprecation.py
+++ b/altair/utils/tests/test_deprecation.py
@@ -2,12 +2,22 @@ import pytest
import altair as alt
from altair.utils import AltairDeprecationWarning
-from altair.utils.deprecation import _deprecated
+from altair.utils.deprecation import _deprecate, deprecated
def test_deprecated_class():
- OldChart = _deprecated(alt.Chart, "OldChart")
+ OldChart = _deprecate(alt.Chart, "OldChart")
with pytest.warns(AltairDeprecationWarning) as record:
OldChart()
assert "alt.OldChart" in record[0].message.args[0]
- assert "alt.Chart" in record[0].message.args[0]
\ No newline at end of file
+ assert "alt.Chart" in record[0].message.args[0]
+
+
+def test_deprecation_decorator():
+ @deprecated(message="func is deprecated")
+ def func(x):
+ return x + 1
+ with pytest.warns(AltairDeprecationWarning) as record:
+ y = func(1)
+ assert y == 2
+ assert record[0].message.args[0] == "func is deprecated"
| Multiple views in JupyterLab blank with default html renderer
To reproduce:
1. Create an environment with JupyterLab 1.2.6 and altair 4.0.1:
```
conda create -c conda-forge -yn testenv jupyterlab altair pandas
conda activate testenv
```
2. Start JupyterLab:
```
jupyter lab
```
3. Create a new notebook with an example plot:
```
import altair as alt
import numpy as np
import pandas as pd
# Compute x^2 + y^2 across a 2D grid
x, y = np.meshgrid(range(-5, 5), range(-5, 5))
z = x ** 2 + y ** 2
# Convert this grid to columnar data expected by Altair
source = pd.DataFrame({'x': x.ravel(),
'y': y.ravel(),
'z': z.ravel()})
alt.Chart(source).mark_rect().encode(
x='x:O',
y='y:O',
color='z:Q'
)
```
4. Right-click on the plot and choose "Create New view for output" to open a new jlab tab for the plot. The new tab is blank:
<img width="622" alt="Screen Shot 2020-02-24 at 11 22 23 AM" src="https://user-images.githubusercontent.com/192614/75183729-f70da980-56f7-11ea-8e25-bf11bfda8b9f.png">
I also see the same issue when opening a new view of the notebook (right-clicking on the file's tab and selecting "New View for Notebook").
I think at least part of the problem is that the plot div has a hard-coded id, and ids should be unique on a page, so viewing the same output multiple times can be a problem. There may be other issues with the js not knowing to redraw the plot in the new jlab tab, etc.
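The template fix itself is JavaScript (falling back to `document.currentScript.previousElementSibling` before the id lookup). On the Python side, the same patch deprecates `Chart.serve()` in favor of a new `Chart.show()` backed by `altair_viewer`; a minimal sketch with toy data:
```python
import altair as alt
import pandas as pd

chart = alt.Chart(pd.DataFrame({'x': [1, 2], 'y': [1, 2]})).mark_point().encode(x='x', y='y')
chart.show()  # requires the altair_viewer package; raises ValueError if it is missing
```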
| 0.0 | 880837d9a9cd8a156a92bb3a738685dc308f64d4 | [
"altair/utils/tests/test_deprecation.py::test_deprecated_class",
"altair/utils/tests/test_deprecation.py::test_deprecation_decorator"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-02-25 04:16:18+00:00 | bsd-3-clause | 1,057 |
|
altair-viz__altair-2522 | diff --git a/altair/utils/core.py b/altair/utils/core.py
index 53785b17..c47b9a04 100644
--- a/altair/utils/core.py
+++ b/altair/utils/core.py
@@ -193,8 +193,6 @@ def infer_vegalite_type(data):
# Otherwise, infer based on the dtype of the input
typ = infer_dtype(data)
- # TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
-
if typ in [
"floating",
"mixed-integer-float",
@@ -203,6 +201,8 @@ def infer_vegalite_type(data):
"complex",
]:
return "quantitative"
+ elif typ == "categorical" and data.cat.ordered:
+ return ("ordinal", data.cat.categories.tolist())
elif typ in ["string", "bytes", "categorical", "boolean", "mixed", "unicode"]:
return "nominal"
elif typ in [
@@ -316,8 +316,9 @@ def sanitize_dataframe(df): # noqa: C901
for col_name, dtype in df.dtypes.items():
if str(dtype) == "category":
- # XXXX: work around bug in to_json for categorical types
+ # Work around bug in to_json for categorical types in older versions of pandas
# https://github.com/pydata/pandas/issues/10778
+ # https://github.com/altair-viz/altair/pull/2170
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype) == "string":
@@ -527,6 +528,10 @@ def parse_shorthand(
if isinstance(data, pd.DataFrame) and "type" not in attrs:
if "field" in attrs and attrs["field"] in data.columns:
attrs["type"] = infer_vegalite_type(data[attrs["field"]])
+ # ordered categorical dataframe columns return the type and sort order as a tuple
+ if isinstance(attrs["type"], tuple):
+ attrs["sort"] = attrs["type"][1]
+ attrs["type"] = attrs["type"][0]
return attrs
diff --git a/altair/utils/schemapi.py b/altair/utils/schemapi.py
index b9f8bae9..fab24a4a 100644
--- a/altair/utils/schemapi.py
+++ b/altair/utils/schemapi.py
@@ -364,6 +364,14 @@ class SchemaBase(object):
# parsed_shorthand is removed from context if it exists so that it is
# not passed to child to_dict function calls
parsed_shorthand = context.pop("parsed_shorthand", {})
+ # Prevent that pandas categorical data is automatically sorted
+ # when a non-ordinal data type is specifed manually
+ if "sort" in parsed_shorthand and kwds["type"] not in [
+ "ordinal",
+ Undefined,
+ ]:
+ parsed_shorthand.pop("sort")
+
kwds.update(
{
k: v
diff --git a/doc/user_guide/encodings/channels.rst b/doc/user_guide/encodings/channels.rst
index 057db9c7..073580b2 100644
--- a/doc/user_guide/encodings/channels.rst
+++ b/doc/user_guide/encodings/channels.rst
@@ -110,7 +110,7 @@ We map the ``symbol`` variable to ``detail`` to use them to group lines.
Order
~~~~~
-The `order` option and :class:`Order` channel can sort how marks are drawn on the chart.
+The ``order`` option and :class:`Order` channel can sort how marks are drawn on the chart.
For stacked marks, this controls the order of components of the stack. Here, the elements of each bar are sorted alphabetically by the name of the nominal data in the color channel.
@@ -144,9 +144,6 @@ The order can be reversed by changing the sort option to `descending`.
order=alt.Order("site", sort="descending")
)
-If we want to sort stacked segments in a custom order, we can `follow the approach in this issue comment <https://github.com/altair-viz/altair/issues/245#issuecomment-748443434>`_, although there might be edge cases where this is not fully supported. This also makes the order of the segments align with the order colors shows up in a legend that uses custom sorting for the color domain.
-
-
The same approach works for other mark types, like stacked areas charts.
.. altair-plot::
@@ -163,7 +160,12 @@ The same approach works for other mark types, like stacked areas charts.
order=alt.Order("site", sort="ascending")
)
-For line marks, the `order` channel encodes the order in which data points are connected. This can be useful for creating a scatter plot that draws lines between the dots using a different field than the x and y axes.
+Note that unlike the ``sort`` parameter to positional encoding channels,
+the :class:`Order` channel cannot take a list of values to sort by
+and is not automatically sorted when an ordered pandas categorical column is passed.
+If we want to sort stacked segments in a custom order, we can `follow the approach in this issue comment <https://github.com/altair-viz/altair/issues/245#issuecomment-748443434>`_, although there might be edge cases where this is not fully supported. This workaround also makes the order of the segments align with the order that the colors shows up in a legend that uses custom sorting for the color domain.
+
+For line marks, the :class:`Order` channel encodes the order in which data points are connected. This can be useful for creating a scatter plot that draws lines between the dots using a different field than the x and y axes.
.. altair-plot::
diff --git a/doc/user_guide/encodings/index.rst b/doc/user_guide/encodings/index.rst
index b46acf6b..32073259 100644
--- a/doc/user_guide/encodings/index.rst
+++ b/doc/user_guide/encodings/index.rst
@@ -318,18 +318,22 @@ Sort Option
~~~~~~~~~~~
Some channels accept a :class:`sort` option which determines the
-order of the scale being used for the channel. There are a number of different
-sort options available:
+order of the scale being used for the channel.
+By default the scale is sorted in ascending alphabetical order,
+unless an `ordered pandas categorical column <https://pandas.pydata.org/docs/user_guide/categorical.html?highlight=categorical#sorting-and-order>`_ is passed (without an explicit type specification)
+in which case Altair will use the column's inherent order to sort the scale.
+There are a number of different
+options available to change the sort order:
- ``sort='ascending'`` (Default) will sort the field's value in ascending order.
- for string data, this uses standard alphabetical order.
+ For string data, this uses standard alphabetical order.
- ``sort='descending'`` will sort the field's value in descending order
-- passing the name of an encoding channel to ``sort``, such as ``"x"`` or ``"y"``, allows for
+- Passing the name of an encoding channel to ``sort``, such as ``"x"`` or ``"y"``, allows for
sorting by that channel. An optional minus prefix can be used for a descending
sort. For example ``sort='-x'`` would sort by the x channel in descending order.
-- passing a list to ``sort`` allows you to explicitly set the order in which
+- Passing a list to ``sort`` allows you to explicitly set the order in which
you would like the encoding to appear
-- passing a :class:`EncodingSortField` class to ``sort`` allows you to sort
+- Passing a :class:`EncodingSortField` class to ``sort`` allows you to sort
an axis by the value of some other field in the dataset.
Here is an example of applying these five different sort approaches on the
diff --git a/tools/schemapi/schemapi.py b/tools/schemapi/schemapi.py
index aeaf156c..d04bdf2d 100644
--- a/tools/schemapi/schemapi.py
+++ b/tools/schemapi/schemapi.py
@@ -362,6 +362,14 @@ class SchemaBase(object):
# parsed_shorthand is removed from context if it exists so that it is
# not passed to child to_dict function calls
parsed_shorthand = context.pop("parsed_shorthand", {})
+ # Prevent that pandas categorical data is automatically sorted
+ # when a non-ordinal data type is specifed manually
+ if "sort" in parsed_shorthand and kwds["type"] not in [
+ "ordinal",
+ Undefined,
+ ]:
+ parsed_shorthand.pop("sort")
+
kwds.update(
{
k: v
| altair-viz/altair | 1f6d1c953cac4a50e9ff2ba0a25ba3f398887784 | diff --git a/tests/vegalite/v5/tests/test_api.py b/tests/vegalite/v5/tests/test_api.py
index b2e5aa15..12e8b666 100644
--- a/tests/vegalite/v5/tests/test_api.py
+++ b/tests/vegalite/v5/tests/test_api.py
@@ -123,6 +123,7 @@ def test_chart_infer_types():
"x": pd.date_range("2012", periods=10, freq="Y"),
"y": range(10),
"c": list("abcabcabca"),
+ "s": pd.Categorical([1, 2] * 5, categories=[2, 1], ordered=True),
}
)
@@ -134,32 +135,45 @@ def test_chart_infer_types():
assert dct["encoding"]["y"]["field"] == "y"
assert dct["encoding"]["color"]["type"] == "nominal"
assert dct["encoding"]["color"]["field"] == "c"
+ assert dct["encoding"]["size"]["type"] == "ordinal"
+ assert dct["encoding"]["size"]["field"] == "s"
+ assert dct["encoding"]["size"]["sort"] == [2, 1]
# Pass field names by keyword
- chart = alt.Chart(data).mark_point().encode(x="x", y="y", color="c")
+ chart = alt.Chart(data).mark_point().encode(x="x", y="y", color="c", size="s")
_check_encodings(chart)
# pass Channel objects by keyword
chart = (
alt.Chart(data)
.mark_point()
- .encode(x=alt.X("x"), y=alt.Y("y"), color=alt.Color("c"))
+ .encode(x=alt.X("x"), y=alt.Y("y"), color=alt.Color("c"), size=alt.Size("s"))
)
_check_encodings(chart)
# pass Channel objects by value
- chart = alt.Chart(data).mark_point().encode(alt.X("x"), alt.Y("y"), alt.Color("c"))
+ chart = (
+ alt.Chart(data)
+ .mark_point()
+ .encode(alt.X("x"), alt.Y("y"), alt.Color("c"), alt.Size("s"))
+ )
_check_encodings(chart)
# override default types
chart = (
alt.Chart(data)
.mark_point()
- .encode(alt.X("x", type="nominal"), alt.Y("y", type="ordinal"))
+ .encode(
+ alt.X("x", type="nominal"),
+ alt.Y("y", type="ordinal"),
+ alt.Size("s", type="nominal"),
+ )
)
dct = chart.to_dict()
assert dct["encoding"]["x"]["type"] == "nominal"
assert dct["encoding"]["y"]["type"] == "ordinal"
+ assert dct["encoding"]["size"]["type"] == "nominal"
+ assert "sort" not in dct["encoding"]["size"]
@pytest.mark.parametrize(
| Support of ordinal based on pandas' ordered Categorical type?
I've just started to play with altair, using the [diamonds](http://vincentarelbundock.github.io/Rdatasets/datasets.html) dataset. Here is a notebook clarifying what I did: https://gist.github.com/pierre-haessig/09fa9268aa0a0e7d91356f681f96ca18
Since I'm not familiar with altair, I may have missed something, but I get the feeling that ordered Categorical types from pandas are not supported.
Indeed, if I use a color='cut' encoding, when cut is a pandas Series with an _ordered_ category dtype, by default I get a nominal type of coloring (with "unordered" colors).
On the other hand, if I force the ordinal type with color='cut:O', I do get the ordered coloring (the shades of green), but the _order is wrong_! (I get Fair, Good, Ideal, Premium, Very Good, while the correct order is 'Fair', 'Good', 'Very Good', 'Premium', 'Ideal', as manually defined in pandas' category.)
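With the patch above, an ordered Categorical is now inferred as ordinal and carries its own category order as the encoding's `sort`; a minimal sketch (toy column standing in for `cut`, expected output per the new tests):
```python
import altair as alt
import pandas as pd

cuts = ['Fair', 'Good', 'Very Good', 'Premium', 'Ideal']
df = pd.DataFrame({
    'cut': pd.Categorical(['Good', 'Ideal', 'Fair'], categories=cuts, ordered=True),
    'price': [2, 3, 1],
})

enc = alt.Chart(df).mark_bar().encode(x='cut', y='price').to_dict()['encoding']['x']
print(enc)  # expected: field 'cut', type 'ordinal', sort == cuts
```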
| 0.0 | 1f6d1c953cac4a50e9ff2ba0a25ba3f398887784 | [
"tests/vegalite/v5/tests/test_api.py::test_chart_infer_types"
]
| [
"tests/vegalite/v5/tests/test_api.py::test_chart_data_types",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args0-kwargs0]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args1-kwargs1]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args2-kwargs2]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args3-kwargs3]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args4-kwargs4]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args5-kwargs5]",
"tests/vegalite/v5/tests/test_api.py::test_chart_operations",
"tests/vegalite/v5/tests/test_api.py::test_selection_to_dict",
"tests/vegalite/v5/tests/test_api.py::test_selection_expression",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-html]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-json]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-png]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-svg]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-pdf]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-bogus]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-html]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-json]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-pdf]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-bogus]",
"tests/vegalite/v5/tests/test_api.py::test_save_html[False]",
"tests/vegalite/v5/tests/test_api.py::test_facet_basic",
"tests/vegalite/v5/tests/test_api.py::test_facet_parse",
"tests/vegalite/v5/tests/test_api.py::test_facet_parse_data",
"tests/vegalite/v5/tests/test_api.py::test_selection",
"tests/vegalite/v5/tests/test_api.py::test_transforms",
"tests/vegalite/v5/tests/test_api.py::test_filter_transform_selection_predicates",
"tests/vegalite/v5/tests/test_api.py::test_resolve_methods",
"tests/vegalite/v5/tests/test_api.py::test_layer_encodings",
"tests/vegalite/v5/tests/test_api.py::test_add_selection",
"tests/vegalite/v5/tests/test_api.py::test_repeat_add_selections",
"tests/vegalite/v5/tests/test_api.py::test_facet_add_selections",
"tests/vegalite/v5/tests/test_api.py::test_layer_add_selection",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[concat]",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_selection_property",
"tests/vegalite/v5/tests/test_api.py::test_LookupData",
"tests/vegalite/v5/tests/test_api.py::test_themes",
"tests/vegalite/v5/tests/test_api.py::test_chart_from_dict",
"tests/vegalite/v5/tests/test_api.py::test_consolidate_datasets",
"tests/vegalite/v5/tests/test_api.py::test_consolidate_InlineData",
"tests/vegalite/v5/tests/test_api.py::test_repeat",
"tests/vegalite/v5/tests/test_api.py::test_data_property",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-concat]",
"tests/vegalite/v5/tests/test_api.py::test_layer_facet",
"tests/vegalite/v5/tests/test_api.py::test_layer_errors",
"tests/vegalite/v5/tests/test_api.py::test_resolve[layer]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[concat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[facet]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[facet_encoding]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[repeat]",
"tests/vegalite/v5/tests/test_api.py::test_facet[None-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[None-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[facet-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[facet-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[row-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[row-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[column-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[column-layer]",
"tests/vegalite/v5/tests/test_api.py::test_sequence",
"tests/vegalite/v5/tests/test_api.py::test_graticule",
"tests/vegalite/v5/tests/test_api.py::test_sphere",
"tests/vegalite/v5/tests/test_api.py::test_validate_dataset"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-11-18 03:19:55+00:00 | bsd-3-clause | 1,058 |
|
altair-viz__altair-2885 | diff --git a/altair/utils/schemapi.py b/altair/utils/schemapi.py
index de631b62..0b4dd372 100644
--- a/altair/utils/schemapi.py
+++ b/altair/utils/schemapi.py
@@ -404,10 +404,10 @@ class SchemaBase(object):
parsed_shorthand = context.pop("parsed_shorthand", {})
# Prevent that pandas categorical data is automatically sorted
# when a non-ordinal data type is specifed manually
- if "sort" in parsed_shorthand and kwds["type"] not in [
- "ordinal",
- Undefined,
- ]:
+ # or if the encoding channel does not support sorting
+ if "sort" in parsed_shorthand and (
+ "sort" not in kwds or kwds["type"] not in ["ordinal", Undefined]
+ ):
parsed_shorthand.pop("sort")
kwds.update(
diff --git a/tools/schemapi/schemapi.py b/tools/schemapi/schemapi.py
index 9fa2dccb..7f14f7bb 100644
--- a/tools/schemapi/schemapi.py
+++ b/tools/schemapi/schemapi.py
@@ -402,10 +402,10 @@ class SchemaBase(object):
parsed_shorthand = context.pop("parsed_shorthand", {})
# Prevent that pandas categorical data is automatically sorted
# when a non-ordinal data type is specifed manually
- if "sort" in parsed_shorthand and kwds["type"] not in [
- "ordinal",
- Undefined,
- ]:
+ # or if the encoding channel does not support sorting
+ if "sort" in parsed_shorthand and (
+ "sort" not in kwds or kwds["type"] not in ["ordinal", Undefined]
+ ):
parsed_shorthand.pop("sort")
kwds.update(
| altair-viz/altair | c6cbdfa72c93a94631177b6bc8b0d3f0d8871704 | diff --git a/tests/vegalite/v5/tests/test_api.py b/tests/vegalite/v5/tests/test_api.py
index b4b82a17..404f0a45 100644
--- a/tests/vegalite/v5/tests/test_api.py
+++ b/tests/vegalite/v5/tests/test_api.py
@@ -138,16 +138,30 @@ def test_chart_infer_types():
assert dct["encoding"]["size"]["type"] == "ordinal"
assert dct["encoding"]["size"]["field"] == "s"
assert dct["encoding"]["size"]["sort"] == [2, 1]
+ assert dct["encoding"]["tooltip"]["type"] == "ordinal"
+ assert dct["encoding"]["tooltip"]["field"] == "s"
+ # "sort" should be removed for channels that don't support it
+ assert "sort" not in dct["encoding"]["tooltip"]
# Pass field names by keyword
- chart = alt.Chart(data).mark_point().encode(x="x", y="y", color="c", size="s")
+ chart = (
+ alt.Chart(data)
+ .mark_point()
+ .encode(x="x", y="y", color="c", size="s", tooltip="s")
+ )
_check_encodings(chart)
# pass Channel objects by keyword
chart = (
alt.Chart(data)
.mark_point()
- .encode(x=alt.X("x"), y=alt.Y("y"), color=alt.Color("c"), size=alt.Size("s"))
+ .encode(
+ x=alt.X("x"),
+ y=alt.Y("y"),
+ color=alt.Color("c"),
+ size=alt.Size("s"),
+ tooltip=alt.Tooltip("s"),
+ )
)
_check_encodings(chart)
@@ -155,7 +169,7 @@ def test_chart_infer_types():
chart = (
alt.Chart(data)
.mark_point()
- .encode(alt.X("x"), alt.Y("y"), alt.Color("c"), alt.Size("s"))
+ .encode(alt.X("x"), alt.Y("y"), alt.Color("c"), alt.Size("s"), alt.Tooltip("s"))
)
_check_encodings(chart)
@@ -167,6 +181,7 @@ def test_chart_infer_types():
alt.X("x", type="nominal"),
alt.Y("y", type="ordinal"),
alt.Size("s", type="nominal"),
+ alt.Tooltip("s", type="nominal"),
)
)
dct = chart.to_dict()
@@ -174,6 +189,8 @@ def test_chart_infer_types():
assert dct["encoding"]["y"]["type"] == "ordinal"
assert dct["encoding"]["size"]["type"] == "nominal"
assert "sort" not in dct["encoding"]["size"]
+ assert dct["encoding"]["tooltip"]["type"] == "nominal"
+ assert "sort" not in dct["encoding"]["tooltip"]
@pytest.mark.parametrize(
| tooltip throws error for Categorical variable
The following code used to work in recent released versions of `altair` and in the in-development branch, until commit f8912bad75d4247ab7 made it start throwing an error.
The problem appears to be that specifying a variable for a tooltip without a type throws an error if the variable is a `pandas.Categorical`.
Specifically, this code:
```python
import altair as alt
import pandas as pd
df = (
pd.DataFrame({"x": [1, 2], "y": [1, 2], "note": ["a", "b"]})
.assign(note=lambda x: pd.Categorical(x["note"], ordered=True))
)
alt.Chart(df).encode(x="x", y="y", tooltip=["x", "y", "note"]).mark_point()
```
Running it now produces the following error:
```
---------------------------------------------------------------------------
SchemaValidationError Traceback (most recent call last)
File /fh/fast/bloom_j/software/miniconda3/envs/dms-vep-pipeline/lib/python3.11/site-packages/altair/vegalite/v5/api.py:2194, in Chart.to_dict(self, *args, **kwargs)
2192 copy.data = core.InlineData(values=[{}])
2193 return super(Chart, copy).to_dict(*args, **kwargs)
-> 2194 return super().to_dict(*args, **kwargs)
File /fh/fast/bloom_j/software/miniconda3/envs/dms-vep-pipeline/lib/python3.11/site-packages/altair/vegalite/v5/api.py:559, in TopLevelMixin.to_dict(self, *args, **kwargs)
556 context["top_level"] = False
557 kwargs["context"] = context
--> 559 dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
561 # TODO: following entries are added after validation. Should they be validated?
562 if is_top_level:
563 # since this is top-level we add $schema if it's missing
File /fh/fast/bloom_j/software/miniconda3/envs/dms-vep-pipeline/lib/python3.11/site-packages/altair/utils/schemapi.py:422, in SchemaBase.to_dict(self, validate, ignore, context)
420 self.validate(result)
421 except jsonschema.ValidationError as err:
--> 422 raise SchemaValidationError(self, err)
423 return result
SchemaValidationError: Invalid specification
altair.vegalite.v5.api.Chart->0, validating 'type'
[{'field': 'x', 'type': 'quantitative'}, {'field': 'y', 'type': 'quantitative'}, {'field': 'note', 'type': 'ordinal', 'sort': ['a', 'b']}] is not of type 'object'
[{'field': 'x', 'type': 'quantitative'}, {'field': 'y', 'type': 'quantitative'}, {'field': 'note', 'type': 'ordinal', 'sort': ['a', 'b']}] is not of type 'object'
Additional properties are not allowed ('sort' was unexpected)
[{'field': 'x', 'type': 'quantitative'}, {'field': 'y', 'type': 'quantitative'}, {'field': 'note', 'type': 'ordinal', 'sort': ['a', 'b']}] is not of type 'null'
```
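Until this is fixed, one workaround (an untested sketch, assuming the `sort` inferred from the ordered categorical is what trips the tooltip schema) is to give the tooltip an explicit type, reusing `df` from the snippet above, so that no `sort` is inferred:
```python
import altair as alt

# hypothetical workaround: pass an explicit type for the categorical column
alt.Chart(df).encode(
    x="x",
    y="y",
    tooltip=["x", "y", alt.Tooltip("note", type="nominal")],
).mark_point()
```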
| 0.0 | c6cbdfa72c93a94631177b6bc8b0d3f0d8871704 | [
"tests/vegalite/v5/tests/test_api.py::test_chart_infer_types"
]
| [
"tests/vegalite/v5/tests/test_api.py::test_chart_data_types",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args0-kwargs0]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args1-kwargs1]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args2-kwargs2]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args3-kwargs3]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args4-kwargs4]",
"tests/vegalite/v5/tests/test_api.py::test_multiple_encodings[args5-kwargs5]",
"tests/vegalite/v5/tests/test_api.py::test_chart_operations",
"tests/vegalite/v5/tests/test_api.py::test_selection_to_dict",
"tests/vegalite/v5/tests/test_api.py::test_selection_expression",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-html]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-json]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-png]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-svg]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-pdf]",
"tests/vegalite/v5/tests/test_api.py::test_save[altair_saver-bogus]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-html]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-json]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-pdf]",
"tests/vegalite/v5/tests/test_api.py::test_save[vl-convert-bogus]",
"tests/vegalite/v5/tests/test_api.py::test_save_html[False]",
"tests/vegalite/v5/tests/test_api.py::test_facet_basic",
"tests/vegalite/v5/tests/test_api.py::test_facet_parse",
"tests/vegalite/v5/tests/test_api.py::test_facet_parse_data",
"tests/vegalite/v5/tests/test_api.py::test_selection",
"tests/vegalite/v5/tests/test_api.py::test_transforms",
"tests/vegalite/v5/tests/test_api.py::test_filter_transform_selection_predicates",
"tests/vegalite/v5/tests/test_api.py::test_resolve_methods",
"tests/vegalite/v5/tests/test_api.py::test_layer_encodings",
"tests/vegalite/v5/tests/test_api.py::test_add_selection",
"tests/vegalite/v5/tests/test_api.py::test_repeat_add_selections",
"tests/vegalite/v5/tests/test_api.py::test_facet_add_selections",
"tests/vegalite/v5/tests/test_api.py::test_layer_add_selection",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[concat]",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_compound_add_selections[vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_selection_property",
"tests/vegalite/v5/tests/test_api.py::test_LookupData",
"tests/vegalite/v5/tests/test_api.py::test_themes",
"tests/vegalite/v5/tests/test_api.py::test_chart_from_dict",
"tests/vegalite/v5/tests/test_api.py::test_consolidate_datasets",
"tests/vegalite/v5/tests/test_api.py::test_consolidate_InlineData",
"tests/vegalite/v5/tests/test_api.py::test_repeat",
"tests/vegalite/v5/tests/test_api.py::test_data_property",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data.json-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_with_same_data[data1-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data.json-concat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-layer]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_subcharts_different_data[data1-concat]",
"tests/vegalite/v5/tests/test_api.py::test_layer_facet",
"tests/vegalite/v5/tests/test_api.py::test_layer_errors",
"tests/vegalite/v5/tests/test_api.py::test_resolve[layer]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[hconcat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[vconcat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[concat]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[facet]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[facet_encoding]",
"tests/vegalite/v5/tests/test_api.py::test_resolve[repeat]",
"tests/vegalite/v5/tests/test_api.py::test_facet[None-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[None-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[facet-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[facet-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[row-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[row-layer]",
"tests/vegalite/v5/tests/test_api.py::test_facet[column-chart]",
"tests/vegalite/v5/tests/test_api.py::test_facet[column-layer]",
"tests/vegalite/v5/tests/test_api.py::test_sequence",
"tests/vegalite/v5/tests/test_api.py::test_graticule",
"tests/vegalite/v5/tests/test_api.py::test_sphere",
"tests/vegalite/v5/tests/test_api.py::test_validate_dataset"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-13 06:12:51+00:00 | bsd-3-clause | 1,059 |
|
altair-viz__altair-3128 | diff --git a/altair/utils/core.py b/altair/utils/core.py
index 1d4d6f17..082db4cc 100644
--- a/altair/utils/core.py
+++ b/altair/utils/core.py
@@ -298,6 +298,13 @@ def sanitize_geo_interface(geo: MutableMapping) -> dict:
return geo_dct
+def numpy_is_subtype(dtype: Any, subtype: Any) -> bool:
+ try:
+ return np.issubdtype(dtype, subtype)
+ except (NotImplementedError, TypeError):
+ return False
+
+
def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901
"""Sanitize a DataFrame to prepare it for serialization.
@@ -339,26 +346,27 @@ def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901
return val
for col_name, dtype in df.dtypes.items():
- if str(dtype) == "category":
+ dtype_name = str(dtype)
+ if dtype_name == "category":
# Work around bug in to_json for categorical types in older versions of pandas
# https://github.com/pydata/pandas/issues/10778
# https://github.com/altair-viz/altair/pull/2170
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
- elif str(dtype) == "string":
+ elif dtype_name == "string":
# dedicated string datatype (since 1.0)
# https://pandas.pydata.org/pandas-docs/version/1.0.0/whatsnew/v1.0.0.html#dedicated-string-data-type
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
- elif str(dtype) == "bool":
+ elif dtype_name == "bool":
# convert numpy bools to objects; np.bool is not JSON serializable
df[col_name] = df[col_name].astype(object)
- elif str(dtype) == "boolean":
+ elif dtype_name == "boolean":
# dedicated boolean datatype (since 1.0)
# https://pandas.io/docs/user_guide/boolean.html
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
- elif str(dtype).startswith("datetime"):
+ elif dtype_name.startswith("datetime") or dtype_name.startswith("timestamp"):
# Convert datetimes to strings. This needs to be a full ISO string
# with time, which is why we cannot use ``col.astype(str)``.
# This is because Javascript parses date-only times in UTC, but
@@ -368,18 +376,18 @@ def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901
df[col_name] = (
df[col_name].apply(lambda x: x.isoformat()).replace("NaT", "")
)
- elif str(dtype).startswith("timedelta"):
+ elif dtype_name.startswith("timedelta"):
raise ValueError(
'Field "{col_name}" has type "{dtype}" which is '
"not supported by Altair. Please convert to "
"either a timestamp or a numerical value."
"".format(col_name=col_name, dtype=dtype)
)
- elif str(dtype).startswith("geometry"):
+ elif dtype_name.startswith("geometry"):
# geopandas >=0.6.1 uses the dtype geometry. Continue here
# otherwise it will give an error on np.issubdtype(dtype, np.integer)
continue
- elif str(dtype) in {
+ elif dtype_name in {
"Int8",
"Int16",
"Int32",
@@ -394,10 +402,10 @@ def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901
# https://pandas.pydata.org/pandas-docs/version/0.25/whatsnew/v0.24.0.html#optional-integer-na-support
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
- elif np.issubdtype(dtype, np.integer):
+ elif numpy_is_subtype(dtype, np.integer):
# convert integers to objects; np.int is not JSON serializable
df[col_name] = df[col_name].astype(object)
- elif np.issubdtype(dtype, np.floating):
+ elif numpy_is_subtype(dtype, np.floating):
# For floats, convert to Python float: np.float is not JSON serializable
# Also convert NaN/inf values to null, as they are not JSON serializable
col = df[col_name]
@@ -635,7 +643,7 @@ def infer_vegalite_type_for_dfi_column(
# error message for the presence of datetime64.
#
# See https://github.com/pandas-dev/pandas/issues/54239
- if "datetime64" in e.args[0]:
+ if "datetime64" in e.args[0] or "timestamp" in e.args[0]:
return "temporal"
raise e
| altair-viz/altair | 72a361c68731212d8aa042f4c73d5070aa110a9a | diff --git a/tests/utils/test_core.py b/tests/utils/test_core.py
index 376c72a3..b5ce8e76 100644
--- a/tests/utils/test_core.py
+++ b/tests/utils/test_core.py
@@ -8,6 +8,12 @@ import altair as alt
from altair.utils.core import parse_shorthand, update_nested, infer_encoding_types
from altair.utils.core import infer_dtype
+try:
+ import pyarrow as pa
+except ImportError:
+ pa = None
+
+
FAKE_CHANNELS_MODULE = '''
"""Fake channels module for utility tests."""
@@ -148,6 +154,20 @@ def test_parse_shorthand_with_data():
check("month(t)", data, timeUnit="month", field="t", type="temporal")
[email protected](pa is None, reason="pyarrow not installed")
+def test_parse_shorthand_for_arrow_timestamp():
+ data = pd.DataFrame(
+ {
+ "z": pd.date_range("2018-01-01", periods=5, freq="D"),
+ "t": pd.date_range("2018-01-01", periods=5, freq="D").tz_localize("UTC"),
+ }
+ )
+ # Convert to arrow-packed dtypes
+ data = pa.Table.from_pandas(data).to_pandas(types_mapper=pd.ArrowDtype)
+ assert parse_shorthand("z", data) == {"field": "z", "type": "temporal"}
+ assert parse_shorthand("z", data) == {"field": "z", "type": "temporal"}
+
+
def test_parse_shorthand_all_aggregates():
aggregates = alt.Root._schema["definitions"]["AggregateOp"]["enum"]
for aggregate in aggregates:
diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py
index 690fdc85..65e0ac0f 100644
--- a/tests/utils/test_utils.py
+++ b/tests/utils/test_utils.py
@@ -7,6 +7,11 @@ import pandas as pd
from altair.utils import infer_vegalite_type, sanitize_dataframe
+try:
+ import pyarrow as pa
+except ImportError:
+ pa = None
+
def test_infer_vegalite_type():
def _check(arr, typ):
@@ -83,6 +88,37 @@ def test_sanitize_dataframe():
assert df.equals(df2)
[email protected](pa is None, reason="pyarrow not installed")
+def test_sanitize_dataframe_arrow_columns():
+ # create a dataframe with various types
+ df = pd.DataFrame(
+ {
+ "s": list("abcde"),
+ "f": np.arange(5, dtype=float),
+ "i": np.arange(5, dtype=int),
+ "b": np.array([True, False, True, True, False]),
+ "d": pd.date_range("2012-01-01", periods=5, freq="H"),
+ "c": pd.Series(list("ababc"), dtype="category"),
+ "p": pd.date_range("2012-01-01", periods=5, freq="H").tz_localize("UTC"),
+ }
+ )
+ df_arrow = pa.Table.from_pandas(df).to_pandas(types_mapper=pd.ArrowDtype)
+ df_clean = sanitize_dataframe(df_arrow)
+ records = df_clean.to_dict(orient="records")
+ assert records[0] == {
+ "s": "a",
+ "f": 0.0,
+ "i": 0,
+ "b": True,
+ "d": "2012-01-01T00:00:00",
+ "c": "a",
+ "p": "2012-01-01T00:00:00+00:00",
+ }
+
+ # Make sure we can serialize to JSON without error
+ json.dumps(records)
+
+
def test_sanitize_dataframe_colnames():
df = pd.DataFrame(np.arange(12).reshape(4, 3))
| Pandas 2.0 with pyarrow backend: "TypeError: Cannot interpret 'timestamp[ms][pyarrow]' as a data type"
* Vega-Altair 5.0.1
* Pandas 2.0.3
* PyArrow 12.0.1
Essential outline of what I'm doing:
```
import pandas as pd
arrow_table = [make an Arrow table]
pandas_df = arrow_table.to_pandas(types_mapper=pd.ArrowDtype)
```
<details>
<summary>Stack trace from my actual app</summary>
```
File "/usr/local/lib/python3.11/site-packages/altair/vegalite/v5/api.py", line 948, in save
result = save(**kwds)
^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/utils/save.py", line 131, in save
spec = chart.to_dict()
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/vegalite/v5/api.py", line 838, in to_dict
copy.data = _prepare_data(original_data, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/vegalite/v5/api.py", line 100, in _prepare_data
data = _pipe(data, data_transformers.get())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/toolz/functoolz.py", line 628, in pipe
data = func(data)
^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/toolz/functoolz.py", line 304, in __call__
return self._partial(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/vegalite/data.py", line 19, in default_data_transformer
return curried.pipe(data, limit_rows(max_rows=max_rows), to_values)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/toolz/functoolz.py", line 628, in pipe
data = func(data)
^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/toolz/functoolz.py", line 304, in __call__
return self._partial(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/utils/data.py", line 160, in to_values
data = sanitize_dataframe(data)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/altair/utils/core.py", line 383, in sanitize_dataframe
elif np.issubdtype(dtype, np.integer):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/numpy/core/numerictypes.py", line 417, in issubdtype
arg1 = dtype(arg1).type
^^^^^^^^^^^
TypeError: Cannot interpret 'timestamp[ms][pyarrow]' as a data type
```
</details>
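A self-contained sketch of a reproduction (my real table replaced by a plain date-range column, so the dtype name may read `timestamp[ns][pyarrow]` rather than `timestamp[ms][pyarrow]`):
```python
import altair as alt
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"t": pd.date_range("2018-01-01", periods=5, freq="D")})
# round-trip through Arrow to get a pyarrow-backed timestamp dtype
df = pa.Table.from_pandas(df).to_pandas(types_mapper=pd.ArrowDtype)

# serialization hits np.issubdtype on the arrow dtype and raises TypeError
alt.Chart(df).mark_point().encode(x="t").to_dict()
```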
| 0.0 | 72a361c68731212d8aa042f4c73d5070aa110a9a | [
"tests/utils/test_utils.py::test_sanitize_dataframe_arrow_columns"
]
| [
"tests/utils/test_core.py::test_infer_dtype[value0-integer]",
"tests/utils/test_core.py::test_infer_dtype[value1-floating]",
"tests/utils/test_core.py::test_infer_dtype[value2-mixed-integer-float]",
"tests/utils/test_core.py::test_infer_dtype[value3-string]",
"tests/utils/test_core.py::test_infer_dtype[value4-mixed]",
"tests/utils/test_core.py::test_parse_shorthand",
"tests/utils/test_core.py::test_parse_shorthand_with_data",
"tests/utils/test_core.py::test_parse_shorthand_for_arrow_timestamp",
"tests/utils/test_core.py::test_parse_shorthand_all_aggregates",
"tests/utils/test_core.py::test_parse_shorthand_all_timeunits",
"tests/utils/test_core.py::test_parse_shorthand_window_count",
"tests/utils/test_core.py::test_parse_shorthand_all_window_ops",
"tests/utils/test_core.py::test_update_nested",
"tests/utils/test_core.py::test_infer_encoding_types",
"tests/utils/test_core.py::test_infer_encoding_types_with_condition",
"tests/utils/test_core.py::test_invalid_data_type",
"tests/utils/test_utils.py::test_infer_vegalite_type",
"tests/utils/test_utils.py::test_sanitize_dataframe",
"tests/utils/test_utils.py::test_sanitize_dataframe_colnames",
"tests/utils/test_utils.py::test_sanitize_dataframe_timedelta",
"tests/utils/test_utils.py::test_sanitize_dataframe_infs",
"tests/utils/test_utils.py::test_sanitize_nullable_integers",
"tests/utils/test_utils.py::test_sanitize_string_dtype",
"tests/utils/test_utils.py::test_sanitize_boolean_dtype"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-07-28 22:02:13+00:00 | bsd-3-clause | 1,060 |
|
altair-viz__altair-518 | diff --git a/altair/utils/core.py b/altair/utils/core.py
index 9ed12ea4..cb016dd7 100644
--- a/altair/utils/core.py
+++ b/altair/utils/core.py
@@ -3,6 +3,8 @@ Utility routines
"""
import re
import warnings
+import collections
+from copy import deepcopy
import six
import pandas as pd
@@ -296,3 +298,42 @@ def update_subtraits(obj, attrs, **kwargs):
trait = dct[attr] = {}
dct[attr] = update_subtraits(trait, attrs[1:], **kwargs)
return obj
+
+
+def update_nested(original, update, copy=False):
+ """Update nested dictionaries
+
+ Parameters
+ ----------
+ original : dict
+ the original (nested) dictionary, which will be updated in-place
+ update : dict
+ the nested dictionary of updates
+ copy : bool, default False
+ if True, then copy the original dictionary rather than modifying it
+
+ Returns
+ -------
+ original : dict
+ a reference to the (modified) original dict
+
+ Examples
+ --------
+ >>> original = {'x': {'b': 2, 'c': 4}}
+ >>> update = {'x': {'b': 5, 'd': 6}, 'y': 40}
+ >>> update_nested(original, update)
+ >>> original
+ {'x': {'b': 5, 'c': 2, 'd': 6}, 'y': 40}
+ """
+ if copy:
+ original = deepcopy(original)
+ for key, val in update.items():
+ if isinstance(val, collections.Mapping):
+ orig_val = original.get(key, {})
+ if isinstance(orig_val, collections.Mapping):
+ original[key] = update_nested(orig_val, val)
+ else:
+ original[key] = val
+ else:
+ original[key] = val
+ return original
diff --git a/altair/vegalite/v2/api.py b/altair/vegalite/v2/api.py
index 403a4051..61bd00d7 100644
--- a/altair/vegalite/v2/api.py
+++ b/altair/vegalite/v2/api.py
@@ -6,7 +6,8 @@ from .schema import *
from .schema import core, channels, Undefined
from .data import data_transformers, pipe
-from ...utils import infer_vegalite_type, parse_shorthand_plus_data, use_signature
+from ...utils import (infer_vegalite_type, parse_shorthand_plus_data,
+ use_signature, update_nested)
from .display import renderers
@@ -133,6 +134,8 @@ def condition(predicate, if_true, if_false):
# Top-level objects
class TopLevelMixin(object):
+ _default_spec_values = {"config": {"view": {"width": 400, "height": 300}}}
+
def _prepare_data(self):
if isinstance(self.data, (dict, core.Data, core.InlineData,
core.UrlData, core.NamedData)):
@@ -143,27 +146,31 @@ class TopLevelMixin(object):
self.data = core.UrlData(self.data)
def to_dict(self, *args, **kwargs):
- # TODO: it's a bit weird that to_dict modifies the object.
- # Should we create a copy first?
- original_data = getattr(self, 'data', Undefined)
- self._prepare_data()
+ copy = self.copy()
+ original_data = getattr(copy, 'data', Undefined)
+ copy._prepare_data()
# We make use of two context markers:
# - 'data' points to the data that should be referenced for column type
# inference.
- # - 'toplevel' is a boolean flag that is assumed to be true; if it's
+ # - 'top_level' is a boolean flag that is assumed to be true; if it's
# true then a "$schema" arg is added to the dict.
context = kwargs.get('context', {}).copy()
+ is_top_level = context.get('top_level', True)
+
+ context['top_level'] = False
if original_data is not Undefined:
context['data'] = original_data
- if context.get('top_level', True):
- # since this is top-level we add $schema if it's missing
- if '$schema' not in self._kwds:
- self._kwds['$schema'] = SCHEMA_URL
- # subschemas below this one are not top-level
- context['top_level'] = False
kwargs['context'] = context
- return super(TopLevelMixin, self).to_dict(*args, **kwargs)
+
+ dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
+
+ if is_top_level:
+ # since this is top-level we add $schema if it's missing
+ if '$schema' not in dct:
+ dct['$schema'] = SCHEMA_URL
+ dct = update_nested(copy._default_spec_values, dct, copy=True)
+ return dct
# Layering and stacking
@@ -185,7 +192,7 @@ class TopLevelMixin(object):
class Chart(TopLevelMixin, core.TopLevelFacetedUnitSpec):
def __init__(self, data=Undefined, encoding=Undefined, mark=Undefined,
- width=400, height=300, **kwargs):
+ width=Undefined, height=Undefined, **kwargs):
super(Chart, self).__init__(data=data, encoding=encoding, mark=mark,
width=width, height=height, **kwargs)
| altair-viz/altair | e37000c8f54bc5e0e98ea8457b9a3c913cd58ccb | diff --git a/altair/utils/tests/test_core.py b/altair/utils/tests/test_core.py
index 904f39be..b2b597f1 100644
--- a/altair/utils/tests/test_core.py
+++ b/altair/utils/tests/test_core.py
@@ -5,7 +5,7 @@ import json
import numpy as np
import pandas as pd
-from .. import parse_shorthand, parse_shorthand_plus_data
+from .. import parse_shorthand, parse_shorthand_plus_data, update_nested
def test_parse_shorthand():
@@ -57,3 +57,16 @@ def test_parse_shorthand_plus_data():
check('z', data, field='z', type='temporal')
check('count(x)', data, field='x', aggregate='count', type='quantitative')
check('mean(*)', data, field='*', aggregate='mean')
+
+
+def test_update_nested():
+ original = {'x': {'b': {'foo': 2}, 'c': 4}}
+ update = {'x': {'b': {'foo': 5}, 'd': 6}, 'y': 40}
+
+ output = update_nested(original, update, copy=True)
+ assert output is not original
+ assert output == {'x': {'b': {'foo': 5}, 'c': 4, 'd': 6}, 'y': 40}
+
+ output2 = update_nested(original, update)
+ assert output2 is original
+ assert output == output2
diff --git a/altair/vegalite/tests/test_common.py b/altair/vegalite/tests/test_common.py
index 4034c62d..9ecc237b 100644
--- a/altair/vegalite/tests/test_common.py
+++ b/altair/vegalite/tests/test_common.py
@@ -4,23 +4,35 @@ import pytest
from .. import v1, v2
-
[email protected]
-def basic_spec():
- return {
- 'data': {'url': 'data.csv'},
- 'mark': 'line',
- 'encoding': {
- 'color': {'type': 'nominal', 'field': 'color'},
- 'x': {'type': 'quantitative', 'field': 'xval'},
- 'y': {'type': 'ordinal', 'field': 'yval'}
- },
- 'height': 300,
- 'width': 400
+v1_defaults = {
+ 'width': 400,
+ 'height': 300
+}
+
+v2_defaults = {
+ 'config': {
+ 'view':{
+ 'height':300,
+ 'width':400
+ }
}
+}
+
+basic_spec = {
+ 'data': {'url': 'data.csv'},
+ 'mark': 'line',
+ 'encoding': {
+ 'color': {'type': 'nominal', 'field': 'color'},
+ 'x': {'type': 'quantitative', 'field': 'xval'},
+ 'y': {'type': 'ordinal', 'field': 'yval'}
+ },
+}
+
+spec_v1 = dict(v1_defaults, **basic_spec)
+spec_v2 = dict(v2_defaults, **basic_spec)
[email protected]('alt', [v1, v2])
[email protected]('alt,basic_spec', [(v1, spec_v1), (v2, spec_v2)])
def test_basic_chart_to_dict(alt, basic_spec):
chart = alt.Chart('data.csv').mark_line().encode(
alt.X('xval:Q'),
@@ -36,7 +48,7 @@ def test_basic_chart_to_dict(alt, basic_spec):
assert dct == basic_spec
[email protected]('alt', [v1, v2])
[email protected]('alt,basic_spec', [(v1, spec_v1), (v2, spec_v2)])
def test_basic_chart_from_dict(alt, basic_spec):
chart = alt.Chart.from_dict(basic_spec)
dct = chart.to_dict()
| Remove default width/height from Chart?
Having defaults causes issues with faceted plots (which become comically large) and with things like ``rangeStep``, which are ignored if ``width`` and ``height`` are set (see #481).
I would propose removing these defaults.
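A sketch of what that could look like while still keeping sensible notebook sizing (just a possibility, borrowing the `update_nested` helper from the patch above): move the defaults out of ``Chart`` and merge them into the serialized spec, so explicit values always win.
```python
# sketch: view-level defaults applied at serialization time, not stored
# on every Chart as width/height
DEFAULT_SPEC_VALUES = {"config": {"view": {"width": 400, "height": 300}}}

def finalize_spec(dct):
    # update_nested is the helper from the patch; entries already present
    # in `dct` override the nested defaults
    return update_nested(DEFAULT_SPEC_VALUES, dct, copy=True)
```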
The disadvantage of a plain removal is that straightforward charts end up smaller than is perhaps ideal for display in the notebook. | 0.0 | e37000c8f54bc5e0e98ea8457b9a3c913cd58ccb | [
"altair/utils/tests/test_core.py::test_parse_shorthand",
"altair/utils/tests/test_core.py::test_parse_shorthand_plus_data",
"altair/utils/tests/test_core.py::test_update_nested",
"altair/vegalite/tests/test_common.py::test_basic_chart_to_dict[altair.vegalite.v1-basic_spec0]",
"altair/vegalite/tests/test_common.py::test_basic_chart_to_dict[altair.vegalite.v2-basic_spec1]",
"altair/vegalite/tests/test_common.py::test_basic_chart_from_dict[altair.vegalite.v1-basic_spec0]",
"altair/vegalite/tests/test_common.py::test_basic_chart_from_dict[altair.vegalite.v2-basic_spec1]"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-02-27 04:41:13+00:00 | bsd-3-clause | 1,061 |
|
alvinwan__TexSoup-108 | diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index 11cdd75..304b557 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -7,20 +7,23 @@ from TexSoup.data import arg_type
from TexSoup.tokens import (
TC,
tokenize,
- SKIP_ENVS,
+ SKIP_ENV_NAMES,
+ MATH_ENV_NAMES,
)
import functools
import string
import sys
-MATH_ENVS = (
+MODE_MATH = 'mode:math'
+MODE_NON_MATH = 'mode:non-math'
+MATH_SIMPLE_ENVS = (
TexDisplayMathModeEnv,
TexMathModeEnv,
TexDisplayMathEnv,
TexMathEnv
)
-MATH_TOKEN_TO_ENV = {env.token_begin: env for env in MATH_ENVS}
+MATH_TOKEN_TO_ENV = {env.token_begin: env for env in MATH_SIMPLE_ENVS}
ARG_BEGIN_TO_ENV = {arg.token_begin: arg for arg in arg_type}
SIGNATURES = {
@@ -44,7 +47,7 @@ def read_tex(buf, skip_envs=(), tolerance=0):
"""
while buf.hasNext():
yield read_expr(buf,
- skip_envs=SKIP_ENVS + skip_envs,
+ skip_envs=SKIP_ENV_NAMES + skip_envs,
tolerance=tolerance)
@@ -74,32 +77,36 @@ def make_read_peek(f):
return wrapper
-def read_expr(src, skip_envs=(), tolerance=0):
+def read_expr(src, skip_envs=(), tolerance=0, mode=MODE_NON_MATH):
r"""Read next expression from buffer
:param Buffer src: a buffer of tokens
:param Tuple[str] skip_envs: environments to skip parsing
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:return: parsed expression
:rtype: [TexExpr, Token]
"""
c = next(src)
if c.category in MATH_TOKEN_TO_ENV.keys():
expr = MATH_TOKEN_TO_ENV[c.category]([], position=c.position)
- return read_math_env(src, expr)
+ return read_math_env(src, expr, tolerance=tolerance)
elif c.category == TC.Escape:
- name, args = read_command(src, tolerance=tolerance)
+ name, args = read_command(src, tolerance=tolerance, mode=mode)
if name == 'item':
+ assert mode != MODE_MATH, 'Command \item invalid in math mode.'
contents = read_item(src)
expr = TexCmd(name, contents, args, position=c.position)
elif name == 'begin':
assert args, 'Begin command must be followed by an env name.'
expr = TexNamedEnv(
args[0].string, args=args[1:], position=c.position)
+ if expr.name in MATH_ENV_NAMES:
+ mode = MODE_MATH
if expr.name in skip_envs:
read_skip_env(src, expr)
else:
- read_env(src, expr, tolerance=tolerance)
+ read_env(src, expr, tolerance=tolerance, mode=mode)
else:
expr = TexCmd(name, args=args, position=c.position)
return expr
@@ -170,6 +177,7 @@ def unclosed_env_handler(src, expr, end):
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
+ :param int tolerance: error tolerance level (only supports 0 or 1)
:param end str: Actual end token (as opposed to expected)
"""
clo = CharToLineOffset(str(src))
@@ -179,7 +187,7 @@ def unclosed_env_handler(src, expr, end):
line, offset, expr.name, expr.end, explanation))
-def read_math_env(src, expr):
+def read_math_env(src, expr, tolerance=0):
r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
@@ -197,11 +205,13 @@ def read_math_env(src, expr):
...
EOFError: [Line: 0, Offset: 7] "$" env expecting $. Reached end of file.
"""
- content = src.forward_until(lambda c: c.category == expr.token_end)
+ contents = []
+ while src.hasNext() and src.peek().category != expr.token_end:
+ contents.append(read_expr(src, tolerance=tolerance, mode=MODE_MATH))
if not src.hasNext() or src.peek().category != expr.token_end:
unclosed_env_handler(src, expr, src.peek())
next(src)
- expr.append(content)
+ expr.append(*contents)
return expr
@@ -235,7 +245,7 @@ def read_skip_env(src, expr):
return expr
-def read_env(src, expr, tolerance=0):
+def read_env(src, expr, tolerance=0, mode=MODE_NON_MATH):
r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
@@ -244,6 +254,7 @@ def read_env(src, expr, tolerance=0):
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:rtype: TexExpr
>>> from TexSoup.category import categorize
@@ -264,10 +275,10 @@ def read_env(src, expr, tolerance=0):
while src.hasNext():
if src.peek().category == TC.Escape:
name, args = make_read_peek(read_command)(
- src, 1, skip=1, tolerance=tolerance)
+ src, 1, skip=1, tolerance=tolerance, mode=mode)
if name == 'end':
break
- contents.append(read_expr(src, tolerance=tolerance))
+ contents.append(read_expr(src, tolerance=tolerance, mode=mode))
error = not src.hasNext() or not args or args[0].string != expr.name
if error and tolerance == 0:
unclosed_env_handler(src, expr, src.peek((0, 6)))
@@ -284,7 +295,8 @@ def read_env(src, expr, tolerance=0):
# TODO: handle macro-weirdness e.g., \def\blah[#1][[[[[[[[#2{"#1 . #2"}
# TODO: add newcommand macro
-def read_args(src, n_required=-1, n_optional=-1, args=None, tolerance=0):
+def read_args(src, n_required=-1, n_optional=-1, args=None, tolerance=0,
+ mode=MODE_NON_MATH):
r"""Read all arguments from buffer.
This function assumes that the command name has already been parsed. By
@@ -300,6 +312,7 @@ def read_args(src, n_required=-1, n_optional=-1, args=None, tolerance=0):
:param int n_optional: Number of optional arguments. If < 0, all valid
bracket groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:return: parsed arguments
:rtype: TexArgs
@@ -325,17 +338,18 @@ def read_args(src, n_required=-1, n_optional=-1, args=None, tolerance=0):
if n_required == 0 and n_optional == 0:
return args
- n_optional = read_arg_optional(src, args, n_optional, tolerance)
- n_required = read_arg_required(src, args, n_required, tolerance)
+ n_optional = read_arg_optional(src, args, n_optional, tolerance, mode)
+ n_required = read_arg_required(src, args, n_required, tolerance, mode)
if src.hasNext() and src.peek().category == TC.BracketBegin:
- n_optional = read_arg_optional(src, args, n_optional, tolerance)
+ n_optional = read_arg_optional(src, args, n_optional, tolerance, mode)
if src.hasNext() and src.peek().category == TC.GroupBegin:
- n_required = read_arg_required(src, args, n_required, tolerance)
+ n_required = read_arg_required(src, args, n_required, tolerance, mode)
return args
-def read_arg_optional(src, args, n_optional=-1, tolerance=0):
+def read_arg_optional(
+ src, args, n_optional=-1, tolerance=0, mode=MODE_NON_MATH):
"""Read next optional argument from buffer.
If the command has remaining optional arguments, look for:
@@ -349,6 +363,7 @@ def read_arg_optional(src, args, n_optional=-1, tolerance=0):
:param int n_optional: Number of optional arguments. If < 0, all valid
bracket groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:return: number of remaining optional arguments
:rtype: int
"""
@@ -358,12 +373,13 @@ def read_arg_optional(src, args, n_optional=-1, tolerance=0):
if spacer:
src.backward(1)
break
- args.append(read_arg(src, next(src), tolerance=tolerance))
+ args.append(read_arg(src, next(src), tolerance=tolerance, mode=mode))
n_optional -= 1
return n_optional
-def read_arg_required(src, args, n_required=-1, tolerance=0):
+def read_arg_required(
+ src, args, n_required=-1, tolerance=0, mode=MODE_NON_MATH):
r"""Read next required argument from buffer.
If the command has remaining required arguments, look for:
@@ -379,6 +395,7 @@ def read_arg_required(src, args, n_required=-1, tolerance=0):
:param int n_required: Number of required arguments. If < 0, all valid
brace groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:return: number of remaining optional arguments
:rtype: int
@@ -397,7 +414,8 @@ def read_arg_required(src, args, n_required=-1, tolerance=0):
spacer = read_spacer(src)
if src.hasNext() and src.peek().category == TC.GroupBegin:
- args.append(read_arg(src, next(src), tolerance=tolerance))
+ args.append(read_arg(
+ src, next(src), tolerance=tolerance, mode=mode))
n_required -= 1
continue
elif src.hasNext() and n_required > 0:
@@ -411,7 +429,7 @@ def read_arg_required(src, args, n_required=-1, tolerance=0):
return n_required
-def read_arg(src, c, tolerance=0):
+def read_arg(src, c, tolerance=0, mode=MODE_NON_MATH):
r"""Read the argument from buffer.
Advances buffer until right before the end of the argument.
@@ -419,6 +437,7 @@ def read_arg(src, c, tolerance=0):
:param Buffer src: a buffer of tokens
:param str c: argument token (starting token)
:param int tolerance: error tolerance level (only supports 0 or 1)
+ :param str mode: math or not math mode
:return: the parsed argument
:rtype: TexGroup
@@ -439,7 +458,7 @@ def read_arg(src, c, tolerance=0):
src.forward()
return arg(*content[1:], position=c.position)
else:
- content.append(read_expr(src, tolerance=tolerance))
+ content.append(read_expr(src, tolerance=tolerance, mode=mode))
if tolerance == 0:
clo = CharToLineOffset(str(src))
@@ -478,7 +497,7 @@ def read_spacer(buf):
def read_command(buf, n_required_args=-1, n_optional_args=-1, skip=0,
- tolerance=0):
+ tolerance=0, mode=MODE_NON_MATH):
r"""Parses command and all arguments. Assumes escape has just been parsed.
No whitespace is allowed between escape and command name. e.g.,
@@ -505,7 +524,7 @@ def read_command(buf, n_required_args=-1, n_optional_args=-1, skip=0,
('item', [])
>>> buf.peek()
' aaa '
-
+
# >>> buf = Buffer(tokenize(categorize('\\sect abcd')))
# >>> _ = next(buf)
# >>> read_command(buf)
@@ -519,5 +538,5 @@ def read_command(buf, n_required_args=-1, n_optional_args=-1, skip=0,
if n_required_args < 0 and n_optional_args < 0:
n_required_args, n_optional_args = SIGNATURES.get(name, (-1, -1))
args = read_args(buf, n_required_args, n_optional_args,
- tolerance=tolerance)
+ tolerance=tolerance, mode=mode)
return name, args
diff --git a/TexSoup/tokens.py b/TexSoup/tokens.py
index 749dd36..9578013 100644
--- a/TexSoup/tokens.py
+++ b/TexSoup/tokens.py
@@ -12,15 +12,17 @@ import itertools
import string
# Custom higher-level combinations of primitives
-SKIP_ENVS = ('verbatim', 'equation', 'lstlisting', 'align', 'alignat',
- 'equation*', 'align*', 'math', 'displaymath', 'split', 'array',
- 'eqnarray', 'eqnarray*', 'multline', 'multline*', 'gather',
- 'gather*', 'flalign', 'flalign*',
- '$', '$$', r'\[', r'\]', r'\(', r'\)')
-BRACKETS_DELIMITERS = {'(', ')', '<', '>', '[', ']', '{', '}',
- r'\{', r'\}', '.' '|', r'\langle', r'\rangle',
- r'\lfloor', '\rfloor', r'\lceil', r'\rceil',
- r'\ulcorner', r'\urcorner', r'\lbrack', r'\rbrack'}
+SKIP_ENV_NAMES = ('lstlisting', 'verbatim')
+MATH_ENV_NAMES = (
+ 'align', 'align*', 'alignat', 'array', 'displaymath', 'eqnarray',
+ 'eqnarray*', 'equation', 'equation*', 'flalign', 'flalign*', 'gather',
+ 'gather*', 'math', 'multline', 'multline*', 'split'
+)
+BRACKETS_DELIMITERS = {
+ '(', ')', '<', '>', '[', ']', '{', '}', r'\{', r'\}', '.' '|', r'\langle',
+ r'\rangle', r'\lfloor', '\rfloor', r'\lceil', r'\rceil', r'\ulcorner',
+ r'\urcorner', r'\lbrack', r'\rbrack'
+}
# TODO: looks like left-right do have to match
SIZE_PREFIX = ('left', 'right', 'big', 'Big', 'bigg', 'Bigg')
PUNCTUATION_COMMANDS = {command + bracket
| alvinwan/TexSoup | a7976e70d568afc0053ba72260088ba0c93488fa | diff --git a/tests/test_api.py b/tests/test_api.py
index 11e7ff7..c7f9c40 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -186,6 +186,21 @@ def test_access_position(chikin):
assert clo(chikin.section.position) == (4, 0)
+def test_math_env_change():
+ """Tests that commands in math environments can be found / modified"""
+ soup = TexSoup(r'\begin{align}\infer{A}{B}\infer{C}{D}\end{align}')
+ assert soup.infer is not None, repr(soup.expr)
+ for infer in soup.find_all('infer'):
+ infer.args = infer.args[::-1]
+ assert str(soup) == r'\begin{align}\infer{B}{A}\infer{D}{C}\end{align}'
+
+ soup = TexSoup(r'$$\infer{A}{B}\infer{C}{D}$$')
+ assert soup.infer is not None, repr(soup.expr)
+ for infer in soup.find_all('infer'):
+ infer.args = infer.args[::-1]
+ assert str(soup) == r'$$\infer{B}{A}\infer{D}{C}$$'
+
+
#########
# TEXT #
########
diff --git a/tests/test_parser.py b/tests/test_parser.py
index f1950c3..b1334eb 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -410,11 +410,11 @@ def test_non_letter_commands():
(whether valid or not).
"""
for punctuation in '!@#$%^&*_+-=~`<>,./?;:|':
- tex = rf"""
+ tex = r"""
\begin{{document}}
- \lstinline{{\{punctuation} Word [a-z]+}}
+ \lstinline{{\{} Word [a-z]+}}
\end{{document}}
- """
+ """.format(punctuation)
soup = TexSoup(tex)
assert str(soup) == tex
| Parsing the content of math environments?
Currently, it seems to me that TexSoup treats the content of a math environment ($...$, \(...\), or \[...\]) as a single token.
Is there any way to get a structured representation of this content too? I was thinking of getting a string representation of the inside of $...$ and then calling TexSoup on it recursively, but I am not sure this would work.
To be even more concrete, here is an example of a task I am interested in performing:
- Find all instances of `\infer{A}{B}` within a math environment and replace it with `\infer{B}{A}`, where A and B could be arbitrary LaTeX code (see the sketch below).
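In code, the hoped-for manipulation might look like this sketch (assuming the contents of math environments were parsed like any other commands):
```python
from TexSoup import TexSoup

soup = TexSoup(r'$$\infer{A}{B}\infer{C}{D}$$')
for infer in soup.find_all('infer'):
    infer.args = infer.args[::-1]   # swap the two required arguments
print(soup)  # $$\infer{B}{A}\infer{D}{C}$$
```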
Could one use TexSoup to do this? | 0.0 | a7976e70d568afc0053ba72260088ba0c93488fa | [
"tests/test_api.py::test_math_env_change"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_api.py::test_navigation_attributes",
"tests/test_api.py::test_navigation_parent",
"tests/test_api.py::test_navigation_children",
"tests/test_api.py::test_navigation_descendants",
"tests/test_api.py::test_navigation_positions",
"tests/test_api.py::test_find_basic",
"tests/test_api.py::test_find_by_command",
"tests/test_api.py::test_find_env",
"tests/test_api.py::test_delete",
"tests/test_api.py::test_delete_arg",
"tests/test_api.py::test_delete_token",
"tests/test_api.py::test_replace_single",
"tests/test_api.py::test_replace_multiple",
"tests/test_api.py::test_append",
"tests/test_api.py::test_insert",
"tests/test_api.py::test_change_string",
"tests/test_api.py::test_change_name",
"tests/test_api.py::test_access_position",
"tests/test_api.py::test_text",
"tests/test_api.py::test_search_regex",
"tests/test_api.py::test_search_regex_precompiled_pattern",
"tests/test_api.py::test_skip_envs",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-08-01 00:44:15+00:00 | bsd-2-clause | 1,062 |
|
alvinwan__TexSoup-132 | diff --git a/TexSoup/data.py b/TexSoup/data.py
index f7279c8..58cd070 100644
--- a/TexSoup/data.py
+++ b/TexSoup/data.py
@@ -1317,7 +1317,7 @@ class TexArgs(list):
"""
arg = self.__coerce(arg)
- if isinstance(arg, TexGroup):
+ if isinstance(arg, (TexGroup, TexCmd)):
super().insert(i, arg)
if len(self) <= 1:
diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index 3347393..fd53989 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -391,7 +391,7 @@ def read_arg_required(
b. A curly-brace delimiter. If the required argument is brace-delimited,
the contents of the brace group are used as the argument.
c. Spacer or not, if a brace group is not found, simply use the next
- character.
+ character, unless it is a backslash, in which case use the full command name
:param Buffer src: a buffer of tokens
:param TexArgs args: existing arguments to extend
@@ -422,7 +422,12 @@ def read_arg_required(
n_required -= 1
continue
elif src.hasNext() and n_required > 0:
- args.append('{%s}' % next(src))
+ next_token = next(src)
+ if next_token.category == TC.Escape:
+ name, _ = read_command(src, 0, 0, tolerance=tolerance, mode=mode)
+ args.append(TexCmd(name, position=next_token.position))
+ else:
+ args.append('{%s}' % next_token)
n_required -= 1
continue
| alvinwan/TexSoup | f91d4e71b21aa6852378d2d60ecc551b39e05bf0 | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 136710e..bb175ff 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -384,6 +384,14 @@ def test_def_item():
assert soup.item is not None
+def test_def_without_braces():
+ """Tests that def without braces around the new command parses correctly"""
+ soup = TexSoup(r"\def\acommandname{replacement text}")
+ assert len(soup.find("def").args) == 2
+ assert str(soup.find("def").args[0]) == r"\acommandname"
+ assert str(soup.find("def").args[1]) == "{replacement text}"
+
+
def test_grouping_optional_argument():
"""Tests that grouping occurs correctly"""
soup = TexSoup(r"\begin{Theorem}[The argopt contains {$]\int_\infty$} the square bracket]\end{Theorem}")
| \def\command not parsed correctly
For example, `\def\arraystretch{1.1}` will be parsed as `\def{\}{arraystretch}{1.1}`. The bad part of this result is that it breaks the balance of braces, since the `\}` is treated as an escaped brace. | 0.0 | f91d4e71b21aa6852378d2d60ecc551b39e05bf0 | [
"tests/test_parser.py::test_def_without_braces"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_tokenize_punctuation_command_names",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_grouping_optional_argument",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2021-12-03 14:24:38+00:00 | bsd-2-clause | 1,063 |
|
alvinwan__TexSoup-133 | diff --git a/TexSoup/data.py b/TexSoup/data.py
index f7279c8..58cd070 100644
--- a/TexSoup/data.py
+++ b/TexSoup/data.py
@@ -1317,7 +1317,7 @@ class TexArgs(list):
"""
arg = self.__coerce(arg)
- if isinstance(arg, TexGroup):
+ if isinstance(arg, (TexGroup, TexCmd)):
super().insert(i, arg)
if len(self) <= 1:
diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index 3347393..fd53989 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -391,7 +391,7 @@ def read_arg_required(
b. A curly-brace delimiter. If the required argument is brace-delimited,
the contents of the brace group are used as the argument.
c. Spacer or not, if a brace group is not found, simply use the next
- character.
+ character, unless it is a backslash, in which case use the full command name
:param Buffer src: a buffer of tokens
:param TexArgs args: existing arguments to extend
@@ -422,7 +422,12 @@ def read_arg_required(
n_required -= 1
continue
elif src.hasNext() and n_required > 0:
- args.append('{%s}' % next(src))
+ next_token = next(src)
+ if next_token.category == TC.Escape:
+ name, _ = read_command(src, 0, 0, tolerance=tolerance, mode=mode)
+ args.append(TexCmd(name, position=next_token.position))
+ else:
+ args.append('{%s}' % next_token)
n_required -= 1
continue
diff --git a/TexSoup/tokens.py b/TexSoup/tokens.py
index 1f66cd5..492c50e 100644
--- a/TexSoup/tokens.py
+++ b/TexSoup/tokens.py
@@ -116,14 +116,15 @@ def tokenize_escaped_symbols(text, prev=None):
'\\}'
>>> tokenize_escaped_symbols(categorize(r'\%'))
'\\%'
- >>> tokenize_escaped_symbols(categorize(r'\ %')) # not even one spacer is allowed
+ >>> tokenize_escaped_symbols(categorize(r'\ '))
+ '\\ '
"""
if text.peek().category == CC.Escape \
and text.peek(1) \
and text.peek(1).category in (
CC.Escape, CC.GroupBegin, CC.GroupEnd, CC.MathSwitch,
CC.Alignment, CC.Macro, CC.Superscript, CC.Subscript,
- CC.Active, CC.Comment, CC.Other):
+ CC.Spacer, CC.Active, CC.Comment, CC.Other):
result = text.forward(2)
result.category = TC.EscapedComment
return result
| alvinwan/TexSoup | f91d4e71b21aa6852378d2d60ecc551b39e05bf0 | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 136710e..60ea596 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -189,9 +189,9 @@ def test_escaped_characters():
"""
soup = TexSoup(r"""
\begin{itemize}
- \item Ice cream costs \$4-\$5 around here. \}[\{]
+ \item Ice cream costs \$4-\$5 around here. \}\ [\{]
\end{itemize}""")
- assert str(soup.item).strip() == r'\item Ice cream costs \$4-\$5 around here. \}[\{]'
+ assert str(soup.item).strip() == r'\item Ice cream costs \$4-\$5 around here. \}\ [\{]'
assert '\\$4-\\$5' in str(soup), 'Escaped characters not properly rendered.'
@@ -384,6 +384,14 @@ def test_def_item():
assert soup.item is not None
+def test_def_without_braces():
+ """Tests that def without braces around the new command parses correctly"""
+ soup = TexSoup(r"\def\acommandname{replacement text}")
+ assert len(soup.find("def").args) == 2
+ assert str(soup.find("def").args[0]) == r"\acommandname"
+ assert str(soup.find("def").args[1]) == "{replacement text}"
+
+
def test_grouping_optional_argument():
"""Tests that grouping occurs correctly"""
soup = TexSoup(r"\begin{Theorem}[The argopt contains {$]\int_\infty$} the square bracket]\end{Theorem}")
| Control spaces "\ " in math mode don't make the roundtrip
People often use `\ ` (called a control space) to insert a little extra space in math mode, to avoid the default behaviour of ignoring math spaces.
In TexSoup these get eaten up somehow, so when serialized back out, the tex is different:
Failing test case:
```python
from TexSoup import TexSoup

def test_control_space_command():
    r"""A control space "\ " can be used to insert extra spacing in math mode."""
    math_with_spaces = r"""$a \ = \ \sin\theta$"""
    soup = TexSoup(math_with_spaces)
    assert str(soup) == math_with_spaces, 'Control spaces not preserved in math'
```
<img width="561" alt="Screen Shot 2020-10-18 at 1 10 39 AM" src="https://user-images.githubusercontent.com/163966/96359259-cb07c400-10de-11eb-9364-228d308bc34b.png">
expected: `$a \ = \ \sin\theta$`
observed: `$a \=\\sin\theta$`
Here are the tokens in case that's helpful:
```
'$'_MathSwitch
'a '_Text
'\'_Escape
' = '_Text
'\'_Escape
' '_MergedSpacer
'\'_Escape
'sin'_CommandName
'\'_Escape
'theta'_CommandName
'$'_MathSwitch
```
See https://tex.stackexchange.com/a/74354 for a complete list of all special LaTeX spacing commands inside and outside of math mode.
No urgency to fix this --- perhaps it is even a feature, since it prevents users from trying to override default LaTeX spacing behaviour :)
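
In case it helps with triage, here is a tiny self-contained roundtrip check. The samples beyond `\ ` are my guess at which other spacing commands are in scope, taken from the list linked above:

```python
from TexSoup import TexSoup

def roundtrips(tex):
    """Return True when parsing and re-serializing preserves the source."""
    return str(TexSoup(tex)) == tex

# r"$a \ = \ b$" is the control-space case from this report; the rest are
# other LaTeX spacing commands (my assumption of scope, not verified).
for sample in (r"$a \ = \ b$", r"$a\,b$", r"$a\;b$", r"$a\quad b$"):
    print(sample, '->', 'ok' if roundtrips(sample) else 'broken')
```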
| 0.0 | f91d4e71b21aa6852378d2d60ecc551b39e05bf0 | [
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_def_without_braces"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_tokenize_punctuation_command_names",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_grouping_optional_argument",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-12-03 22:31:51+00:00 | bsd-2-clause | 1,064 |
|
alvinwan__TexSoup-138 | diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index fd53989..ba92aaa 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -31,7 +31,11 @@ SIGNATURES = {
'textbf': (1, 0),
'section': (1, 1),
'label': (1, 0),
+ 'cap': (0, 0),
'cup': (0, 0),
+ 'in': (0, 0),
+ 'notin': (0, 0),
+ 'infty': (0, 0),
'noindent': (0, 0),
}
@@ -278,7 +282,7 @@ def read_env(src, expr, skip_envs=(), tolerance=0, mode=MODE_NON_MATH):
while src.hasNext():
if src.peek().category == TC.Escape:
name, args = make_read_peek(read_command)(
- src, 1, skip=1, tolerance=tolerance, mode=mode)
+ src, skip=1, tolerance=tolerance, mode=mode)
if name == 'end':
break
contents.append(read_expr(src, skip_envs=skip_envs, tolerance=tolerance, mode=mode))
| alvinwan/TexSoup | 19451a322a51e12fd30a9a391301aa31b937e9e2 | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 60ea596..1b97eb9 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -385,7 +385,7 @@ def test_def_item():
def test_def_without_braces():
- """Tests that def without braces around the new command parses correctly"""
+ """Tests that def without braces around the new command parses correctly."""
soup = TexSoup(r"\def\acommandname{replacement text}")
assert len(soup.find("def").args) == 2
assert str(soup.find("def").args[0]) == r"\acommandname"
@@ -398,6 +398,19 @@ def test_grouping_optional_argument():
assert len(soup.Theorem.args) == 1
+def test_zero_argument_signatures():
+ """Tests that specific commands that do not take arguments are parsed correctly."""
+ soup = TexSoup(r"$\cap[\cup[\in[\notin[\infty[$")
+ assert len(soup.find("cap").args) == 0
+ assert len(soup.find("cup").args) == 0
+ assert len(soup.find("in").args) == 0
+ assert len(soup.find("notin").args) == 0
+ assert len(soup.find("infty").args) == 0
+
+ soup = TexSoup(r"\begin{equation} \cup [0, \infty) \end{equation}")
+ assert len(soup.find("cup").args) == 0
+
+
##############
# FORMATTING #
##############
| Non-matching brackets not parsed correctly
Certain math notation involves non-matched brackets.
For example, the set of nonnegative numbers is denoted `$[0, \infty)$` in interval notation. TexSoup handles this notation fine on its own but has trouble if there is a command before the non-matching expression, e.g. `$S \cup [0, \infty)$`.
```
from TexSoup.category import categorize
from TexSoup.tokens import tokenize

tokenize(categorize(r"""$\cup [0, \infty)$"""))
```
GIVES
```
tokens= '$'_MathSwitch
'\'_Escape
'cup'_CommandName
' '_MergedSpacer
'['_BracketBegin
'0, '_Text
'\'_Escape
'infty'_CommandName
')'_Text
'$'_MathSwitch
```
I'm thinking it sees the command, then the `_BracketBegin`, so it starts looking for a closing bracket, thinking these are optional arguments for the command `\cup`.
Here is the minimal failing test case:
```python
from TexSoup import TexSoup

def test_mixed_brackets():
    """Tests handling of math with non-matching bracket after a tex command."""
    soup = TexSoup(r"""$(-\infty,0]$""")  # works fine
    soup = TexSoup(r"""$[0, \infty)$""")  # works fine
    # GH115
    soup = TexSoup(r"""$S \cup [0, \infty)$""")
    assert True
```
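
One direction that might work (a sketch only, not tested against the codebase): keep a table of commands known to take zero arguments, so the reader never treats a `[` after them as the start of an optional argument. The names and the helper below are hypothetical; I only mirrored the `(n_required, n_optional)` arity shape such a registry would need:

```python
# Hypothetical sketch: an arity table consulted before reading optional args.
SIGNATURES = {
    'cup': (0, 0),    # (n_required, n_optional)
    'cap': (0, 0),
    'in': (0, 0),
    'infty': (0, 0),
}

def takes_optional_arg(command_name):
    """A '[' after a zero-optional command is plain text, not an argument."""
    _required, optional = SIGNATURES.get(command_name, (None, None))
    return optional != 0

print(takes_optional_arg('cup'))     # False: leave "[0, \infty)" as text
print(takes_optional_arg('textbf'))  # True by default for unknown commands
```

With `\cup` registered as zero-arity, the `'['_BracketBegin` token in the dump above would fall through to plain text.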
| 0.0 | 19451a322a51e12fd30a9a391301aa31b937e9e2 | [
"tests/test_parser.py::test_zero_argument_signatures"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_tokenize_punctuation_command_names",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_def_without_braces",
"tests/test_parser.py::test_grouping_optional_argument",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2023-02-17 20:14:35+00:00 | bsd-2-clause | 1,065 |
|
alvinwan__TexSoup-140 | diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index fd53989..ba92aaa 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -31,7 +31,11 @@ SIGNATURES = {
'textbf': (1, 0),
'section': (1, 1),
'label': (1, 0),
+ 'cap': (0, 0),
'cup': (0, 0),
+ 'in': (0, 0),
+ 'notin': (0, 0),
+ 'infty': (0, 0),
'noindent': (0, 0),
}
@@ -278,7 +282,7 @@ def read_env(src, expr, skip_envs=(), tolerance=0, mode=MODE_NON_MATH):
while src.hasNext():
if src.peek().category == TC.Escape:
name, args = make_read_peek(read_command)(
- src, 1, skip=1, tolerance=tolerance, mode=mode)
+ src, skip=1, tolerance=tolerance, mode=mode)
if name == 'end':
break
contents.append(read_expr(src, skip_envs=skip_envs, tolerance=tolerance, mode=mode))
diff --git a/TexSoup/tokens.py b/TexSoup/tokens.py
index 492c50e..50e97e2 100644
--- a/TexSoup/tokens.py
+++ b/TexSoup/tokens.py
@@ -123,8 +123,8 @@ def tokenize_escaped_symbols(text, prev=None):
and text.peek(1) \
and text.peek(1).category in (
CC.Escape, CC.GroupBegin, CC.GroupEnd, CC.MathSwitch,
- CC.Alignment, CC.Macro, CC.Superscript, CC.Subscript,
- CC.Spacer, CC.Active, CC.Comment, CC.Other):
+ CC.Alignment, CC.EndOfLine, CC.Macro, CC.Superscript,
+ CC.Subscript, CC.Spacer, CC.Active, CC.Comment, CC.Other):
result = text.forward(2)
result.category = TC.EscapedComment
return result
| alvinwan/TexSoup | 19451a322a51e12fd30a9a391301aa31b937e9e2 | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 60ea596..e958c58 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -195,6 +195,13 @@ def test_escaped_characters():
assert '\\$4-\\$5' in str(soup), 'Escaped characters not properly rendered.'
+def test_newline_after_backslash():
+ """Tests that newlines after backslashes are preserved."""
+ text = 'a\\\nb'
+ soup = TexSoup(text)
+ assert str(soup) == text
+
+
def test_math_environment_weirdness():
"""Tests that math environment interacts correctly with other envs."""
soup = TexSoup(r"""\begin{a} \end{a}$ b$""")
@@ -385,7 +392,7 @@ def test_def_item():
def test_def_without_braces():
- """Tests that def without braces around the new command parses correctly"""
+ """Tests that def without braces around the new command parses correctly."""
soup = TexSoup(r"\def\acommandname{replacement text}")
assert len(soup.find("def").args) == 2
assert str(soup.find("def").args[0]) == r"\acommandname"
@@ -398,6 +405,19 @@ def test_grouping_optional_argument():
assert len(soup.Theorem.args) == 1
+def test_zero_argument_signatures():
+ """Tests that specific commands that do not take arguments are parsed correctly."""
+ soup = TexSoup(r"$\cap[\cup[\in[\notin[\infty[$")
+ assert len(soup.find("cap").args) == 0
+ assert len(soup.find("cup").args) == 0
+ assert len(soup.find("in").args) == 0
+ assert len(soup.find("notin").args) == 0
+ assert len(soup.find("infty").args) == 0
+
+ soup = TexSoup(r"\begin{equation} \cup [0, \infty) \end{equation}")
+ assert len(soup.find("cup").args) == 0
+
+
##############
# FORMATTING #
##############
| Newlines after backslashes are not parsed correctly
For example:
```python
>>> import TexSoup
>>> text = 'a\\\nb'
>>> print(text)
a\
b
>>> soup = TexSoup.TexSoup(text)
>>> soup
a\b
```
The newline is gone, which has of course changed the meaning of the text, so running it through `TexSoup` again gives a different result:
```python
>>> TexSoup.TexSoup(soup)
a
``` | 0.0 | 19451a322a51e12fd30a9a391301aa31b937e9e2 | [
"tests/test_parser.py::test_newline_after_backslash",
"tests/test_parser.py::test_zero_argument_signatures"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_tokenize_punctuation_command_names",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_def_without_braces",
"tests/test_parser.py::test_grouping_optional_argument",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-23 18:11:23+00:00 | bsd-2-clause | 1,066 |
|
alvinwan__TexSoup-141 | diff --git a/TexSoup/data.py b/TexSoup/data.py
index 58cd070..8e04c24 100644
--- a/TexSoup/data.py
+++ b/TexSoup/data.py
@@ -593,10 +593,15 @@ class TexNode(object):
\item Bye
\end{itemize}
"""
+ for arg in self.expr.args:
+ if child.expr in arg._contents:
+ arg.insert(arg.remove(child.expr), *nodes)
+ return
self.expr.insert(
self.expr.remove(child.expr),
*nodes)
+
def search_regex(self, pattern):
for node in self.text:
for match in re.finditer(pattern, node):
@@ -1082,7 +1087,7 @@ class TexCmd(TexExpr):
def _assert_supports_contents(self):
if not self._supports_contents():
raise TypeError(
- 'Command "{}" has no children. `add_contents` is only valid'
+ 'Command "{}" has no children. `add_contents` is only valid '
'for: 1. environments like `itemize` and 2. `\\item`. '
'Alternatively, you can add, edit, or delete arguments by '
'modifying `.args`, which behaves like a list.'
diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index fd53989..57bd38e 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -31,7 +31,11 @@ SIGNATURES = {
'textbf': (1, 0),
'section': (1, 1),
'label': (1, 0),
+ 'cap': (0, 0),
'cup': (0, 0),
+ 'in': (0, 0),
+ 'notin': (0, 0),
+ 'infty': (0, 0),
'noindent': (0, 0),
}
@@ -97,7 +101,7 @@ def read_expr(src, skip_envs=(), tolerance=0, mode=MODE_NON_MATH):
elif c.category == TC.Escape:
name, args = read_command(src, tolerance=tolerance, mode=mode)
if name == 'item':
- assert mode != MODE_MATH, 'Command \item invalid in math mode.'
+ assert mode != MODE_MATH, r'Command \item invalid in math mode.'
contents = read_item(src)
expr = TexCmd(name, contents, args, position=c.position)
elif name == 'begin':
@@ -278,7 +282,7 @@ def read_env(src, expr, skip_envs=(), tolerance=0, mode=MODE_NON_MATH):
while src.hasNext():
if src.peek().category == TC.Escape:
name, args = make_read_peek(read_command)(
- src, 1, skip=1, tolerance=tolerance, mode=mode)
+ src, skip=1, tolerance=tolerance, mode=mode)
if name == 'end':
break
contents.append(read_expr(src, skip_envs=skip_envs, tolerance=tolerance, mode=mode))
diff --git a/TexSoup/tokens.py b/TexSoup/tokens.py
index 492c50e..50e97e2 100644
--- a/TexSoup/tokens.py
+++ b/TexSoup/tokens.py
@@ -123,8 +123,8 @@ def tokenize_escaped_symbols(text, prev=None):
and text.peek(1) \
and text.peek(1).category in (
CC.Escape, CC.GroupBegin, CC.GroupEnd, CC.MathSwitch,
- CC.Alignment, CC.Macro, CC.Superscript, CC.Subscript,
- CC.Spacer, CC.Active, CC.Comment, CC.Other):
+ CC.Alignment, CC.EndOfLine, CC.Macro, CC.Superscript,
+ CC.Subscript, CC.Spacer, CC.Active, CC.Comment, CC.Other):
result = text.forward(2)
result.category = TC.EscapedComment
return result
| alvinwan/TexSoup | 19451a322a51e12fd30a9a391301aa31b937e9e2 | diff --git a/tests/test_api.py b/tests/test_api.py
index 5c95831..7eb44fb 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -129,6 +129,13 @@ def test_replace_multiple(chikin):
assert len(list(chikin.find_all('subsection'))) == 5
+def test_replace_in_args():
+ """Replace an element in an argument"""
+ soup = TexSoup(r'\Fig{\ref{a_label}}')
+ soup.ref.replace_with('2')
+ assert str(soup) == r'\Fig{2}'
+
+
def test_append(chikin):
"""Add a child to the parse tree"""
chikin.itemize.append('asdfghjkl')
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 60ea596..e958c58 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -195,6 +195,13 @@ def test_escaped_characters():
assert '\\$4-\\$5' in str(soup), 'Escaped characters not properly rendered.'
+def test_newline_after_backslash():
+ """Tests that newlines after backslashes are preserved."""
+ text = 'a\\\nb'
+ soup = TexSoup(text)
+ assert str(soup) == text
+
+
def test_math_environment_weirdness():
"""Tests that math environment interacts correctly with other envs."""
soup = TexSoup(r"""\begin{a} \end{a}$ b$""")
@@ -385,7 +392,7 @@ def test_def_item():
def test_def_without_braces():
- """Tests that def without braces around the new command parses correctly"""
+ """Tests that def without braces around the new command parses correctly."""
soup = TexSoup(r"\def\acommandname{replacement text}")
assert len(soup.find("def").args) == 2
assert str(soup.find("def").args[0]) == r"\acommandname"
@@ -398,6 +405,19 @@ def test_grouping_optional_argument():
assert len(soup.Theorem.args) == 1
+def test_zero_argument_signatures():
+ """Tests that specific commands that do not take arguments are parsed correctly."""
+ soup = TexSoup(r"$\cap[\cup[\in[\notin[\infty[$")
+ assert len(soup.find("cap").args) == 0
+ assert len(soup.find("cup").args) == 0
+ assert len(soup.find("in").args) == 0
+ assert len(soup.find("notin").args) == 0
+ assert len(soup.find("infty").args) == 0
+
+ soup = TexSoup(r"\begin{equation} \cup [0, \infty) \end{equation}")
+ assert len(soup.find("cup").args) == 0
+
+
##############
# FORMATTING #
##############
| replace_with does not work for arguments of a node
```python3
soup = TexSoup(r"\somecommand{\anothercommand}")
some_obj = TexNode(TexText("new text"))
soup.somecommand.anothercommand.replace_with(some_obj)
```
Gives
```python3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/wilco/.cache/pypoetry/virtualenvs/viaduct-CDUJVlHg-py3.8/lib/python3.8/site-packages/TexSoup/data.py", line 573, in replace_with
self.parent.replace(self, *nodes)
File "/home/wilco/.cache/pypoetry/virtualenvs/viaduct-CDUJVlHg-py3.8/lib/python3.8/site-packages/TexSoup/data.py", line 597, in replace
self.expr.remove(child.expr),
File "/home/wilco/.cache/pypoetry/virtualenvs/viaduct-CDUJVlHg-py3.8/lib/python3.8/site-packages/TexSoup/data.py", line 838, in remove
self._assert_supports_contents()
File "/home/wilco/.cache/pypoetry/virtualenvs/viaduct-CDUJVlHg-py3.8/lib/python3.8/site-packages/TexSoup/data.py", line 1084, in _assert_supports_contents
raise TypeError(
TypeError: Command "somecommand" has no children. `add_contents` is only validfor: 1. environments like `itemize` and 2. `\item`. Alternatively, you can add, edit, or delete arguments by modifying `.args`, which behaves like a list.
```
The `replace_with` method tries to replace a child of 'somecommand' but we are dealing with an argument instead. The documentation does not mention this limitation.
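
The only workaround I have found so far is to bypass `replace_with` and edit the argument list directly, as the error message itself suggests. A sketch (it assumes `TexArgs.insert` coerces a brace-delimited string, which I have not verified):

```python
from TexSoup import TexSoup

soup = TexSoup(r"\somecommand{\anothercommand}")
args = soup.somecommand.args
# Swap the first (and only) argument: insert the replacement, then drop
# the old one. The arguments "behave like a list" per the error above.
args.insert(0, '{new text}')
args.pop(1)
print(soup)  # hoping for: \somecommand{new text}
```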
In my use case I am parsing all `newcommand`s, then replacing these commands with their values. I.e.
```latex
\newcommand{\test}{Just a test}
\stuff{\test}
```
Should be converted to
```latex
\newcommand{\test}{Just a test}
\stuff{Just a test}
```
In this case I do not know whether I am operating on an argument of a node instead of a child of this node. Is there a workaround? | 0.0 | 19451a322a51e12fd30a9a391301aa31b937e9e2 | [
"tests/test_api.py::test_replace_in_args",
"tests/test_parser.py::test_newline_after_backslash",
"tests/test_parser.py::test_zero_argument_signatures"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/category.py::TexSoup.category.categorize",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexEnv.__init__",
"TexSoup/data.py::TexSoup.data.TexExpr.__eq__",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexExpr.string",
"TexSoup/data.py::TexSoup.data.TexGroup.parse",
"TexSoup/data.py::TexSoup.data.TexNamedEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.make_read_peek",
"TexSoup/reader.py::TexSoup.reader.read_arg",
"TexSoup/reader.py::TexSoup.reader.read_arg_required",
"TexSoup/reader.py::TexSoup.reader.read_args",
"TexSoup/reader.py::TexSoup.reader.read_command",
"TexSoup/reader.py::TexSoup.reader.read_env",
"TexSoup/reader.py::TexSoup.reader.read_item",
"TexSoup/reader.py::TexSoup.reader.read_math_env",
"TexSoup/reader.py::TexSoup.reader.read_skip_env",
"TexSoup/reader.py::TexSoup.reader.read_spacer",
"TexSoup/tokens.py::TexSoup.tokens.next_token",
"TexSoup/tokens.py::TexSoup.tokens.tokenize",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_command_name",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_escaped_symbols",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_ignore",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_break",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_line_comment",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_asym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_math_sym_switch",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_spacers",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_string",
"TexSoup/tokens.py::TexSoup.tokens.tokenize_symbols",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward_until",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.MixedBuffer.__init__",
"TexSoup/utils.py::TexSoup.utils.Token.__add__",
"TexSoup/utils.py::TexSoup.utils.Token.__contains__",
"TexSoup/utils.py::TexSoup.utils.Token.__eq__",
"TexSoup/utils.py::TexSoup.utils.Token.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Token.__hash__",
"TexSoup/utils.py::TexSoup.utils.Token.__iadd__",
"TexSoup/utils.py::TexSoup.utils.Token.__iter__",
"TexSoup/utils.py::TexSoup.utils.Token.__radd__",
"TexSoup/utils.py::TexSoup.utils.Token.lstrip",
"TexSoup/utils.py::TexSoup.utils.Token.rstrip",
"TexSoup/utils.py::TexSoup.utils.to_list",
"tests/test_api.py::test_navigation_attributes",
"tests/test_api.py::test_navigation_parent",
"tests/test_api.py::test_navigation_children",
"tests/test_api.py::test_navigation_descendants",
"tests/test_api.py::test_navigation_positions",
"tests/test_api.py::test_find_basic",
"tests/test_api.py::test_find_by_command",
"tests/test_api.py::test_find_env",
"tests/test_api.py::test_delete",
"tests/test_api.py::test_delete_arg",
"tests/test_api.py::test_delete_token",
"tests/test_api.py::test_replace_single",
"tests/test_api.py::test_replace_multiple",
"tests/test_api.py::test_append",
"tests/test_api.py::test_insert",
"tests/test_api.py::test_change_string",
"tests/test_api.py::test_change_name",
"tests/test_api.py::test_access_position",
"tests/test_api.py::test_math_env_change",
"tests/test_api.py::test_text",
"tests/test_api.py::test_search_regex",
"tests/test_api.py::test_search_regex_precompiled_pattern",
"tests/test_api.py::test_skip_envs",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_command_env_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_math_environment_weirdness",
"tests/test_parser.py::test_tokenize_punctuation_command_names",
"tests/test_parser.py::test_item_parsing",
"tests/test_parser.py::test_item_argument_parsing",
"tests/test_parser.py::test_comment_escaping",
"tests/test_parser.py::test_comment_unparsed",
"tests/test_parser.py::test_comment_after_escape",
"tests/test_parser.py::test_items_with_labels",
"tests/test_parser.py::test_multiline_args",
"tests/test_parser.py::test_nested_commands",
"tests/test_parser.py::test_def_item",
"tests/test_parser.py::test_def_without_braces",
"tests/test_parser.py::test_grouping_optional_argument",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_non_letter_commands",
"tests/test_parser.py::test_math_environment_escape",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_non_punctuation_command_structure",
"tests/test_parser.py::test_allow_unclosed_non_curly_braces",
"tests/test_parser.py::test_buffer",
"tests/test_parser.py::test_to_buffer",
"tests/test_parser.py::test_unclosed_commands",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse",
"tests/test_parser.py::test_tolerance_env_unclosed"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-02-24 10:57:39+00:00 | bsd-2-clause | 1,067 |
|
alvinwan__TexSoup-92 | diff --git a/TexSoup/__init__.py b/TexSoup/__init__.py
index cd8782a..9e622c0 100644
--- a/TexSoup/__init__.py
+++ b/TexSoup/__init__.py
@@ -9,7 +9,7 @@ from TexSoup.tex import *
# noinspection PyPep8Naming
-def TexSoup(tex_code):
+def TexSoup(tex_code, skip_envs=()):
r"""
At a high-level, parses provided Tex into a navigable, searchable structure.
This is accomplished in two steps:
@@ -18,6 +18,7 @@ def TexSoup(tex_code):
2. Structure fed to TexNodes for a searchable, coder-friendly interface.
:param Union[str,iterable] tex_code: the Tex source
+ :param Union[str] skip_envs: names of environments to skip parsing
:return: :class:`TexSoup.data.TexNode` object representing tex document
>>> from TexSoup import TexSoup
@@ -80,5 +81,5 @@ def TexSoup(tex_code):
>>> soup
SOUP
"""
- parsed, src = read(tex_code)
+ parsed, src = read(tex_code, skip_envs=skip_envs)
return TexNode(parsed, src=src)
diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index 6dc4a99..0b9c182 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -224,7 +224,7 @@ def tokenize_string(text, delimiters=None):
##########
-def read_tex(src):
+def read_tex(src, skip_envs=()):
r"""Read next expression from buffer
:param Buffer src: a buffer of tokens
@@ -262,7 +262,7 @@ def read_tex(src):
expr.args = read_args(src, expr.args)
if mode == 'begin':
- read_env(src, expr)
+ read_env(src, expr, skip_envs=skip_envs)
return expr
if c in ARG_START_TOKENS:
return read_arg(src, c)
@@ -340,7 +340,7 @@ def read_math_env(src, expr):
return expr
-def read_env(src, expr):
+def read_env(src, expr, skip_envs=()):
r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
@@ -351,7 +351,7 @@ def read_env(src, expr):
:rtype: TexExpr
"""
contents = []
- if expr.name in SKIP_ENVS:
+ if expr.name in SKIP_ENVS + skip_envs:
contents = [src.forward_until(lambda s: s == '\\end')]
while src.hasNext() and not src.startswith('\\end{%s}' % expr.name):
contents.append(read_tex(src))
diff --git a/TexSoup/tex.py b/TexSoup/tex.py
index ad8ce44..1b2ec63 100644
--- a/TexSoup/tex.py
+++ b/TexSoup/tex.py
@@ -4,7 +4,7 @@ from TexSoup.utils import *
import itertools
-def read(tex):
+def read(tex, skip_envs=()):
"""Read and parse all LaTeX source
:param Union[str,iterable] tex: LaTeX source
@@ -16,7 +16,7 @@ def read(tex):
tex = ''.join(itertools.chain(*tex))
buf, children = Buffer(tokenize(tex)), []
while buf.hasNext():
- content = read_tex(buf)
+ content = read_tex(buf, skip_envs=skip_envs)
if content is not None:
children.append(content)
return TexEnv('[tex]', children), tex
| alvinwan/TexSoup | 19f91d9ca806018dd83de419c12377f3ca0add3f | diff --git a/tests/test_api.py b/tests/test_api.py
index a3bd66b..b2a3825 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -1,6 +1,7 @@
from TexSoup import TexSoup
from TexSoup.utils import TokenWithPosition
from tests.config import chikin
+import pytest
import re
##############
@@ -169,3 +170,20 @@ def test_search_regex_precompiled_pattern(chikin):
assert len(matches) == 1
assert matches[0] == "unless ordered to squat"
assert matches[0].position == 341
+
+
+###########
+# TEXSOUP #
+###########
+
+
+def test_skip_envs():
+ """Test envs with invalid latex are not parsed."""
+ with pytest.raises(TypeError):
+ soup = TexSoup(r"""will raise error \textbf{aaaaa""")
+
+ # no error, ignores verbatim
+ TexSoup(r"""\begin{verbatim} \textbf{aaaaa \end{verbatim}""")
+
+ # no error, customized to ignore foobar
+ TexSoup(r"""\begin{foobar} \textbf{aaaaa \end{foobar}""", skip_envs=('foobar',))
| Ignoring Latex in certain cases
In the LaTeX files I try to parse are some blocks that TexSoup shouldn't try to parse as LaTeX. They are, in fact, similar to a `\begin{code}
...\end{code}` block: what is between the begin and end should not be read as LaTeX. In fact, doing so would lead to a lot of errors in a book about programming. I have some special code blocks here (not using `\begin{code}`, but `\begin{console}` or `\begin{special}`). I could replace these in the original LaTeX before using TexSoup, but that sounds like an unnecessary step. Although I'm not that familiar with LaTeX, I expect these blocks are described as 'escaped' (perhaps as 'math' blocks) in the `tty` file. But that's just a guess. At any rate, when TexSoup tries to parse such files, it fails (with an `EOFError` or `TypeError`, indicating mismatching punctuation, which is kind of normal).
A fix in the API might not be that easy. A keyword argument in the converter might not be ideal, but it would be the easiest fix, I guess. An alternative would be another function (or a method on the parser itself) to ignore such blocks. Perhaps this method already exists; I couldn't find a way to do it in the documentation.
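
To make the keyword-argument idea concrete, the call I have in mind would look something like the sketch below. `skip_envs` is a hypothetical parameter name, `chapter1.tex` is a stand-in for one of my files, and `console`/`special` are the environment names from my books:

```python
from TexSoup import TexSoup

with open('chapter1.tex') as f:  # hypothetical input file
    tex = f.read()

# Everything inside \begin{console}...\end{console} and
# \begin{special}...\end{special} would be kept as raw, unparsed text,
# just like the existing handling of verbatim-style environments.
soup = TexSoup(tex, skip_envs=('console', 'special'))
```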
Thank you again for this tool. I'm using it to try to convert these LaTeX books to easy Markdown, and that will involve a lot of content and a lot of adjustments, but I think with such a parser it shouldn't be too hard. So I hope, anyway! | 0.0 | 19f91d9ca806018dd83de419c12377f3ca0add3f | [
"tests/test_api.py::test_skip_envs"
]
| [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/data.py::TexSoup.data.Arg",
"TexSoup/data.py::TexSoup.data.Arg.delims",
"TexSoup/data.py::TexSoup.data.Arg.parse",
"TexSoup/data.py::TexSoup.data.Arg.value",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__contains__",
"TexSoup/data.py::TexSoup.data.TexArgs.__getitem__",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexArgs.append",
"TexSoup/data.py::TexSoup.data.TexArgs.clear",
"TexSoup/data.py::TexSoup.data.TexArgs.extend",
"TexSoup/data.py::TexSoup.data.TexArgs.insert",
"TexSoup/data.py::TexSoup.data.TexArgs.pop",
"TexSoup/data.py::TexSoup.data.TexArgs.remove",
"TexSoup/data.py::TexSoup.data.TexArgs.reverse",
"TexSoup/data.py::TexSoup.data.TexArgs.sort",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexExpr.all",
"TexSoup/data.py::TexSoup.data.TexExpr.append",
"TexSoup/data.py::TexSoup.data.TexExpr.contents",
"TexSoup/data.py::TexSoup.data.TexExpr.insert",
"TexSoup/data.py::TexSoup.data.TexExpr.remove",
"TexSoup/data.py::TexSoup.data.TexNode.__iter__",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/data.py::TexSoup.data.TexNode.all",
"TexSoup/data.py::TexSoup.data.TexNode.append",
"TexSoup/data.py::TexSoup.data.TexNode.args",
"TexSoup/data.py::TexSoup.data.TexNode.char_pos_to_line",
"TexSoup/data.py::TexSoup.data.TexNode.children",
"TexSoup/data.py::TexSoup.data.TexNode.contents",
"TexSoup/data.py::TexSoup.data.TexNode.copy",
"TexSoup/data.py::TexSoup.data.TexNode.count",
"TexSoup/data.py::TexSoup.data.TexNode.delete",
"TexSoup/data.py::TexSoup.data.TexNode.descendants",
"TexSoup/data.py::TexSoup.data.TexNode.find",
"TexSoup/data.py::TexSoup.data.TexNode.find_all",
"TexSoup/data.py::TexSoup.data.TexNode.insert",
"TexSoup/data.py::TexSoup.data.TexNode.name",
"TexSoup/data.py::TexSoup.data.TexNode.remove",
"TexSoup/data.py::TexSoup.data.TexNode.replace",
"TexSoup/data.py::TexSoup.data.TexNode.replace_with",
"TexSoup/data.py::TexSoup.data.TexNode.string",
"TexSoup/data.py::TexSoup.data.TexNode.text",
"TexSoup/data.py::TexSoup.data.TexText",
"TexSoup/data.py::TexSoup.data.TexText.__contains__",
"TexSoup/data.py::TexSoup.data.TexText.__eq__",
"TexSoup/data.py::TexSoup.data.TexText.__repr__",
"TexSoup/data.py::TexSoup.data.TexText.__str__",
"TexSoup/reader.py::TexSoup.reader.next_token",
"TexSoup/reader.py::TexSoup.reader.tokenize",
"TexSoup/reader.py::TexSoup.reader.tokenize_line_comment",
"TexSoup/reader.py::TexSoup.reader.tokenize_math",
"TexSoup/reader.py::TexSoup.reader.tokenize_string",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.CharToLineOffset",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__add__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__contains__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__eq__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__getitem__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__iadd__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__iter__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.__radd__",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.lstrip",
"TexSoup/utils.py::TexSoup.utils.TokenWithPosition.rstrip",
"tests/test_api.py::test_navigation_attributes",
"tests/test_api.py::test_navigation_parent",
"tests/test_api.py::test_navigation_children",
"tests/test_api.py::test_navigation_descendants",
"tests/test_api.py::test_navigation_positions",
"tests/test_api.py::test_find_basic",
"tests/test_api.py::test_find_by_command",
"tests/test_api.py::test_find_env",
"tests/test_api.py::test_delete",
"tests/test_api.py::test_delete_arg",
"tests/test_api.py::test_delete_token",
"tests/test_api.py::test_replace_single",
"tests/test_api.py::test_replace_multiple",
"tests/test_api.py::test_append",
"tests/test_api.py::test_insert",
"tests/test_api.py::test_text",
"tests/test_api.py::test_search_regex",
"tests/test_api.py::test_search_regex_precompiled_pattern"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2020-03-08 10:45:39+00:00 | bsd-2-clause | 1,068 |
|
amalfra__docker-hub-15 | diff --git a/src/__init__.py b/docker_hub/__init__.py
similarity index 100%
rename from src/__init__.py
rename to docker_hub/__init__.py
diff --git a/src/__main__.py b/docker_hub/__main__.py
similarity index 100%
rename from src/__main__.py
rename to docker_hub/__main__.py
diff --git a/src/cli.py b/docker_hub/cli.py
similarity index 97%
rename from src/cli.py
rename to docker_hub/cli.py
index 0019395..cac0384 100644
--- a/src/cli.py
+++ b/docker_hub/cli.py
@@ -70,7 +70,7 @@ def main():
sys.exit(1)
# Execute the command provided by user
- command = importlib.import_module('src.commands.' + args.method)
+ command = importlib.import_module('docker_hub.commands.' + args.method)
command.run(docker_hub_client, args)
if args.method not in NO_TIP_METHODS and args.format not in \
diff --git a/setup.py b/setup.py
index 9a8d1bd..5998da8 100644
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@ long_descr = open('README.md').read()
setup(
name='docker-hub',
- version=find_version('src', '__init__.py'),
+ version=find_version('docker_hub', '__init__.py'),
description='Access docker hub from your terminal',
long_description=long_descr,
long_description_content_type='text/markdown',
@@ -49,7 +49,7 @@ setup(
setup_requires=['pytest-runner', 'setuptools>=38.6.0'],
tests_require=['pytest'],
entry_points={
- 'console_scripts': ['docker-hub=src.__main__:main']
+ 'console_scripts': ['docker-hub=docker_hub.__main__:main']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
| amalfra/docker-hub | de18c8c675513c4784e5cd86a7a8fd27b4f2bd3d | diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 22c8d41..3f7094a 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,4 +1,4 @@
-on: [push, pull_request]
+on: [push, pull_request, workflow_dispatch]
name: Test
jobs:
test:
@@ -21,7 +21,7 @@ jobs:
- name: Lint
run: pycodestyle .
- name: Test
- run: coverage run --source=src setup.py test
+ run: coverage run --source=docker_hub setup.py test
- name: After success
if: ${{ success() }}
env:
diff --git a/src/commands/__init__.py b/docker_hub/commands/__init__.py
similarity index 100%
rename from src/commands/__init__.py
rename to docker_hub/commands/__init__.py
diff --git a/src/commands/builds.py b/docker_hub/commands/builds.py
similarity index 100%
rename from src/commands/builds.py
rename to docker_hub/commands/builds.py
diff --git a/src/commands/config.py b/docker_hub/commands/config.py
similarity index 100%
rename from src/commands/config.py
rename to docker_hub/commands/config.py
diff --git a/src/commands/login.py b/docker_hub/commands/login.py
similarity index 100%
rename from src/commands/login.py
rename to docker_hub/commands/login.py
diff --git a/src/commands/queue.py b/docker_hub/commands/queue.py
similarity index 100%
rename from src/commands/queue.py
rename to docker_hub/commands/queue.py
diff --git a/src/commands/repos.py b/docker_hub/commands/repos.py
similarity index 100%
rename from src/commands/repos.py
rename to docker_hub/commands/repos.py
diff --git a/src/commands/tags.py b/docker_hub/commands/tags.py
similarity index 100%
rename from src/commands/tags.py
rename to docker_hub/commands/tags.py
diff --git a/src/commands/users.py b/docker_hub/commands/users.py
similarity index 100%
rename from src/commands/users.py
rename to docker_hub/commands/users.py
diff --git a/src/commands/version.py b/docker_hub/commands/version.py
similarity index 100%
rename from src/commands/version.py
rename to docker_hub/commands/version.py
diff --git a/src/consts.py b/docker_hub/consts.py
similarity index 100%
rename from src/consts.py
rename to docker_hub/consts.py
diff --git a/src/libs/__init__.py b/docker_hub/libs/__init__.py
similarity index 100%
rename from src/libs/__init__.py
rename to docker_hub/libs/__init__.py
diff --git a/src/libs/config.py b/docker_hub/libs/config.py
similarity index 100%
rename from src/libs/config.py
rename to docker_hub/libs/config.py
diff --git a/src/libs/docker_client.py b/docker_hub/libs/docker_client.py
similarity index 100%
rename from src/libs/docker_client.py
rename to docker_hub/libs/docker_client.py
diff --git a/src/libs/docker_hub_client.py b/docker_hub/libs/docker_hub_client.py
similarity index 100%
rename from src/libs/docker_hub_client.py
rename to docker_hub/libs/docker_hub_client.py
diff --git a/src/libs/utils.py b/docker_hub/libs/utils.py
similarity index 100%
rename from src/libs/utils.py
rename to docker_hub/libs/utils.py
diff --git a/src/tests/__init__.py b/docker_hub/tests/__init__.py
similarity index 100%
rename from src/tests/__init__.py
rename to docker_hub/tests/__init__.py
diff --git a/src/tests/docker_hub_client.py b/docker_hub/tests/docker_hub_client.py
similarity index 100%
rename from src/tests/docker_hub_client.py
rename to docker_hub/tests/docker_hub_client.py
diff --git a/src/tests/helpers.py b/docker_hub/tests/helpers.py
similarity index 100%
rename from src/tests/helpers.py
rename to docker_hub/tests/helpers.py
diff --git a/src/tests/test_commands/__init__.py b/docker_hub/tests/test_commands/__init__.py
similarity index 100%
rename from src/tests/test_commands/__init__.py
rename to docker_hub/tests/test_commands/__init__.py
diff --git a/src/tests/test_commands/test_tags.py b/docker_hub/tests/test_commands/test_tags.py
similarity index 98%
rename from src/tests/test_commands/test_tags.py
rename to docker_hub/tests/test_commands/test_tags.py
index 30a33e6..61e2ce8 100644
--- a/src/tests/test_commands/test_tags.py
+++ b/docker_hub/tests/test_commands/test_tags.py
@@ -3,7 +3,7 @@ from collections import namedtuple
from ..docker_hub_client import \
NoResultsTestingDockerHubClient, WithResultsTestingDockerHubClient
-from src.commands.tags import run
+from docker_hub.commands.tags import run
from ..helpers import convert_key_to_result_format, generate_results
| Fix `site-packages/src` installation directory
Thanks for providing this tool. I was looking at the possibility of using it as a library for our own python projects and I noticed that it does not install cleanly via pip.
The `src` directory gets installed into `site-packages` as `src`, which means that if another package ships a top-level `src` package the directories will clash.
I strongly recommend renaming the `src` directory to `docker_hub` and updating the references accordingly.
Today we can do:
```
import src.libs.docker_hub_client
```
And being able to do:
```
import docker_hub.libs.docker_hub_client
```
would be much cleaner.
I can send a PR if you are interested. The changes should be minimal.
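For reference, here is a rough sketch of what the packaging declaration could look like after the rename, so that only `docker_hub` lands in `site-packages`. The `find_packages` call is an assumption on my part; the project may prefer to keep listing packages explicitly.
```
# Sketch only: setup.py picking up the renamed package instead of "src".
from setuptools import setup, find_packages

setup(
    name='docker-hub',
    packages=find_packages(include=['docker_hub', 'docker_hub.*']),
    entry_points={
        'console_scripts': ['docker-hub=docker_hub.__main__:main'],
    },
)
```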
| 0.0 | de18c8c675513c4784e5cd86a7a8fd27b4f2bd3d | [
"docker_hub/tests/test_commands/test_tags.py::test_no_tags",
"docker_hub/tests/test_commands/test_tags.py::test_no_tags_and_all_pages",
"docker_hub/tests/test_commands/test_tags.py::test_with_tags",
"docker_hub/tests/test_commands/test_tags.py::test_with_tags_and_all_pages"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2021-07-20 16:41:24+00:00 | mit | 1,069 |
|
ambv__retype-3 | diff --git a/retype.py b/retype.py
index eee4b8e..d59e1b0 100644
--- a/retype.py
+++ b/retype.py
@@ -12,6 +12,7 @@ from pathlib import Path
import re
import sys
import threading
+import tokenize
import traceback
import click
@@ -138,9 +139,9 @@ def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):
Type comments in sources are normalized to type annotations.
"""
- with open(src) as src_file:
- src_txt = src_file.read()
- src_node = lib2to3_parse(src_txt)
+ with tokenize.open(src) as src_buffer:
+ src_encoding = src_buffer.encoding
+ src_node = lib2to3_parse(src_buffer.read())
try:
with open((pyi_dir / src.name).with_suffix('.pyi')) as pyi_file:
pyi_txt = pyi_file.read()
@@ -156,7 +157,7 @@ def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):
reapply_all(pyi_ast.body, src_node)
fix_remaining_type_comments(src_node)
targets.mkdir(parents=True, exist_ok=True)
- with open(targets / src.name, 'w') as target_file:
+ with open(targets / src.name, 'w', encoding=src_encoding) as target_file:
target_file.write(lib2to3_unparse(src_node, hg=hg))
return targets / src.name
@@ -169,7 +170,11 @@ def lib2to3_parse(src_txt):
result = drv.parse_string(src_txt, True)
except ParseError as pe:
lineno, column = pe.context[1]
- faulty_line = src_txt.splitlines()[lineno - 1]
+ lines = src_txt.splitlines()
+ if src_txt[-1] != '\n':
+ faulty_line = "The source is missing a trailing newline."
+ else:
+ faulty_line = lines[lineno - 1]
raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None
if isinstance(result, Leaf):
| ambv/retype | 3fb46555d76dd5432481936c4101f7f50b584b88 | diff --git a/tests/test_retype.py b/tests/test_retype.py
index 5142782..dfb4d99 100644
--- a/tests/test_retype.py
+++ b/tests/test_retype.py
@@ -2134,6 +2134,17 @@ class PrintStmtTestCase(RetypeTestCase):
str(exception),
)
+class ParseErrorTestCase(RetypeTestCase):
+ def test_missing_trailing_newline_crash(self) -> None:
+ pyi_txt = "def f() -> None: ...\n"
+ src_txt = """
+ def f():
+ pass"""
+ exception = self.assertReapplyRaises(pyi_txt, src_txt, ValueError)
+ self.assertEqual(
+ 'Cannot parse: 4:0: The source is missing a trailing newline.',
+ str(exception),
+ )
class PostProcessTestCase(RetypeTestCase):
def test_straddling_variable_comments(self) -> None:
| lib2to3_parse assumes that the ParseError will always refer to an existing line
There seems to be a case where `ParseError` reports a line number past the last line of the source, causing an `IndexError` in retype:
Example file (core.py):
```
def get_message():
return '123'
```
Example stub (types/core.pyi):
```
def get_message() -> str: ...
```
```
$>retype --traceback core.py
error: core.py: list index out of range
Traceback (most recent call last):
File "retype.py", line 110, in retype_path
retype_file(src, pyi_dir, targets, quiet=quiet, hg=hg)
File "retype.py", line 132, in retype_file
src_node = lib2to3_parse(src_txt)
File "retype.py", line 161, in lib2to3_parse
faulty_line = src_txt.splitlines()[lineno - 1]
IndexError: list index out of range
```
I haven't gone digging yet to see why the `Driver` is failing to parse this, but it seems that this should be fixed as well.
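One possible guard, roughly sketched below — `describe_parse_error` is a hypothetical helper name, not retype's actual API:
```
def describe_parse_error(src_txt: str, lineno: int) -> str:
    """Hypothetical helper; retype currently indexes splitlines() directly."""
    lines = src_txt.splitlines()
    if 0 < lineno <= len(lines):
        return lines[lineno - 1]
    # lineno can point one past the end when the trailing newline is missing
    return "<no such line - source may be missing a trailing newline>"
```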
**Tested using:**
- `Python 3.6.1 (v3.6.1:69c0db5, Mar 21 2017, 17:54:52) [MSC v.1900 32 bit (Intel)] on win32`
- `Python 3.6.1 (default, May 11 2017, 22:14:44) [GCC 4.9.2] on linux`
| 0.0 | 3fb46555d76dd5432481936c4101f7f50b584b88 | [
"tests/test_retype.py::ParseErrorTestCase::test_missing_trailing_newline_crash"
]
| [
"tests/test_retype.py::ImportTestCase::test_equal",
"tests/test_retype.py::ImportTestCase::test_matched1",
"tests/test_retype.py::ImportTestCase::test_matched2",
"tests/test_retype.py::ImportTestCase::test_matched3",
"tests/test_retype.py::ImportTestCase::test_src_empty",
"tests/test_retype.py::ImportTestCase::test_unmatched1",
"tests/test_retype.py::ImportTestCase::test_unmatched2",
"tests/test_retype.py::ImportTestCase::test_unmatched3",
"tests/test_retype.py::ImportTestCase::test_unmatched4",
"tests/test_retype.py::ImportTestCase::test_unmatched5",
"tests/test_retype.py::ImportTestCase::test_unmatched6",
"tests/test_retype.py::ImportTestCase::test_unmatched7",
"tests/test_retype.py::FromImportTestCase::test_equal",
"tests/test_retype.py::FromImportTestCase::test_matched1",
"tests/test_retype.py::FromImportTestCase::test_matched2",
"tests/test_retype.py::FromImportTestCase::test_matched3",
"tests/test_retype.py::FromImportTestCase::test_src_empty",
"tests/test_retype.py::FromImportTestCase::test_unmatched1",
"tests/test_retype.py::FromImportTestCase::test_unmatched2",
"tests/test_retype.py::FromImportTestCase::test_unmatched3",
"tests/test_retype.py::FromImportTestCase::test_unmatched4",
"tests/test_retype.py::FromImportTestCase::test_unmatched5",
"tests/test_retype.py::FromImportTestCase::test_unmatched6",
"tests/test_retype.py::FromImportTestCase::test_unmatched7",
"tests/test_retype.py::FromImportTestCase::test_unmatched8",
"tests/test_retype.py::FunctionReturnTestCase::test_complex_return_value",
"tests/test_retype.py::FunctionReturnTestCase::test_complex_return_value2",
"tests/test_retype.py::FunctionReturnTestCase::test_complex_return_value3",
"tests/test_retype.py::FunctionReturnTestCase::test_complex_return_value_spurious_type_comment",
"tests/test_retype.py::FunctionReturnTestCase::test_complex_return_value_type_comment",
"tests/test_retype.py::FunctionReturnTestCase::test_mismatched_return_value",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_both",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_both_incremental",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_pyi",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_pyi_incremental",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_src",
"tests/test_retype.py::FunctionReturnTestCase::test_missing_return_value_src_incremental",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_ann",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_ann_with_default",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig1",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig1_type_comment",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig2",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig2_type_comment",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig3_type_comment",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig4_spurious_type_comment",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig4_type_comment",
"tests/test_retype.py::FunctionArgumentTestCase::test_complex_sig_async",
"tests/test_retype.py::FunctionArgumentTestCase::test_extra_arg1",
"tests/test_retype.py::FunctionArgumentTestCase::test_extra_arg2",
"tests/test_retype.py::FunctionArgumentTestCase::test_extra_arg_kwonly",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_ann_both",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_ann_both_incremental",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_ann_both_multiple_args_incremental",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_ann_pyi",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_ann_src",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_arg",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_arg2",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_arg_kwonly",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_default_arg_pyi",
"tests/test_retype.py::FunctionArgumentTestCase::test_missing_default_arg_src",
"tests/test_retype.py::FunctionArgumentTestCase::test_no_args",
"tests/test_retype.py::FunctionVariableTestCase::test_basic",
"tests/test_retype.py::FunctionVariableTestCase::test_complex",
"tests/test_retype.py::FunctionVariableTestCase::test_complex_type",
"tests/test_retype.py::FunctionVariableTestCase::test_default_type",
"tests/test_retype.py::FunctionVariableTestCase::test_no_value",
"tests/test_retype.py::FunctionVariableTestCase::test_type_mismatch",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_basic",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_complex",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_complex_type",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_default_type",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_no_value",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_no_value_type_comment",
"tests/test_retype.py::FunctionVariableTypeCommentTestCase::test_type_mismatch",
"tests/test_retype.py::MethodTestCase::test_basic",
"tests/test_retype.py::MethodTestCase::test_complex_sig1_type_comment",
"tests/test_retype.py::MethodTestCase::test_complex_sig2_type_comment",
"tests/test_retype.py::MethodTestCase::test_complex_sig3_type_comment",
"tests/test_retype.py::MethodTestCase::test_complex_sig4_type_comment",
"tests/test_retype.py::MethodTestCase::test_complex_sig5_type_comment",
"tests/test_retype.py::MethodTestCase::test_decorator_mismatch",
"tests/test_retype.py::MethodTestCase::test_decorator_mismatch2",
"tests/test_retype.py::MethodTestCase::test_decorator_mismatch3",
"tests/test_retype.py::MethodTestCase::test_function",
"tests/test_retype.py::MethodTestCase::test_missing_class",
"tests/test_retype.py::MethodTestCase::test_staticmethod",
"tests/test_retype.py::MethodTestCase::test_two_classes",
"tests/test_retype.py::MethodVariableTestCase::test_basic",
"tests/test_retype.py::MethodVariableTestCase::test_complex",
"tests/test_retype.py::MethodVariableTestCase::test_default_type",
"tests/test_retype.py::MethodVariableTestCase::test_no_value",
"tests/test_retype.py::MethodVariableTestCase::test_type_mismatch",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_basic",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_complex",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_default_type",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_no_value",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_no_value_type_comment",
"tests/test_retype.py::MethodVariableTypeCommentTestCase::test_type_mismatch",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_alias_basic",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_alias_many",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_alias_typevar",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_alias_typevar_typing",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_basic",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_complex",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_complex_with_imports",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_default_type",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_no_value",
"tests/test_retype.py::ModuleLevelVariableTestCase::test_type_mismatch",
"tests/test_retype.py::ClassVariableTestCase::test_basic",
"tests/test_retype.py::ClassVariableTestCase::test_complex",
"tests/test_retype.py::ClassVariableTestCase::test_default_type",
"tests/test_retype.py::ClassVariableTestCase::test_instance_fields_assignment_docstring",
"tests/test_retype.py::ClassVariableTestCase::test_instance_fields_assignment_no_docstring",
"tests/test_retype.py::ClassVariableTestCase::test_instance_fields_no_assignment",
"tests/test_retype.py::ClassVariableTestCase::test_instance_fields_no_assignment_docstring",
"tests/test_retype.py::ClassVariableTestCase::test_instance_fields_no_assignment_no_docstring",
"tests/test_retype.py::ClassVariableTestCase::test_no_value",
"tests/test_retype.py::ClassVariableTestCase::test_type_mismatch",
"tests/test_retype.py::SerializeTestCase::test_serialize_attribute",
"tests/test_retype.py::SerializeTestCase::test_serialize_name",
"tests/test_retype.py::PrintStmtTestCase::test_print_stmt_crash",
"tests/test_retype.py::PostProcessTestCase::test_straddling_function_signature_type_comments1",
"tests/test_retype.py::PostProcessTestCase::test_straddling_function_signature_type_comments2",
"tests/test_retype.py::PostProcessTestCase::test_straddling_function_signature_type_comments3",
"tests/test_retype.py::PostProcessTestCase::test_straddling_variable_comments"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2017-06-08 12:07:19+00:00 | mit | 1,070 |
|
ameily__cincoconfig-26 | diff --git a/cincoconfig/fields.py b/cincoconfig/fields.py
index 49b618c..a56555a 100644
--- a/cincoconfig/fields.py
+++ b/cincoconfig/fields.py
@@ -1071,8 +1071,8 @@ class SecureField(Field):
super().__init__(**kwargs)
self.method = method
- def to_basic(self, cfg: BaseConfig, value: str) -> dict:
- if value is None:
+ def to_basic(self, cfg: BaseConfig, value: str) -> Optional[dict]:
+ if not value:
return None
with cfg._keyfile as ctx:
| ameily/cincoconfig | a4efa8aeb094899216ca505d493fc3e2ed88f7ab | diff --git a/tests/test_fields/test_secure.py b/tests/test_fields/test_secure.py
index 51dd307..be54396 100644
--- a/tests/test_fields/test_secure.py
+++ b/tests/test_fields/test_secure.py
@@ -33,6 +33,10 @@ class TestSecureField:
field = SecureField()
assert field.to_basic(None, None) is None
+ def test_to_basic_empty_string(self):
+ field = SecureField()
+ assert field.to_basic(None, "") is None
+
def test_to_basic(self):
cfg = StubConfig()
field = SecureField(method='test')
@@ -48,6 +52,10 @@ class TestSecureField:
field = SecureField()
assert field.to_python(None, None) is None
+ def test_to_python_empty_string(self):
+ field = SecureField()
+ assert field.to_python(None, "") == ""
+
def test_to_python_str(self):
field = SecureField()
assert field.to_python(None, 'hello') == 'hello'
| SecureField - Don't encrypt empty strings
Empty strings should probably be treated as `None` for `SecureField`, the same as a missing value.
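A minimal sketch of the suggested behavior — `serialize_secret` is a stand-in for `SecureField.to_basic`, and the reversed string is only a placeholder for real encryption:
```python
from typing import Optional

def serialize_secret(value: Optional[str]) -> Optional[str]:
    # None and "" are both skipped; nothing gets encrypted
    if not value:
        return None
    return value[::-1]  # placeholder for the field's actual cipher

assert serialize_secret(None) is None
assert serialize_secret("") is None
```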
| 0.0 | a4efa8aeb094899216ca505d493fc3e2ed88f7ab | [
"tests/test_fields/test_secure.py::TestSecureField::test_to_basic_empty_string"
]
| [
"tests/test_fields/test_secure.py::TestSecureField::test_to_basic_none",
"tests/test_fields/test_secure.py::TestSecureField::test_to_basic",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_none",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_empty_string",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_str",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_dict_invalid_method",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_dict_invalid_ciphertext_base64",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_dict_invalid_ciphertext",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_dict_invalid_ciphertext_int",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_dict_valid",
"tests/test_fields/test_secure.py::TestSecureField::test_to_python_invalid_type"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2020-10-29 16:14:35+00:00 | isc | 1,071 |
|
ameily__cincoconfig-40 | diff --git a/cincoconfig/config.py b/cincoconfig/config.py
index 119738e..2657aa7 100644
--- a/cincoconfig/config.py
+++ b/cincoconfig/config.py
@@ -5,6 +5,7 @@
# this source code package.
#
+import os
import sys
from typing import Union, Any, Iterator, Tuple, Callable, List
from argparse import Namespace
@@ -498,6 +499,7 @@ class Config(BaseConfig):
:param format: output format
'''
content = self.dumps(format)
+ filename = os.path.expanduser(filename)
with open(filename, 'wb') as file:
file.write(content)
@@ -508,7 +510,7 @@ class Config(BaseConfig):
:param filename: source filename
:param format: source format
'''
-
+ filename = os.path.expanduser(filename)
with open(filename, 'rb') as file:
content = file.read()
diff --git a/cincoconfig/encryption.py b/cincoconfig/encryption.py
index e404c8e..0a5405e 100644
--- a/cincoconfig/encryption.py
+++ b/cincoconfig/encryption.py
@@ -104,8 +104,9 @@ class KeyFile:
'''
INTERNAL METHOD. Load configuration key.
'''
+ filename = os.path.expanduser(self.filename)
try:
- with open(self.filename, 'rb') as fp:
+ with open(filename, 'rb') as fp:
self.__key = fp.read()
except OSError:
self.__key = self.__generate_key()
@@ -119,7 +120,8 @@ class KeyFile:
:returns: the generated key
'''
key = os.urandom(32)
- with open(self.filename, 'wb') as fp:
+ filename = os.path.expanduser(self.filename)
+ with open(filename, 'wb') as fp:
fp.write(key)
return key
diff --git a/cincoconfig/fields.py b/cincoconfig/fields.py
index bf64778..3ecdc31 100644
--- a/cincoconfig/fields.py
+++ b/cincoconfig/fields.py
@@ -286,6 +286,15 @@ class IPv4NetworkField(StringField):
'''
storage_type = str
+ def __init__(self, min_prefix_len: int = None, max_prefix_len: int = None, **kwargs):
+ '''
+ :param min_prefix_len: minimum subnet prefix length (/X), in bits
+ :param max_prefix_len: maximum subnet prefix length (/X), in bits
+ '''
+ super().__init__(**kwargs)
+ self.min_prefix_len = min_prefix_len
+ self.max_prefix_len = max_prefix_len
+
def _validate(self, cfg: BaseConfig, value: str) -> str:
'''
Validate a value.
@@ -299,6 +308,13 @@ class IPv4NetworkField(StringField):
net = IPv4Network(value)
except ValueError as err:
raise ValueError('value is not a valid IPv4 Network (CIDR)') from err
+
+ if self.min_prefix_len and net.prefixlen < self.min_prefix_len:
+ raise ValueError('value must be at least a /%d subnet' % self.min_prefix_len)
+
+ if self.max_prefix_len and net.prefixlen > self.max_prefix_len:
+ raise ValueError('value must be smaller than a /%d subnet' % self.max_prefix_len)
+
return str(net)
@@ -398,7 +414,7 @@ class FilenameField(StringField):
return value
if not os.path.isabs(value) and self.startdir:
- value = os.path.abspath(os.path.join(self.startdir, value))
+ value = os.path.abspath(os.path.expanduser(os.path.join(self.startdir, value)))
if os.path.sep == '\\':
value = value.replace('/', '\\')
@@ -1217,7 +1233,7 @@ class IncludeField(FilenameField):
:returns: the new basic value tree containing the base tree and the included tree
'''
filename = self.validate(config, filename)
- with open(filename, 'rb') as fp:
+ with open(os.path.expanduser(filename), 'rb') as fp:
content = fp.read()
child = fmt.loads(config, content)
| ameily/cincoconfig | d17a6ef7cd1528bba34797716150119342f9d915 | diff --git a/tests/test_config.py b/tests/test_config.py
index c3b45ec..255e8c3 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -483,3 +483,23 @@ class TestConfig:
mock_validate.assert_called_once_with(config, 2)
assert excinfo.value is orig_exc
+
+ @patch('cincoconfig.config.open', new_callable=mock_open)
+ @patch('cincoconfig.config.os.path.expanduser')
+ def test_save_expanduser(self, expanduser, mop):
+ expanduser.return_value = 'path/to/blah.txt'
+ config = Config(Schema())
+ config.save('~/blah.txt', format='json')
+
+ expanduser.assert_called_once_with('~/blah.txt')
+ mop.assert_called_once_with('path/to/blah.txt', 'wb')
+
+ @patch('cincoconfig.config.open', new_callable=mock_open, read_data=b'{}')
+ @patch('cincoconfig.config.os.path.expanduser')
+ def test_load_expanduser(self, expanduser, mop):
+ expanduser.return_value = 'path/to/blah.txt'
+ config = Config(Schema())
+
+ config.load('~/blah.txt', format='json')
+ expanduser.assert_called_once_with('~/blah.txt')
+ mop.assert_called_once_with('path/to/blah.txt', 'rb')
diff --git a/tests/test_fields/test_include.py b/tests/test_fields/test_include.py
index b9ec29f..414bc05 100644
--- a/tests/test_fields/test_include.py
+++ b/tests/test_fields/test_include.py
@@ -49,7 +49,17 @@ class TestIncludeField:
field.combine_trees.assert_called_once_with(base, fmt.retval)
fmt.loads.assert_called_once_with(cfg, b'hello')
+ @patch('cincoconfig.fields.open', new_callable=mock_open, read_data=b'{}')
+ @patch('cincoconfig.config.os.path.expanduser')
+ def test_include_expanduser(self, expanduser, mop):
+ field = IncludeField()
+ fmt = MockConfigFormat()
+ cfg = MockConfig()
+ expanduser.return_value = '/home/asdf/blah.txt'
+ base = {'b': 2}
-
-
-
+ field.combine_trees = MagicMock(return_value={'a': 1})
+ field.validate = MagicMock(return_value='blah.txt')
+ field.include(cfg, fmt, 'blah.txt', base)
+ mop.assert_called_once_with('/home/asdf/blah.txt', 'rb')
+ expanduser.assert_called_once_with('blah.txt')
diff --git a/tests/test_fields/test_net.py b/tests/test_fields/test_net.py
index 97720a2..7cebfe0 100644
--- a/tests/test_fields/test_net.py
+++ b/tests/test_fields/test_net.py
@@ -41,6 +41,24 @@ class TestIPv4NetworkField:
with pytest.raises(ValueError):
field.validate(MockConfig(), '300.1.2.a/42')
+ def test_min_prefix_good(self):
+ field = IPv4NetworkField(min_prefix_len=8)
+ assert field._validate(MockConfig(), '192.168.0.0/16') == '192.168.0.0/16'
+
+ def test_min_prefix_bad(self):
+ field = IPv4NetworkField(min_prefix_len=16)
+ with pytest.raises(ValueError):
+ field._validate(MockConfig(), '10.0.0.0/8')
+
+ def test_max_prefix_good(self):
+ field = IPv4NetworkField(max_prefix_len=16)
+ assert field._validate(MockConfig(), '10.0.0.0/8') == '10.0.0.0/8'
+
+ def test_max_prefix_bad(self):
+ field = IPv4NetworkField(max_prefix_len=31)
+ with pytest.raises(ValueError):
+ field._validate(MockConfig(), '10.10.10.1/32')
+
class TestHostnameField:
| IPv4Network min/max prefix length
Implement options for validating an IPv4 network with a minimum and maximum prefix length, in bits. For example, these options could be used to filter out single IP addresses (`max_prefix_length = 31`) and to filter out class A networks (`min_prefix_length = 9`).
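A rough standalone sketch of the intended check — the function and parameter names here are illustrative, not the final field API:
```python
from ipaddress import IPv4Network

def check_prefix(value, min_prefix_length=None, max_prefix_length=None):
    net = IPv4Network(value)  # raises ValueError for a malformed CIDR
    if min_prefix_length is not None and net.prefixlen < min_prefix_length:
        raise ValueError('subnet prefix must be at least /%d' % min_prefix_length)
    if max_prefix_length is not None and net.prefixlen > max_prefix_length:
        raise ValueError('subnet prefix must be at most /%d' % max_prefix_length)
    return str(net)

check_prefix('192.168.0.0/16', min_prefix_length=9)    # accepted
# check_prefix('10.10.10.1/32', max_prefix_length=31)  # would raise: single IP
```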
| 0.0 | d17a6ef7cd1528bba34797716150119342f9d915 | [
"tests/test_config.py::TestConfig::test_save_expanduser",
"tests/test_config.py::TestConfig::test_load_expanduser",
"tests/test_fields/test_include.py::TestIncludeField::test_include_expanduser",
"tests/test_fields/test_net.py::TestIPv4NetworkField::test_min_prefix_good",
"tests/test_fields/test_net.py::TestIPv4NetworkField::test_min_prefix_bad",
"tests/test_fields/test_net.py::TestIPv4NetworkField::test_max_prefix_good",
"tests/test_fields/test_net.py::TestIPv4NetworkField::test_max_prefix_bad"
]
| [
"tests/test_config.py::TestConfig::test_setdefault",
"tests/test_config.py::TestConfig::test_subschema",
"tests/test_config.py::TestConfig::test_setattr_field",
"tests/test_config.py::TestConfig::test_setattr_dynamic",
"tests/test_config.py::TestConfig::test_setattr_non_dynamic",
"tests/test_config.py::TestConfig::test_setattr_config_dict",
"tests/test_config.py::TestConfig::test_setattr_value",
"tests/test_config.py::TestConfig::test_setattr_config_no_dict",
"tests/test_config.py::TestConfig::test_getitem",
"tests/test_config.py::TestConfig::test_to_tree",
"tests/test_config.py::TestConfig::test_to_tree_include_virtual",
"tests/test_config.py::TestConfig::test_to_tree_exclude_virtual",
"tests/test_config.py::TestConfig::test_to_tree_empty_mask_secure",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_single",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_multi",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_empty",
"tests/test_config.py::TestConfig::test_dumps_to_tree_args",
"tests/test_config.py::TestConfig::test_iter",
"tests/test_config.py::TestConfig::test_getattr_error",
"tests/test_config.py::TestConfig::test_getattr_dynamic",
"tests/test_config.py::TestConfig::test_setitem",
"tests/test_config.py::TestConfig::test_dumps",
"tests/test_config.py::TestConfig::test_loads",
"tests/test_config.py::TestConfig::test_load",
"tests/test_config.py::TestConfig::test_save",
"tests/test_config.py::TestConfig::test_version",
"tests/test_config.py::TestConfig::test_getitem_nested",
"tests/test_config.py::TestConfig::test_setitem_nested",
"tests/test_config.py::TestConfig::test_include_field",
"tests/test_config.py::TestConfig::test_nested_include",
"tests/test_config.py::TestConfig::test_set_config",
"tests/test_config.py::TestConfig::test_validate",
"tests/test_config.py::TestConfig::test_load_tree_validate",
"tests/test_config.py::TestConfig::test_validator",
"tests/test_config.py::TestConfig::test_cmdline_args_override",
"tests/test_config.py::TestConfig::test_cmdline_args_ocverride_single_ignore",
"tests/test_config.py::TestConfig::test_in_flat",
"tests/test_config.py::TestConfig::test_in_nested",
"tests/test_config.py::TestConfig::test_not_in",
"tests/test_config.py::TestConfig::test_in_not_config",
"tests/test_config.py::TestConfig::test_wrap_validation_error",
"tests/test_config.py::TestConfig::test_validation_error_str",
"tests/test_config.py::TestConfig::test_setattr_validation_error_reraise",
"tests/test_config.py::TestConfig::test_valdiate_wrap_validation_error",
"tests/test_config.py::TestConfig::test_validate_reraise_validation_error",
"tests/test_fields/test_include.py::TestIncludeField::test_combine_trees",
"tests/test_fields/test_include.py::TestIncludeField::test_include",
"tests/test_fields/test_net.py::TestIPv4AddressField::test_invalid_ipv4",
"tests/test_fields/test_net.py::TestIPv4NetworkField::test_invalid_net",
"tests/test_fields/test_net.py::TestHostnameField::test_no_ipv4",
"tests/test_fields/test_net.py::TestHostnameField::test_valid_hostname_resolve",
"tests/test_fields/test_net.py::TestHostnameField::test_valid_dnsname",
"tests/test_fields/test_net.py::TestHostnameField::test_valid_netbios",
"tests/test_fields/test_net.py::TestHostnameField::test_invalid_dnsname",
"tests/test_fields/test_net.py::TestHostnameField::test_resolve_failed",
"tests/test_fields/test_net.py::TestPortField::test_port_valid"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-02-03 00:15:04+00:00 | isc | 1,072 |
|
ameily__cincoconfig-45 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4e0499..5c9760b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,24 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project
adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [v0.7.0] - 2021-02-09
+### Added
+- Support for the `~` home directory symbol. All filenames are passed through the
+ `os.path.expanduser` function.
+- `IPv4NetworkField` now supports setting a minimum and maximum prefix length, in bits.
+- `Field.help` attribute to document the field, similar to a docstring. The `help` and
+  autogenerated `short_help` attributes can be used in a UI to display information and
+ documentation about the field.
+- `BaseSchema.generate_argparse_parser` method to autogenerate an `argparse.ArgumentParser`
+ object to parse command line arguments.
+- `Field.env` and `Schema.env` to control automatically loading configuration values from
+ environment variables.
+
+### Changed
+- Improved error reporting with the `ValidationError` exception that contains the offending
+ field's full name and path.
+
+
## [v0.6.0] - 2020-11-05
### Added
- `Field.sensitive` property to mark a value as sensitive.
diff --git a/cincoconfig/abc.py b/cincoconfig/abc.py
index 9da847f..f433e84 100644
--- a/cincoconfig/abc.py
+++ b/cincoconfig/abc.py
@@ -111,13 +111,52 @@ class Field: # pylint: disable=too-many-instance-attributes
Each Field subclass can define a class or instance level ``storage_type`` which holds the
annotation of the value being stored in memory.
+
+ .. _field-env-variables:
+
+ **Environment Variables**
+
+ Fields can load their default value from an environment variable. The Schema and Field accept
+ an ``env`` argument in the constructor that controls whether and how environment variables are
+ loaded. The default behavior is to not load any environment variables and to honor the
+ :attr:`Field.default` value.
+
+ There are two ways to load a field's default value from an environment variable.
+
+ - ``Schema.env``: Provide ``True`` or a string.
+ - ``Field.env``: Provide ``True`` or a string.
+
+ When ``Schema.env`` or ``Field.env`` is ``None`` (the default), the environment variable
+    configuration is inherited from the parent schema. A value of ``True`` will load the
+ field's default value from an autogenerated environment variable name, based on the field's
+ full path. For example:
+
+ .. code-block:: python
+
+ schema = Schema(env=True)
+ schema.mode = ApplicationModeField(env="APP_MODE")
+ schema.port = PortField(env=False)
+
+ schema.db.host = HostnameField()
+
+ schema.auth = Schema(env="SECRET")
+ schema.auth.username = StringField()
+
+ - The top-level schema is configured to autogenerate and load environment variables for all
+ fields.
+ - ``mode`` is loaded from the ``APP_MODE`` environment variable.
+    - ``port`` is not loaded from any environment variable.
+ - ``db.host`` is loaded from the ``DB_HOST`` environment variable.
+    - The ``auth`` schema has an environment variable prefix of ``SECRET``. All children and nested
+ fields/schemas will start with ``SECRET_``.
+ - The ``auth.username`` field is loaded from the ``SECRET_USERNAME`` environment variable.
'''
storage_type = Any
def __init__(self, *, name: str = None, key: str = None, required: bool = False,
default: Union[Callable, Any] = None,
validator: Callable[['BaseConfig', Any], Any] = None, sensitive: bool = False,
- description: str = None, help: str = None):
+ description: str = None, help: str = None, env: Union[bool, str] = None):
'''
All builtin Fields accept the following keyword parameters.
@@ -140,6 +179,7 @@ class Field: # pylint: disable=too-many-instance-attributes
self.sensitive = sensitive
self.description = description
self.help = help.strip() if help else None
+ self.env = env
@property
def short_help(self) -> Optional[str]:
@@ -236,14 +276,43 @@ class Field: # pylint: disable=too-many-instance-attributes
'''
self.key = key
- def __setdefault__(self, cfg: 'BaseConfig'):
+ if self.env is False:
+ return
+
+ if self.env is True or (self.env is None and isinstance(schema._env_prefix, str)):
+ # Set our environment variable name based on the schema's prefix and our key
+ if isinstance(schema._env_prefix, str) and schema._env_prefix:
+ prefix = schema._env_prefix + '_'
+ else:
+ prefix = ''
+
+ self.env = prefix + self.key.upper()
+
+ def __setdefault__(self, cfg: 'BaseConfig') -> None:
'''
Set the default value of the field in the config. This is called when the config is first
created.
:param cfg: current config
'''
- cfg._data[self.key] = self.default
+ value = None
+
+ if isinstance(self.env, str) and self.env:
+ env_value = os.environ.get(self.env)
+ if env_value:
+ try:
+ env_value = self.validate(cfg, env_value)
+ except ValidationError:
+ raise
+ except Exception as exc:
+ raise ValidationError(cfg, self, exc) from exc
+ else:
+ value = env_value
+
+ if value is None:
+ value = self.default
+
+ cfg._data[self.key] = value
def to_python(self, cfg: 'BaseConfig', value: Any) -> Any:
'''
@@ -345,15 +414,19 @@ class BaseSchema:
'''
storage_type = 'BaseSchema'
- def __init__(self, key: str = None, dynamic: bool = False):
+ def __init__(self, key: str = None, dynamic: bool = False, env: Union[str, bool] = None):
'''
:param key: the schema key, only used for sub-schemas, and stored in the instance as
*_key*
:param dynamic: the schema is dynamic and can contain fields not originally specified in
the schema and stored in the instance as *_dynamic*
+ :param env: the environment variable prefix for this schema and all children schemas, for
+ information, see :ref:`Field Environment Variables <field-env-variables>`
'''
self._key = key
self._dynamic = dynamic
+ self._env_prefix = '' if env is True else env
+
self._fields = OrderedDict() # type: Dict[str, SchemaField]
self.__post_init__()
@@ -370,6 +443,14 @@ class BaseSchema:
'''
self._key = key
+ if self._env_prefix is False:
+ return
+
+ if self._env_prefix is None and isinstance(parent._env_prefix, str):
+ # Set our environment variable prefix to be "{parent}_{key}"
+ prefix = (parent._env_prefix + '_') if parent._env_prefix else ''
+ self._env_prefix = prefix + self._key.upper()
+
def _add_field(self, key: str, field: SchemaField) -> SchemaField:
'''
Add a field to the schema. This method will call ``field.__setkey__(self, key)``.
@@ -379,6 +460,7 @@ class BaseSchema:
self._fields[key] = field
if isinstance(field, (Field, BaseSchema)):
field.__setkey__(self, key)
+
return field
def _get_field(self, key: str) -> Optional[SchemaField]:
diff --git a/cincoconfig/config.py b/cincoconfig/config.py
index 8338c80..be7cc90 100644
--- a/cincoconfig/config.py
+++ b/cincoconfig/config.py
@@ -127,7 +127,7 @@ class Schema(BaseSchema):
'''
field = self._fields.get(name)
if field is None:
- field = self._fields[name] = Schema(name)
+ field = self._add_field(name, Schema())
return field
def __iter__(self) -> Iterator[Tuple[str, SchemaField]]:
@@ -662,6 +662,9 @@ class Config(BaseConfig):
for key, value in tree.items():
field = self._get_field(key)
if isinstance(field, Field):
+ if isinstance(field.env, str) and field.env and os.environ.get(field.env):
+ continue
+
value = field.to_python(self, value)
self.__setattr__(key, value)
diff --git a/cincoconfig/version.py b/cincoconfig/version.py
index ef7eb44..a71c5c7 100644
--- a/cincoconfig/version.py
+++ b/cincoconfig/version.py
@@ -1,1 +1,1 @@
-__version__ = '0.6.0'
+__version__ = '0.7.0'
| ameily/cincoconfig | 7ec669a672397989b7d7131a987d1e6e8faa9f4e | diff --git a/tests/test_baseschema.py b/tests/test_baseschema.py
index 579d216..6b17e63 100644
--- a/tests/test_baseschema.py
+++ b/tests/test_baseschema.py
@@ -6,7 +6,7 @@ class TestBaseSchema:
def test_setkey(self):
schema = BaseSchema()
- schema.__setkey__(None, 'hello')
+ schema.__setkey__(BaseSchema(), 'hello')
assert schema._key == 'hello'
def test_add_field_field(self):
@@ -35,3 +35,25 @@ class TestBaseSchema:
def test_get_field_no_exists(self):
schema = BaseSchema()
assert schema._get_field('hello') is None
+
+ def test_env_true(self):
+ schema = BaseSchema(env=True)
+ assert schema._env_prefix == '' and isinstance(schema._env_prefix, str)
+
+ def test_setkey_inherit_env(self):
+ schema = BaseSchema(env=True)
+ child = BaseSchema()
+ child.__setkey__(schema, 'child')
+ assert child._env_prefix == 'CHILD'
+
+ def test_setkey_inherit_env_append(self):
+ schema = BaseSchema(env='ASDF')
+ child = BaseSchema()
+ child.__setkey__(schema, 'child')
+ assert child._env_prefix == 'ASDF_CHILD'
+
+ def test_setkey_env_false(self):
+ schema = BaseSchema(env='ASDF')
+ child = BaseSchema(env=False)
+ child.__setkey__(schema, 'child')
+ assert child._env_prefix is False
diff --git a/tests/test_config.py b/tests/test_config.py
index 255e8c3..b97e085 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,4 +1,5 @@
import argparse
+import os
from unittest.mock import MagicMock, patch, mock_open
import pytest
@@ -325,6 +326,17 @@ class TestConfig:
config.load_tree({'x': 1})
mock_validate.assert_called_once_with()
+ @patch('cincoconfig.config.os')
+ def test_load_tree_ignore_env(self, mock_os):
+ env = mock_os.environ.get.return_value = object()
+ schema = Schema()
+ schema.x = Field(env='ASDF')
+ cfg = schema()
+ cfg._data = {'x': 'qwer'}
+ cfg.load_tree({'x': 'asdf'})
+ assert cfg._data == {'x': 'qwer'}
+ mock_os.environ.get.assert_called_once_with('ASDF')
+
def test_validator(self):
validator = MagicMock()
schema = Schema()
diff --git a/tests/test_fields/test_field.py b/tests/test_fields/test_field.py
index b216784..ae5acad 100644
--- a/tests/test_fields/test_field.py
+++ b/tests/test_fields/test_field.py
@@ -4,10 +4,11 @@
# This file is subject to the terms and conditions defined in the file 'LICENSE', which is part of
# this source code package.
#
+import os
from unittest.mock import patch, MagicMock
import pytest
-from cincoconfig.abc import Field
+from cincoconfig.abc import Field, BaseSchema, BaseConfig, ValidationError
class MockConfig:
@@ -16,6 +17,7 @@ class MockConfig:
self._data = data or {}
self._parent = parent
self._key = key
+ self._schema = BaseSchema()
def _full_path(self):
return ''
@@ -54,7 +56,7 @@ class TestBaseField:
def test_setkey(self):
field = Field()
- field.__setkey__(self.cfg, 'key')
+ field.__setkey__(self.cfg._schema, 'key')
assert field.key == 'key'
def test_setdefault(self):
@@ -152,3 +154,85 @@ class TestBaseField:
field = Field(help='\n\nfirst\nsecond\nthird.\n\nmore\n\n')
assert field.short_help == 'first\nsecond\nthird.'
assert field.help == 'first\nsecond\nthird.\n\nmore'
+
+ def test_env_true(self):
+ schema = BaseSchema()
+ field = Field(env=True)
+ field.__setkey__(schema, 'field')
+ assert field.env == 'FIELD'
+
+ def test_setkey_inherit_env(self):
+ schema = BaseSchema(env=True)
+ field = Field()
+ field.__setkey__(schema, 'field')
+ assert field.env == 'FIELD'
+
+ def test_setkey_inherit_env_append(self):
+ schema = BaseSchema(env='APP')
+ field = Field()
+ field.__setkey__(schema, 'field')
+ assert field.env == 'APP_FIELD'
+
+ def test_setkey_env_false(self):
+ schema = BaseSchema(env=True)
+ field = Field(env=False)
+ field.__setkey__(schema, 'field')
+ assert field.env is False
+
+ @patch.object(os.environ, 'get')
+ def test_setdefault_env_exists(self, mock_environ_get):
+ retval = mock_environ_get.return_value = object()
+ cfg = BaseConfig(schema=BaseSchema())
+ field = Field(env='ASDF', key='field')
+ field.__setdefault__(cfg)
+ assert cfg._data == {'field': retval}
+ mock_environ_get.assert_called_once_with('ASDF')
+
+ @patch.object(os.environ, 'get')
+ def test_setdefault_env_exists_valid(self, mock_environ_get):
+ env = mock_environ_get.return_value = object()
+ retval = object()
+ cfg = BaseConfig(schema=BaseSchema())
+ field = Field(env='ASDF', key='field')
+ field.validate = MagicMock(return_value=retval)
+ field.__setdefault__(cfg)
+ field.validate.assert_called_once_with(cfg, env)
+ assert cfg._data == {'field': retval}
+
+ @patch.object(os.environ, 'get')
+ def test_setdefault_env_exists_invalid(self, mock_environ_get):
+ env = mock_environ_get.return_value = object()
+ retval = object()
+ cfg = BaseConfig(schema=BaseSchema())
+ field = Field(env='ASDF', key='field')
+ field.validate = MagicMock(side_effect=ValueError())
+ field._default = retval
+ with pytest.raises(ValidationError):
+ field.__setdefault__(cfg)
+
+ field.validate.assert_called_once_with(cfg, env)
+
+ @patch.object(os.environ, 'get')
+ def test_setdefault_env_exists_invalid_validationerror(self, mock_environ_get):
+ env = mock_environ_get.return_value = object()
+ retval = object()
+ cfg = BaseConfig(schema=BaseSchema())
+ field = Field(env='ASDF', key='field')
+ err = ValidationError(cfg, field, ValueError('asdf'))
+ field.validate = MagicMock(side_effect=err)
+ field._default = retval
+ with pytest.raises(ValidationError) as exc:
+ field.__setdefault__(cfg)
+
+ assert exc.value is err
+
+ @patch.object(os.environ, 'get')
+ def test_setdefault_env_not_exists(self, mock_environ_get):
+ mock_environ_get.return_value = None
+ retval = object()
+ cfg = BaseConfig(schema=BaseSchema())
+ field = Field(env='ASDF', key='field')
+ field._default = retval
+ field.__setdefault__(cfg)
+ assert cfg._data == {'field': retval}
+ mock_environ_get.assert_called_once_with('ASDF')
diff --git a/tests/test_fields/test_string.py b/tests/test_fields/test_string.py
index 9f9207d..858d069 100644
--- a/tests/test_fields/test_string.py
+++ b/tests/test_fields/test_string.py
@@ -24,6 +24,7 @@ class MockSchema:
def __init__(self):
self._fields = {}
+ self._env_prefix = False
def _add_field(self, name, field):
self._fields[name] = field
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 5ea55ba..074385e 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -116,6 +116,13 @@ class TestSchema:
with pytest.raises(KeyError):
y = schema['x.y']
+ def test_getattr_add_field(self):
+ schema = Schema()
+ mock_add_field = MagicMock(return_value=Schema())
+ object.__setattr__(schema, '_add_field', mock_add_field)
+ schema.x.y = 2
+ mock_add_field.assert_called_once()
+
@patch('cincoconfig.config.ArgumentParser')
def test_generate_argparse_parser(self, mock_argparse):
parser = MagicMock()
| Field Environment Variable
Create a new attribute for the `Field` class, `Field.env`, that specifies the environment variable that overrides the config value. The env variable would override both the `Field.default` and any values loaded from a configuration file. So, if a configuration file sets a field to `X` but the corresponding environment variable is set to `Y`, the config's value would be `Y` (the env variable.)
To make it easier, there will be a helper value that will autogenerate the environment variable based on the `Field.key`:
```python
# these two lines are equivalent
schema.db.host = HostnameField(env='DB_HOST')
schema.db.host = HostnameField(env=True)
``` | 0.0 | 7ec669a672397989b7d7131a987d1e6e8faa9f4e | [
"tests/test_baseschema.py::TestBaseSchema::test_env_true",
"tests/test_baseschema.py::TestBaseSchema::test_setkey_inherit_env",
"tests/test_baseschema.py::TestBaseSchema::test_setkey_inherit_env_append",
"tests/test_baseschema.py::TestBaseSchema::test_setkey_env_false",
"tests/test_config.py::TestConfig::test_load_tree_ignore_env",
"tests/test_fields/test_field.py::TestBaseField::test_env_true",
"tests/test_fields/test_field.py::TestBaseField::test_setkey_inherit_env",
"tests/test_fields/test_field.py::TestBaseField::test_setkey_inherit_env_append",
"tests/test_fields/test_field.py::TestBaseField::test_setkey_env_false",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault_env_exists",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault_env_exists_valid",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault_env_exists_invalid",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault_env_exists_invalid_validationerror",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault_env_not_exists",
"tests/test_schema.py::TestSchema::test_getattr_add_field"
]
| [
"tests/test_baseschema.py::TestBaseSchema::test_setkey",
"tests/test_baseschema.py::TestBaseSchema::test_add_field_field",
"tests/test_baseschema.py::TestBaseSchema::test_add_field_schema",
"tests/test_baseschema.py::TestBaseSchema::test_add_field_other",
"tests/test_baseschema.py::TestBaseSchema::test_get_field_exists",
"tests/test_baseschema.py::TestBaseSchema::test_get_field_no_exists",
"tests/test_config.py::TestConfig::test_setdefault",
"tests/test_config.py::TestConfig::test_subschema",
"tests/test_config.py::TestConfig::test_setattr_field",
"tests/test_config.py::TestConfig::test_setattr_dynamic",
"tests/test_config.py::TestConfig::test_setattr_non_dynamic",
"tests/test_config.py::TestConfig::test_setattr_config_dict",
"tests/test_config.py::TestConfig::test_setattr_value",
"tests/test_config.py::TestConfig::test_setattr_config_no_dict",
"tests/test_config.py::TestConfig::test_getitem",
"tests/test_config.py::TestConfig::test_to_tree",
"tests/test_config.py::TestConfig::test_to_tree_include_virtual",
"tests/test_config.py::TestConfig::test_to_tree_exclude_virtual",
"tests/test_config.py::TestConfig::test_to_tree_empty_mask_secure",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_single",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_multi",
"tests/test_config.py::TestConfig::test_to_tree_sensitive_mask_empty",
"tests/test_config.py::TestConfig::test_dumps_to_tree_args",
"tests/test_config.py::TestConfig::test_iter",
"tests/test_config.py::TestConfig::test_getattr_error",
"tests/test_config.py::TestConfig::test_getattr_dynamic",
"tests/test_config.py::TestConfig::test_setitem",
"tests/test_config.py::TestConfig::test_dumps",
"tests/test_config.py::TestConfig::test_loads",
"tests/test_config.py::TestConfig::test_load",
"tests/test_config.py::TestConfig::test_save",
"tests/test_config.py::TestConfig::test_version",
"tests/test_config.py::TestConfig::test_getitem_nested",
"tests/test_config.py::TestConfig::test_setitem_nested",
"tests/test_config.py::TestConfig::test_include_field",
"tests/test_config.py::TestConfig::test_nested_include",
"tests/test_config.py::TestConfig::test_set_config",
"tests/test_config.py::TestConfig::test_validate",
"tests/test_config.py::TestConfig::test_load_tree_validate",
"tests/test_config.py::TestConfig::test_validator",
"tests/test_config.py::TestConfig::test_cmdline_args_override",
"tests/test_config.py::TestConfig::test_cmdline_args_ocverride_single_ignore",
"tests/test_config.py::TestConfig::test_in_flat",
"tests/test_config.py::TestConfig::test_in_nested",
"tests/test_config.py::TestConfig::test_not_in",
"tests/test_config.py::TestConfig::test_in_not_config",
"tests/test_config.py::TestConfig::test_wrap_validation_error",
"tests/test_config.py::TestConfig::test_validation_error_str",
"tests/test_config.py::TestConfig::test_setattr_validation_error_reraise",
"tests/test_config.py::TestConfig::test_valdiate_wrap_validation_error",
"tests/test_config.py::TestConfig::test_validate_reraise_validation_error",
"tests/test_config.py::TestConfig::test_save_expanduser",
"tests/test_config.py::TestConfig::test_load_expanduser",
"tests/test_fields/test_field.py::TestBaseField::test_default_value",
"tests/test_fields/test_field.py::TestBaseField::test_default_callable",
"tests/test_fields/test_field.py::TestBaseField::test_name",
"tests/test_fields/test_field.py::TestBaseField::test_key_name",
"tests/test_fields/test_field.py::TestBaseField::test_setval",
"tests/test_fields/test_field.py::TestBaseField::test_getval",
"tests/test_fields/test_field.py::TestBaseField::test_setkey",
"tests/test_fields/test_field.py::TestBaseField::test_setdefault",
"tests/test_fields/test_field.py::TestBaseField::test_to_python",
"tests/test_fields/test_field.py::TestBaseField::test_to_basic",
"tests/test_fields/test_field.py::TestBaseField::test__validate",
"tests/test_fields/test_field.py::TestBaseField::test_required",
"tests/test_fields/test_field.py::TestBaseField::test_not_required",
"tests/test_fields/test_field.py::TestBaseField::test_validate_value",
"tests/test_fields/test_field.py::TestBaseField::test_validate_validator_valid",
"tests/test_fields/test_field.py::TestBaseField::test_validate_validator_invalid",
"tests/test_fields/test_field.py::TestBaseField::test_friendly_name_with_name",
"tests/test_fields/test_field.py::TestBaseField::test_friendly_name_same",
"tests/test_fields/test_field.py::TestBaseField::test_friendly_name_no_name",
"tests/test_fields/test_field.py::TestBaseField::test_full_path_flat",
"tests/test_fields/test_field.py::TestBaseField::test_full_path_nested",
"tests/test_fields/test_field.py::TestBaseField::test_short_help_none",
"tests/test_fields/test_field.py::TestBaseField::test_short_help_everything",
"tests/test_fields/test_field.py::TestBaseField::test_short_help_paragraph",
"tests/test_fields/test_string.py::TestStringField::test_invalid_case",
"tests/test_fields/test_string.py::TestStringField::test_case_lower",
"tests/test_fields/test_string.py::TestStringField::test_case_upper",
"tests/test_fields/test_string.py::TestStringField::test_case_preserve",
"tests/test_fields/test_string.py::TestStringField::test_min_length_valid",
"tests/test_fields/test_string.py::TestStringField::test_min_length_invalid",
"tests/test_fields/test_string.py::TestStringField::test_max_length_valid",
"tests/test_fields/test_string.py::TestStringField::test_max_length_invalid",
"tests/test_fields/test_string.py::TestStringField::test_regex_match",
"tests/test_fields/test_string.py::TestStringField::test_regex_no_match",
"tests/test_fields/test_string.py::TestStringField::test_strip_preserve",
"tests/test_fields/test_string.py::TestStringField::test_strip_whitespace",
"tests/test_fields/test_string.py::TestStringField::test_strip_custom",
"tests/test_fields/test_string.py::TestStringField::test_choice_valid",
"tests/test_fields/test_string.py::TestStringField::test_choice_invalid",
"tests/test_fields/test_string.py::TestStringField::test_choice_lower_valid",
"tests/test_fields/test_string.py::TestStringField::test_choice_error_message_list",
"tests/test_fields/test_string.py::TestStringField::test_choice_error_message_too_many",
"tests/test_fields/test_string.py::TestStringField::test_non_string",
"tests/test_fields/test_string.py::TestStringField::test_empty_string_requied",
"tests/test_fields/test_string.py::TestStringField::test_empty_string_not_required",
"tests/test_fields/test_string.py::TestLogLevelField::test_default_levels",
"tests/test_fields/test_string.py::TestLogLevelField::test_custom_levels",
"tests/test_fields/test_string.py::TestLogLevelField::test_custom_case",
"tests/test_fields/test_string.py::TestLogLevelField::test_custom_strip",
"tests/test_fields/test_string.py::TestApplicationModeField::test_default_levels",
"tests/test_fields/test_string.py::TestApplicationModeField::test_create_helpers",
"tests/test_fields/test_string.py::TestApplicationModeField::test_call_helpers",
"tests/test_fields/test_string.py::TestApplicationModeField::test_no_helpers",
"tests/test_fields/test_string.py::TestApplicationModeField::test_invalid_mode_name[he",
"tests/test_fields/test_string.py::TestApplicationModeField::test_invalid_mode_name[hel-lo]",
"tests/test_fields/test_string.py::TestApplicationModeField::test_invalid_mode_name[$hello]",
"tests/test_fields/test_string.py::TestApplicationModeField::test_invalid_mode_name[>hello]",
"tests/test_fields/test_string.py::TestUrlField::test_valid_url",
"tests/test_fields/test_string.py::TestUrlField::test_invalid_url",
"tests/test_schema.py::TestSchema::test_setattr_field",
"tests/test_schema.py::TestSchema::test_getattr",
"tests/test_schema.py::TestSchema::test_getattr_new",
"tests/test_schema.py::TestSchema::test_iter",
"tests/test_schema.py::TestSchema::test_call",
"tests/test_schema.py::TestSchema::test_make_type",
"tests/test_schema.py::TestSchema::test_instance_method_decorator",
"tests/test_schema.py::TestSchema::test_validate_ignore_methods",
"tests/test_schema.py::TestSchema::test_get_all_fields",
"tests/test_schema.py::TestSchema::test_getitem",
"tests/test_schema.py::TestSchema::test_getitem_keyerror",
"tests/test_schema.py::TestSchema::test_getitem_keyerror_not_schema",
"tests/test_schema.py::TestSchema::test_generate_argparse_parser"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-02-07 16:36:03+00:00 | isc | 1,073 |
|
ameserole__Akeso-14 | diff --git a/Akeso/AttackWorkers.py b/Akeso/AttackWorkers.py
index 97afaaf..87def44 100644
--- a/Akeso/AttackWorkers.py
+++ b/Akeso/AttackWorkers.py
@@ -9,14 +9,6 @@ from ServiceManager import ServiceInfo
logger = structlog.get_logger()
-def challenge_mapper(challenge):
- return {
- 'maze': ('maze', 'mazeAttack', 'maze', 31337),
- 'SQL': ('sqlisimple', 'SQLi', 'SQLiSimple', 80),
- 'shell': ('shell', 'shellAttack', 'shell', 4001),
- }[challenge]
-
-
def attackCallback(ch, method, properties, body):
"""Pull service off of attack queue and run selected attack against it"""
@@ -26,7 +18,7 @@ def attackCallback(ch, method, properties, body):
resultChannel.queue_declare(queue='resultQueue', durable=True)
body = json.loads(body)
- info = challenge_mapper(body['chal'])
+ info = config.challenge_mapper(body['chal'])
if 'serviceName' in body:
serviceName = body['serviceName']
@@ -37,8 +29,8 @@ def attackCallback(ch, method, properties, body):
'serviceName': serviceName,
'imageName': info[0],
'userInfo': body['userInfo'],
- 'exploitModule': info[1],
- 'serviceCheckName': info[2],
+ 'exploitModules': info[1],
+ 'serviceCheckNames': info[2],
'serviceHost': body['serviceHost'],
'servicePort': info[3]
}
@@ -51,48 +43,69 @@ def attackCallback(ch, method, properties, body):
log = logger.bind(service=service.__dict__)
userMsg = "Starting Attack on {} {}\n".format(service.imageName, service.userInfo)
-
- # Get the Service module for this service and check that it is running correctly
- serviceCheckModuleName = 'Services.' + service.serviceCheckName + '.' + service.serviceCheckName
- serviceModule = importlib.import_module(serviceCheckModuleName, package=None)
- serviceCheckObject = serviceModule.ServiceCheck(service)
-
- if serviceCheckObject.checkService():
- log.info('attackCallback', msg="Service Check Succeeded")
- userMsg = "Service Check Succeeded"
- else:
- log.info('attackCallback', msg="Service Check Failed")
- userMsg = "Service Check Failed"
- resultChannel.basic_publish(exchange='resultX',
- routing_key=str(service.userInfo),
- body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
- ch.basic_ack(delivery_tag=method.delivery_tag)
- return -1
-
- # If the service is running correctly grab the selected exploit module and run it against the current service
- exploitModuleName = 'Exploits.' + service.exploitModule
- exploitModule = importlib.import_module(exploitModuleName, package=None)
- exploitObject = exploitModule.Exploit(service)
- exploitObject.exploit()
-
- exploitSuccess = exploitObject.exploitSuccess()
-
- if exploitSuccess:
- userMsg = "Your Code/Config was exploited."
- log.info("attackCallback", msg="Exploit Success")
- resultChannel.basic_publish(exchange='resultX',
- routing_key=str(service.userInfo),
- body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
-
- ch.basic_ack(delivery_tag=method.delivery_tag)
- return -1
-
- else:
- userMsg = "Attack Failed"
- log.info("attackCallback", msg=userMsg)
+ for serviceCheckName in service.serviceCheckNames:
+ # Get the Service module for this service and check that it is running correctly
+ serviceCheckModuleName = 'Services.' + serviceCheckName + '.' + serviceCheckName
+ serviceModule = importlib.import_module(serviceCheckModuleName, package=None)
+ serviceCheckObject = serviceModule.ServiceCheck(service)
+
+ if serviceCheckObject.checkService():
+ log.info('attackCallback', msg="Service Check Succeeded")
+ userMsg = "Service Check Succeeded"
+ else:
+ log.info('attackCallback', msg="Service Check Failed")
+ userMsg = "Service Check Failed"
+ resultChannel.basic_publish(exchange='resultX',
+ routing_key=str(service.userInfo),
+ body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
+ ch.basic_ack(delivery_tag=method.delivery_tag)
+ return -1
+
+ for exploitModule in service.exploitModules:
+ # If the service is running correctly grab the selected exploit module and run it against the current service
+ exploitModuleName = 'Exploits.' + exploitModule
+ exploitModule = importlib.import_module(exploitModuleName, package=None)
+ exploitObject = exploitModule.Exploit(service)
+ exploitObject.exploit()
+
+ exploitSuccess = exploitObject.exploitSuccess()
+
+ if exploitSuccess:
+ userMsg = "Your Code/Config was exploited."
+ log.info("attackCallback", msg="Exploit Success")
+ resultChannel.basic_publish(exchange='resultX',
+ routing_key=str(service.userInfo),
+ body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
+
+ ch.basic_ack(delivery_tag=method.delivery_tag)
+ return -1
+
+ else:
+ userMsg = "Attack Failed"
+ log.info("attackCallback", msg=userMsg)
# Check to see if the service is still up after the exploit was run
- checkService = serviceCheckObject.checkService()
+ # checkService = serviceCheckObject.checkService()
+
+ checkService = False
+ for serviceCheckName in service.serviceCheckNames:
+ # Get the Service module for this service and check that it is running correctly
+ serviceCheckModuleName = 'Services.' + serviceCheckName + '.' + serviceCheckName
+ serviceModule = importlib.import_module(serviceCheckModuleName, package=None)
+ serviceCheckObject = serviceModule.ServiceCheck(service)
+
+ checkService = serviceCheckObject.checkService()
+ if checkService:
+ log.info('attackCallback', msg="Service Check Succeeded")
+ userMsg = "Service Check Succeeded"
+ else:
+ log.info('attackCallback', msg="Service Check Failed After Attack")
+ userMsg = "Service Check Failed"
+ resultChannel.basic_publish(exchange='resultX',
+ routing_key=str(service.userInfo),
+ body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
+ ch.basic_ack(delivery_tag=method.delivery_tag)
+ return -1
# If Service is still up and exploit did not work return the flag to the user
if not exploitSuccess and checkService:
@@ -102,17 +115,6 @@ def attackCallback(ch, method, properties, body):
routing_key=str(service.userInfo),
body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
- # No flag for you :(
- elif not exploitSuccess and not checkService:
- log.info('attackCallback', msg="Service Check Failed After Attack")
- userMsg = "Service Check Failed After Attack"
- resultChannel.basic_publish(exchange='resultX',
- routing_key=str(service.userInfo),
- body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
-
- ch.basic_ack(delivery_tag=method.delivery_tag)
- return -1
-
ch.basic_ack(delivery_tag=method.delivery_tag)
return 1
diff --git a/Akeso/ServiceManager.py b/Akeso/ServiceManager.py
index 503e486..6557d8d 100644
--- a/Akeso/ServiceManager.py
+++ b/Akeso/ServiceManager.py
@@ -10,6 +10,6 @@ class ServiceInfo(object):
self.imageName = info['imageName']
self.serviceHost = info['serviceHost']
self.servicePort = info['servicePort']
- self.exploitModule = info['exploitModule']
- self.serviceCheckName = info['serviceCheckName']
+ self.exploitModules = info['exploitModules']
+ self.serviceCheckNames = info['serviceCheckNames']
self.userInfo = info['userInfo']
diff --git a/Akeso/config.py b/Akeso/config.py
index 2e6e884..a1718c7 100644
--- a/Akeso/config.py
+++ b/Akeso/config.py
@@ -8,3 +8,12 @@ SERVICE_PATH = os.path.join(os.getcwd(), 'Services/')
# Address of the RabbitMQ server
RABBITMQ_SERVER = '172.17.0.2'
+
+
+def challenge_mapper(challenge):
+ return {
+ 'maze': ('maze', ['mazeAttack'], ['maze'], 31337),
+ 'SQL': ('sqlisimple', ['SQLi'], ['SQLiSimple'], 80),
+ 'shell': ('shell', ['shellAttack'], ['shell'], 4001),
+ 'nginx': ('nginx', ['DirectoryTraversal'], ['ApacheDirectoryTraversal'], 80)
+ }[challenge]
| ameserole/Akeso | 3dff3ad4c12918a3fc353f8a9a7a4bea461e8da3 | diff --git a/tests/conftest.py b/tests/conftest.py
index 3b56e0c..b11e9b4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,8 +10,8 @@ def fake_service():
'volumeLocation': 'fakeVolume',
'serviceHost': '127.0.0.1',
'servicePort': 80,
- 'exploitModule': 'fakeExploit',
- 'serviceCheckName': 'fakeCheck',
+ 'exploitModules': ['fakeExploit'],
+ 'serviceCheckNames': ['fakeCheck'],
'userInfo': '1'}
return ServiceInfo(fakeInfo)
diff --git a/tests/test_ServiceManager.py b/tests/test_ServiceManager.py
index c714d7b..28c991f 100644
--- a/tests/test_ServiceManager.py
+++ b/tests/test_ServiceManager.py
@@ -7,8 +7,8 @@ def test_ServiceInfo():
'imageName': 'fakeImage',
'serviceHost': '127.0.0.1',
'servicePort': 80,
- 'exploitModule': 'fakeExploit',
- 'serviceCheckName': 'fakeCheck',
+ 'exploitModules': ['fakeExploit'],
+ 'serviceCheckNames': ['fakeCheck'],
'userInfo': 'fakeInfo'}
service = ServiceInfo(fakeInfo)
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000..6434cad
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,13 @@
+import pytest
+from Akeso.config import challenge_mapper
+
+
[email protected]("challenge, expected_ret", [
+ ('maze', ('maze', ['mazeAttack'], ['maze'], 31337)),
+ ('SQL', ('sqlisimple', ['SQLi'], ['SQLiSimple'], 80)),
+ ('shell', ('shell', ['shellAttack'], ['shell'], 4001)),
+ ('nginx', ('nginx', ['DirectoryTraversal'], ['ApacheDirectoryTraversal'], 80))
+])
+def test_challenge_mapper(challenge, expected_ret):
+ ret = challenge_mapper(challenge)
+ assert ret == expected_ret
| Add Support for Multiple Service and Exploit Checks
Currently only one service check and one exploit module can run per attack; a configuration sketch follows this record. | 0.0 | 3dff3ad4c12918a3fc353f8a9a7a4bea461e8da3 | [
"tests/test_ServiceManager.py::test_ServiceInfo",
"tests/test_config.py::test_challenge_mapper[maze-expected_ret0]",
"tests/test_config.py::test_challenge_mapper[SQL-expected_ret1]",
"tests/test_config.py::test_challenge_mapper[shell-expected_ret2]",
"tests/test_config.py::test_challenge_mapper[nginx-expected_ret3]"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-05-30 13:48:44+00:00 | mit | 1,074 |
|
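To make the Akeso change above concrete, here is a minimal usage sketch. The challenge table values come straight from the patched `config.py` and the `ServiceInfo` fields mirror the updated tests; the import path for `ServiceInfo` and the literal service values are assumptions:
```python
from Akeso.config import challenge_mapper
from Akeso.ServiceManager import ServiceInfo  # import path assumed

# Each challenge now maps to *lists* of exploit modules and service
# checks, so several of each can run against a single service.
image, exploits, checks, port = challenge_mapper('SQL')

service = ServiceInfo({
    'serviceName': 'demo-sqli',      # hypothetical values
    'imageName': image,              # 'sqlisimple'
    'serviceHost': '127.0.0.1',
    'servicePort': port,             # 80
    'exploitModules': exploits,      # ['SQLi']
    'serviceCheckNames': checks,     # ['SQLiSimple']
    'userInfo': '1',
})
```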
amgedr__webchk-21 | diff --git a/setup.py b/setup.py
index 2dd5cd5..456ffbf 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ setup(
version='1.1.0',
packages=['webchk'],
test_suite='test',
- url='https://codehill.com/projects/webchk',
+ url='https://webchk.codehill.com',
license="MIT license",
author='Amged Rustom',
author_email='[email protected]',
diff --git a/webchk/__init__.py b/webchk/__init__.py
index 12f478c..9997150 100644
--- a/webchk/__init__.py
+++ b/webchk/__init__.py
@@ -2,4 +2,4 @@ __version__ = '1.1.0'
__cmd_description__ = """Check HTTP status codes, response headers and redirects.
This is free software and it comes with absolutely no warranty.
You can distribute and modify it under terms of MIT License.
-Homepage: https://codehill.com/projects/webchk"""
+Homepage: https://webchk.codehill.com"""
diff --git a/webchk/__main__.py b/webchk/__main__.py
index ff2671f..f35ff14 100644
--- a/webchk/__main__.py
+++ b/webchk/__main__.py
@@ -1,20 +1,18 @@
import sys
import threading
-from .utils import get_parser, read_input_file
+from .utils import get_parser, read_input_file, format_headers
from .http import http_response, HTTPRequests
from . import __version__
-def _process_url(url, requests, get_request):
- resp = http_response(
- url=url,
- timeout=requests.timeout,
- parse=requests.parse_xml,
- get_request=get_request,
- )
+def _process_url(url, requests: HTTPRequests):
+ resp = http_response(url, requests)
print(resp, file=requests.output_file)
+ if requests.show_headers:
+ print('{}\n'.format(format_headers(resp.headers)))
+
follow = resp.redirect
while follow:
print(' {}'.format(follow), file=requests.output_file)
@@ -44,11 +42,8 @@ def process_urls(requests: HTTPRequests):
continue
thread = threading.Thread(
- target=_process_url, args=(
- url,
- requests,
- requests.get_request,
- )
+ target=_process_url,
+ args=(url, requests)
)
thread.start()
threads.append(thread)
@@ -77,7 +72,10 @@ def main():
list_only=args.list,
parse_xml=args.parse,
timeout=args.timeout,
+ show_headers=args.all,
get_request=args.get,
+ user_agent=args.agent,
+ auth=args.auth,
)
if args.urls:
diff --git a/webchk/http.py b/webchk/http.py
index b6e0528..85491ea 100644
--- a/webchk/http.py
+++ b/webchk/http.py
@@ -1,20 +1,33 @@
-import collections
import http.client
from urllib.parse import urlparse
import socket
import ssl
+import sys
import timeit
+from . import __version__
from webchk.utils import urls_from_xml
-HTTPRequests = collections.namedtuple(
- 'HTTPRequests',
- [
- 'urls', 'output_file', 'list_only', 'parse_xml', 'timeout',
- 'get_request',
- ]
-)
+class HTTPRequests:
+ def __init__(self, urls, output_file=sys.stdout, list_only=False,
+ parse_xml=False, timeout=3, show_headers=False,
+ headers=None, get_request=False, auth=None,
+ user_agent=None) -> None:
+ self.urls = urls
+ self.output_file = output_file
+ self.list_only = list_only
+ self.parse_xml = parse_xml
+ self.timeout = timeout
+ self.get_request = get_request
+ self.show_headers = show_headers
+
+ self.headers = headers if headers is not None else {}
+ if not user_agent:
+ user_agent = f'webchk v{__version__}'
+ self.headers['User-Agent'] = user_agent
+ if auth:
+ self.headers['Authorization'] = auth
class Result:
@@ -79,16 +92,14 @@ def _http_connect(loc, timeout):
return http.client.HTTPConnection(loc.netloc, timeout=timeout)
-def _http_request(loc, timeout, get_request=False):
- """Performs a HTTP request and return response in a Result object.
-
- Does a HEAD HTTP request if get_request is False and GET if True.
- """
+def _http_request(loc, req: HTTPRequests):
+ """Performs a HTTP request and return response in a Result object."""
+ conn = None
try:
- conn = _http_connect(loc, timeout)
- method = 'GET' if get_request else 'HEAD'
+ conn = _http_connect(loc, req.timeout)
+ method = 'GET' if req.get_request or req.parse_xml else 'HEAD'
- conn.request(method, loc.path)
+ conn.request(method, loc.path, headers=req.headers)
resp = conn.getresponse()
result = Result(loc.geturl())
@@ -97,17 +108,19 @@ def _http_request(loc, timeout, get_request=False):
result.fill_headers(resp.getheaders())
# status code is not 204 (no content) and not a redirect
- if get_request and resp.status not in (204, 301, 302, 303, 307, 308):
- result.content = resp.read().decode('utf-8')
+ is_not_redirect = resp.status not in (204, 301, 302, 303, 307, 308)
+ if (req.get_request or req.parse_xml) and is_not_redirect:
+ result.content = resp.read()
except TimeoutError:
raise
finally:
- conn.close()
+ if conn:
+ conn.close()
return result
-def http_response(url, timeout, parse=False, get_request=False):
+def http_response(url, requests: HTTPRequests):
"""Returns the HTTP response code.
If the response code is a temporary or permanent redirect then it
@@ -125,10 +138,7 @@ def http_response(url, timeout, parse=False, get_request=False):
try:
start = timeit.default_timer()
- # true if user wants HTTP GET or asked for the content to be parsed
- force_get = get_request or (parse and url.endswith('.xml'))
-
- result = _http_request(loc, timeout, get_request=force_get)
+ result = _http_request(loc, requests)
result.latency = '{:2.3}'.format(timeit.default_timer() - start)
if 400 <= result.status < 500:
@@ -152,25 +162,26 @@ def http_response(url, timeout, parse=False, get_request=False):
if new_url.startswith('/'):
new_url = '{}://{}{}'.format(
loc.scheme, loc.netloc, new_url)
- result.redirect = http_response(
- new_url, timeout, parse=parse, get_request=get_request)
+ result.redirect = http_response(new_url, requests)
- if result.content and parse:
+ if result.content and requests.parse_xml:
+ requests.parse_xml = False
sitemap = urls_from_xml(result.content)
result.sitemap_urls = []
for s_url in sitemap:
# some sites include the sitemap's url in the sitemap
if s_url == result.url:
continue
- result.sitemap_urls.append(
- http_response(s_url, timeout, get_request=get_request))
+ result.sitemap_urls.append(http_response(s_url, requests))
except socket.gaierror:
result.desc = 'Could not resolve'
except (TimeoutError, socket.timeout):
result.desc = 'Operation timed out'
- except (http.client.RemoteDisconnected) as exc:
+ except http.client.RemoteDisconnected as exc:
result.desc = str(exc)
+ except http.client.InvalidURL:
+ result.desc = 'Invalid URL'
except (ConnectionRefusedError, ConnectionResetError) as exc:
result.desc = exc.strerror
except ssl.SSLCertVerificationError as exc:
diff --git a/webchk/utils.py b/webchk/utils.py
index e8508cf..2343445 100644
--- a/webchk/utils.py
+++ b/webchk/utils.py
@@ -29,6 +29,18 @@ def get_parser():
help='Perform HTTP GET request instead of HEAD',
action='store_true',
)
+ parser.add_argument(
+ '--auth',
+ help='Set Authentication header',
+ type=str,
+ default='',
+ )
+ parser.add_argument(
+ '--agent',
+ help='Set a custom user-agent',
+ type=str,
+ default='',
+ )
parser.add_argument(
'-l', '--list',
help='Print URLs without checking them',
@@ -77,3 +89,14 @@ def urls_from_xml(data):
if j.tag.endswith("loc"):
urls.append(j.text.strip())
return urls
+
+
+def format_headers(headers: dict) -> str:
+ if not isinstance(headers, dict):
+ raise ValueError
+
+ indent = ' '
+ formatted = []
+ for key, val in headers.items():
+ formatted.append('{}{}: {}'.format(indent, key, val))
+ return '\n'.join(formatted)
| amgedr/webchk | 706ba64c9000900f6937d4131856e33da3fca99e | diff --git a/test/test_http.py b/test/test_http.py
index af6ac8e..f1a16da 100644
--- a/test/test_http.py
+++ b/test/test_http.py
@@ -1,6 +1,6 @@
import unittest
-from webchk.http import http_response, parse_url
+from webchk.http import http_response, parse_url, HTTPRequests
TIMEOUT = 3
@@ -26,12 +26,15 @@ class Http(unittest.TestCase):
def test_http_response(self):
for url, result in self.urls.items():
- resp_code = http_response(url, TIMEOUT).status
+ req = HTTPRequests(url, timeout=TIMEOUT)
+ resp_code = http_response(url, req).status
self.assertEqual(resp_code, result[0], url)
def test_redirect_follows(self):
url = 'https://httpstat.us/307'
- resp = http_response(url, TIMEOUT)
+
+ req = HTTPRequests(url, timeout=TIMEOUT)
+ resp = http_response(url, req)
total = 0
while resp.redirect:
fmt = '{} ... {} {} ({})'.format(
@@ -42,10 +45,12 @@ class Http(unittest.TestCase):
self.assertEqual(total, 1)
def test_unresolvable_domains(self):
- resp = http_response('http://!.c', TIMEOUT)
+ req = HTTPRequests(None, timeout=TIMEOUT)
+ resp = http_response('http://!.c', req)
self.assertEqual(str(resp), 'http://!.c ... Could not resolve')
def test_timeouts(self):
url = 'http://httpbin.org/delay/5'
- resp = http_response(url, timeout=1)
+ req = HTTPRequests(url, timeout=1)
+ resp = http_response(url, req)
self.assertEqual(resp.desc, 'Operation timed out')
diff --git a/test/test_utils.py b/test/test_utils.py
index 496022e..2ca9591 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -1,6 +1,8 @@
import unittest
-from webchk.utils import read_input_file, get_parser, urls_from_xml
+from webchk.utils import (
+ read_input_file, get_parser, urls_from_xml, format_headers
+)
class FileIOTest(unittest.TestCase):
@@ -46,3 +48,29 @@ class XmlParserTest(unittest.TestCase):
]
urls = urls_from_xml(self.xml)
self.assertEqual(urls, urls_list)
+
+
+class HeadersFormatterTest(unittest.TestCase):
+ def test_valid_headers(self):
+ cases = {
+ ' Connection: keep-alive\n Content-Length: 5386':
+ {
+ 'Connection': 'keep-alive',
+ 'Content-Length': '5386',
+ },
+ ' Cache-Control: no-cache\n Content-Type: text/html':
+ {
+ 'Cache-Control': 'no-cache',
+ 'Content-Type': 'text/html',
+ }
+ }
+
+ for expected, case in cases.items():
+ self.assertEqual(format_headers(case), expected)
+
+ def test_invalid_value(self):
+ cases = [[], 123, 'abc']
+
+ for case in cases:
+ with self.assertRaises(ValueError):
+ format_headers(case)
| Add option to specify a header for all requests
I would like to be able to specify an authentication header for all of the URLs in my list; a usage sketch follows this record. Could this feature be added? Thanks! | 0.0 | 706ba64c9000900f6937d4131856e33da3fca99e | [
"test/test_http.py::Http::test_parse_url",
"test/test_http.py::Http::test_redirect_follows",
"test/test_http.py::Http::test_timeouts",
"test/test_http.py::Http::test_unresolvable_domains",
"test/test_utils.py::FileIOTest::test_read_input_file",
"test/test_utils.py::CommandParserTest::test_strings",
"test/test_utils.py::XmlParserTest::test_parse_xml_data",
"test/test_utils.py::HeadersFormatterTest::test_invalid_value",
"test/test_utils.py::HeadersFormatterTest::test_valid_headers"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2022-02-18 03:22:33+00:00 | mit | 1,075 |
|
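A minimal sketch of the header support added for the webchk request above, based on the `HTTPRequests` class introduced in the patch; the URL and token values are placeholders:
```python
from webchk.http import HTTPRequests, http_response

# Every check made through this config sends the Authorization header
# and a custom User-Agent (both optional; defaults come from the class).
req = HTTPRequests(
    'https://example.com',          # placeholder URL
    timeout=3,
    auth='Bearer my-secret-token',  # placeholder credential
    user_agent='my-checker/1.0',
)
print(http_response('https://example.com', req))
```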
amperser__proselint-1212 | diff --git a/proselint/.proselintrc b/proselint/.proselintrc
deleted file mode 100644
index 5a87592..0000000
--- a/proselint/.proselintrc
+++ /dev/null
@@ -1,85 +0,0 @@
-{
- "max_errors": 1000,
- "checks": {
- "airlinese.misc" : true,
- "annotations.misc" : true,
- "archaism.misc" : true,
- "cliches.hell" : true,
- "cliches.misc" : true,
- "consistency.spacing" : true,
- "consistency.spelling" : true,
- "corporate_speak.misc" : true,
- "cursing.filth" : true,
- "cursing.nfl" : false,
- "cursing.nword" : true,
- "dates_times.am_pm" : true,
- "dates_times.dates" : true,
- "hedging.misc" : true,
- "hyperbole.misc" : true,
- "jargon.misc" : true,
- "lexical_illusions.misc" : true,
- "lgbtq.offensive_terms" : true,
- "lgbtq.terms" : true,
- "links.broken" : false,
- "malapropisms.misc" : true,
- "misc.apologizing" : true,
- "misc.back_formations" : true,
- "misc.bureaucratese" : true,
- "misc.but" : true,
- "misc.capitalization" : true,
- "misc.chatspeak" : true,
- "misc.commercialese" : true,
- "misc.composition" : true,
- "misc.currency" : true,
- "misc.debased" : true,
- "misc.false_plurals" : true,
- "misc.illogic" : true,
- "misc.inferior_superior" : true,
- "misc.institution_name" : true,
- "misc.latin" : true,
- "misc.many_a" : true,
- "misc.metaconcepts" : true,
- "misc.metadiscourse" : true,
- "misc.narcissism" : true,
- "misc.not_guilty" : true,
- "misc.phrasal_adjectives" : true,
- "misc.preferred_forms" : true,
- "misc.pretension" : true,
- "misc.professions" : true,
- "misc.punctuation" : true,
- "misc.scare_quotes" : true,
- "misc.suddenly" : true,
- "misc.tense_present" : true,
- "misc.waxed" : true,
- "misc.whence" : true,
- "mixed_metaphors.misc" : true,
- "mondegreens.misc" : true,
- "needless_variants.misc" : true,
- "nonwords.misc" : true,
- "oxymorons.misc" : true,
- "psychology.misc" : true,
- "redundancy.misc" : true,
- "redundancy.ras_syndrome" : true,
- "skunked_terms.misc" : true,
- "spelling.able_atable" : true,
- "spelling.able_ible" : true,
- "spelling.athletes" : true,
- "spelling.em_im_en_in" : true,
- "spelling.er_or" : true,
- "spelling.in_un" : true,
- "spelling.misc" : true,
- "security.credit_card" : true,
- "security.password" : true,
- "sexism.misc" : true,
- "terms.animal_adjectives" : true,
- "terms.denizen_labels" : true,
- "terms.eponymous_adjectives" : true,
- "terms.venery" : true,
- "typography.diacritical_marks" : true,
- "typography.exclamation" : true,
- "typography.symbols" : true,
- "uncomparables.misc" : true,
- "weasel_words.misc" : true,
- "weasel_words.very" : true
- }
-}
diff --git a/proselint/command_line.py b/proselint/command_line.py
index e4ff598..3cdeb2f 100644
--- a/proselint/command_line.py
+++ b/proselint/command_line.py
@@ -1,5 +1,6 @@
"""Command line utility for proselint."""
+import json
import os
import shutil
import subprocess
@@ -8,11 +9,12 @@ import traceback
import click
+from .config import default
from .tools import (close_cache_shelves, close_cache_shelves_after,
- errors_to_json, lint)
+ errors_to_json, lint, load_options)
from .version import __version__
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = {"help_option_names": ['-h', '--help']}
base_url = "proselint.com/"
proselint_path = os.path.dirname(os.path.realpath(__file__))
demo_file = os.path.join(proselint_path, "demo.md")
@@ -95,11 +97,23 @@ def print_errors(filename, errors, output_json=False, compact=False):
@click.option('--time', '-t', is_flag=True, help="Time on a corpus.")
@click.option('--demo', is_flag=True, help="Run over demo file.")
@click.option('--compact', is_flag=True, help="Shorten output.")
[email protected]('--dump-config', is_flag=True, help="Prints current config.")
[email protected]('--dump-default-config', is_flag=True,
+ help="Prints default config.")
@click.argument('paths', nargs=-1, type=click.Path())
@close_cache_shelves_after
-def proselint(paths=None, config=None, version=None, clean=None, debug=None,
- output_json=None, time=None, demo=None, compact=None):
+def proselint(paths=None, config=None, version=None, clean=None,
+ debug=None, output_json=None, time=None, demo=None, compact=None,
+ dump_config=None, dump_default_config=None):
"""Create the CLI for proselint, a linter for prose."""
+ if dump_default_config:
+ return print(json.dumps(default, sort_keys=True, indent=4))
+
+ config = load_options(config, default)
+ if dump_config:
+ print(json.dumps(config, sort_keys=True, indent=4))
+ return
+
if time:
# click.echo(timing_test())
print("This option does not work for the time being.")
@@ -129,14 +143,13 @@ def proselint(paths=None, config=None, version=None, clean=None, debug=None,
f = sys.stdin
else:
try:
- f = click.open_file(
- fp, 'r', encoding="utf-8", errors="replace")
+ f = click.open_file(fp, 'r', "utf-8", "replace")
except Exception:
traceback.print_exc()
sys.exit(2)
- errors = lint(f, debug=debug, config_file_path=config)
+ errors = lint(f, debug, config)
num_errors += len(errors)
- print_errors(fp, errors, output_json, compact=compact)
+ print_errors(fp, errors, output_json, compact)
# Return an exit code
close_cache_shelves()
diff --git a/proselint/config.py b/proselint/config.py
new file mode 100644
index 0000000..7f00eb1
--- /dev/null
+++ b/proselint/config.py
@@ -0,0 +1,87 @@
+"""Proselint config - replacement for default .proselintrc since #1212."""
+
+default = {
+ "max_errors": 1000,
+ "checks": {
+ "airlinese.misc": True,
+ "annotations.misc": True,
+ "archaism.misc": True,
+ "cliches.hell": True,
+ "cliches.misc": True,
+ "consistency.spacing": True,
+ "consistency.spelling": True,
+ "corporate_speak.misc": True,
+ "cursing.filth": True,
+ "cursing.nfl": False,
+ "cursing.nword": True,
+ "dates_times.am_pm": True,
+ "dates_times.dates": True,
+ "hedging.misc": True,
+ "hyperbole.misc": True,
+ "jargon.misc": True,
+ "lexical_illusions.misc": True,
+ "lgbtq.offensive_terms": True,
+ "lgbtq.terms": True,
+ "links.broken": False,
+ "malapropisms.misc": True,
+ "misc.apologizing": True,
+ "misc.back_formations": True,
+ "misc.bureaucratese": True,
+ "misc.but": True,
+ "misc.capitalization": True,
+ "misc.chatspeak": True,
+ "misc.commercialese": True,
+ "misc.composition": True,
+ "misc.currency": True,
+ "misc.debased": True,
+ "misc.false_plurals": True,
+ "misc.illogic": True,
+ "misc.inferior_superior": True,
+ "misc.institution_name": True,
+ "misc.latin": True,
+ "misc.many_a": True,
+ "misc.metaconcepts": True,
+ "misc.metadiscourse": True,
+ "misc.narcissism": True,
+ "misc.not_guilty": True,
+ "misc.phrasal_adjectives": True,
+ "misc.preferred_forms": True,
+ "misc.pretension": True,
+ "misc.professions": True,
+ "misc.punctuation": True,
+ "misc.scare_quotes": True,
+ "misc.suddenly": True,
+ "misc.tense_present": True,
+ "misc.waxed": True,
+ "misc.whence": True,
+ "mixed_metaphors.misc": True,
+ "mondegreens.misc": True,
+ "needless_variants.misc": True,
+ "nonwords.misc": True,
+ "oxymorons.misc": True,
+ "psychology.misc": True,
+ "redundancy.misc": True,
+ "redundancy.ras_syndrome": True,
+ "skunked_terms.misc": True,
+ "spelling.able_atable": True,
+ "spelling.able_ible": True,
+ "spelling.athletes": True,
+ "spelling.em_im_en_in": True,
+ "spelling.er_or": True,
+ "spelling.in_un": True,
+ "spelling.misc": True,
+ "security.credit_card": True,
+ "security.password": True,
+ "sexism.misc": True,
+ "terms.animal_adjectives": True,
+ "terms.denizen_labels": True,
+ "terms.eponymous_adjectives": True,
+ "terms.venery": True,
+ "typography.diacritical_marks": True,
+ "typography.exclamation": True,
+ "typography.symbols": True,
+ "uncomparables.misc": True,
+ "weasel_words.misc": True,
+ "weasel_words.very": True
+ }
+}
diff --git a/proselint/tools.py b/proselint/tools.py
index f4ee7ad..ec43ecf 100644
--- a/proselint/tools.py
+++ b/proselint/tools.py
@@ -1,6 +1,5 @@
"""General-purpose tools shared across linting checks."""
-
import copy
import dbm
import functools
@@ -13,6 +12,7 @@ import re
import shelve
import sys
import traceback
+from warnings import showwarning as warn
_cache_shelves = dict()
proselint_path = os.path.dirname(os.path.realpath(__file__))
@@ -22,7 +22,7 @@ cwd = os.getcwd()
def close_cache_shelves():
"""Close previously opened cache shelves."""
- for _, cache in _cache_shelves.items():
+ for cache in _cache_shelves.values():
cache.close()
_cache_shelves.clear()
@@ -138,8 +138,7 @@ def get_checks(options):
"""Extract the checks."""
sys.path.append(proselint_path)
checks = []
- check_names = [key for (key, val)
- in list(options["checks"].items()) if val]
+ check_names = [key for (key, val) in options["checks"].items() if val]
for check_name in check_names:
module = importlib.import_module("checks." + check_name)
@@ -163,24 +162,18 @@ def deepmerge_dicts(dict1, dict2):
return result
-def load_options(config_file_path=None):
+def load_options(config_file_path=None, conf_default=None):
"""Read various proselintrc files, allowing user overrides."""
- system_config_paths = (
- '/etc/proselintrc',
- os.path.join(proselint_path, '.proselintrc'),
- )
-
- system_options = {}
- for path in system_config_paths:
- if os.path.isfile(path):
- system_options = json.load(open(path))
- break
+ conf_default = conf_default or {}
+ if os.path.isfile("/etc/proselintrc"):
+ conf_default = json.load(open("/etc/proselintrc"))
user_config_paths = [
- os.path.join(cwd, '.proselintrc'),
- os.path.join(_get_xdg_config_home(), 'proselint', 'config'),
- os.path.join(home_dir, '.proselintrc')
+ os.path.join(cwd, '.proselintrc.json'),
+ os.path.join(_get_xdg_config_home(), 'proselint', 'config.json'),
+ os.path.join(home_dir, '.proselintrc.json')
]
+
if config_file_path:
if not os.path.isfile(config_file_path):
raise FileNotFoundError(
@@ -192,10 +185,14 @@ def load_options(config_file_path=None):
if os.path.isfile(path):
user_options = json.load(open(path))
break
+ oldpath = path.replace(".json", "")
+ if os.path.isfile(oldpath):
+ warn(f"{oldpath} was found instead of a JSON file."
+ f" Rename to {path}.", DeprecationWarning, "", 0)
+ user_options = json.load(open(oldpath))
+ break
- options = deepmerge_dicts(system_options, user_options)
-
- return options
+ return deepmerge_dicts(conf_default, user_options)
def errors_to_json(errors):
@@ -215,7 +212,7 @@ def errors_to_json(errors):
})
return json.dumps(
- dict(status="success", data={"errors": out}), sort_keys=True)
+ {"status": "success", "data": {"errors": out}}, sort_keys=True)
def line_and_column(text, position):
@@ -230,17 +227,16 @@ def line_and_column(text, position):
return (line_no, position - position_counter)
-def lint(input_file, debug=False, config_file_path=None):
+def lint(input_file, debug=False, config=None):
"""Run the linter on the input file."""
- options = load_options(config_file_path)
-
+ config = config or {}
if isinstance(input_file, str):
text = input_file
else:
text = input_file.read()
# Get the checks.
- checks = get_checks(options)
+ checks = get_checks(config)
# Apply all the checks.
errors = []
@@ -255,11 +251,11 @@ def lint(input_file, debug=False, config_file_path=None):
errors += [(check, message, line, column, start, end,
end - start, "warning", replacements)]
- if len(errors) > options["max_errors"]:
+ if len(errors) > config["max_errors"]:
break
# Sort the errors by line and column number.
- errors = sorted(errors[:options["max_errors"]], key=lambda e: (e[2], e[3]))
+ errors = sorted(errors[:config["max_errors"]], key=lambda e: (e[2], e[3]))
return errors
| amperser/proselint | 935d53ab07a6e0dd08a4af8dbc31a33976f37d50 | diff --git a/tests/test_config_flag.py b/tests/test_config_flag.py
index e9fc265..64ad547 100644
--- a/tests/test_config_flag.py
+++ b/tests/test_config_flag.py
@@ -1,9 +1,17 @@
"""Test user option overrides using --config and load_options"""
+import json
+import os
+from unittest import TestCase
+from unittest.mock import patch
+
from click.testing import CliRunner
from proselint.command_line import proselint
+from proselint.config import default
from proselint.tools import deepmerge_dicts, load_options
+runner = CliRunner()
+
def test_deepmerge_dicts():
"""Test deepmerge_dicts"""
@@ -12,17 +20,23 @@ def test_deepmerge_dicts():
assert deepmerge_dicts(d1, d2) == {'a': 2, 'b': {'c': 3, 'd': 3, 'e': 4}}
-def test_load_options_function():
+@patch("os.path.isfile")
+def test_load_options_function(isfile):
"""Test load_options by specifying a user options path"""
- overrides = load_options("tests/test_config_flag_proselintrc")
- assert load_options()["checks"]["uncomparables.misc"]
+
+ isfile.side_effect = "tests/test_config_flag_proselintrc".__eq__
+
+ overrides = load_options("tests/test_config_flag_proselintrc", default)
+ assert load_options(conf_default=default)["checks"]["uncomparables.misc"]
assert not overrides["checks"]["uncomparables.misc"]
+ isfile.side_effect = os.path.join(os.getcwd(), ".proselintrc").__eq__
+
+ TestCase().assertRaises(FileNotFoundError, load_options)
+
def test_config_flag():
"""Test the --config CLI argument"""
- runner = CliRunner()
-
output = runner.invoke(proselint, "--demo")
assert "uncomparables.misc" in output.stdout
@@ -36,3 +50,14 @@ def test_config_flag():
output = runner.invoke(proselint, "non_existent_file")
assert output.exit_code == 2
+
+
+def test_dump_config():
+ """Test --dump-default-config and --dump-config"""
+ output = runner.invoke(proselint, "--dump-default-config")
+ assert json.loads(output.stdout) == default
+
+ output = runner.invoke(
+ proselint, "--dump-config --config tests/test_config_flag_proselintrc")
+ assert json.loads(output.stdout) == json.load(
+ open("tests/test_config_flag_proselintrc"))
diff --git a/tests/test_tools.py b/tests/test_tools.py
index b5408e2..ec110d6 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1,11 +1,16 @@
"""Test the tools module."""
-from proselint.tools import lint
+from proselint.config import default
+from proselint.tools import lint as proselint, load_options
from .check import Check
+def lint(text):
+ return proselint(text, config=load_options(conf_default=default))
+
+
class TestLint(Check):
"""The test class for tools.lint."""
| Problems packaging .proselintrc
Hey :wave:
I'm running into issues packaging this for Arch due to the system config file `.proselintrc`. Since the project migrated from `setuptools` to `poetry` I use [dephell](https://github.com/dephell/dephell) to convert it back to `setuptools` during installation, as is recommended by the Arch community. This conversion does not seem to produce the correct `package_data` in `setup.py` (probably because that is not supported), resulting in `.proselintrc` not being installed at all.
While I could patch the package and do a number of work-arounds, I wonder if we could get rid of `.proselintrc` altogether by hardcoding the system defaults directly in `proselint`. Having `.proselintrc` as a Python file or a part of `proselint/tools.py` would make packaging for distros less complex and it would not hurt `pip` distribution as far as I can tell.
Not distributing the default config as a file would make it a bit harder to build a custom config from scratch, but this could be rectified by a `--dump-config` flag that prints the current config. This is how the Clang tools work, for example. A usage sketch follows this record. | 0.0 | 935d53ab07a6e0dd08a4af8dbc31a33976f37d50 | [
"tests/test_config_flag.py::test_deepmerge_dicts",
"tests/test_config_flag.py::test_load_options_function",
"tests/test_config_flag.py::test_config_flag",
"tests/test_config_flag.py::test_dump_config",
"tests/test_tools.py::TestLint::test_errors_sorted",
"tests/test_tools.py::TestLint::test_on_no_newlines"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2021-08-02 21:06:34+00:00 | bsd-3-clause | 1,076 |
|
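To illustrate the proselint change above: the defaults now live in `proselint/config.py` (also exposed through the new `--dump-default-config` flag), so packages no longer need to ship `.proselintrc`. A minimal sketch, with the output filename taken from the new user-config locations in the patch:
```python
import json
from proselint.config import default
from proselint.tools import lint, load_options

# Write a starting user config from the in-package defaults
# (equivalent to `proselint --dump-default-config > .proselintrc.json`).
with open('.proselintrc.json', 'w') as f:
    json.dump(default, f, sort_keys=True, indent=4)

# Merge the defaults with any user overrides, then lint some text.
config = load_options(conf_default=default)
errors = lint('This is very unique.', config=config)
```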
amplify-education__python-hcl2-73 | diff --git a/hcl2/transformer.py b/hcl2/transformer.py
index b03aaee..74430f7 100644
--- a/hcl2/transformer.py
+++ b/hcl2/transformer.py
@@ -1,6 +1,7 @@
"""A Lark Transformer for transforming a Lark parse tree into a Python dict"""
import re
import sys
+from collections import namedtuple
from typing import List, Dict, Any
from lark import Transformer, Discard
@@ -8,6 +9,8 @@ from lark import Transformer, Discard
HEREDOC_PATTERN = re.compile(r'<<([a-zA-Z][a-zA-Z0-9._-]+)\n((.|\n)*?)\n\s*\1', re.S)
HEREDOC_TRIM_PATTERN = re.compile(r'<<-([a-zA-Z][a-zA-Z0-9._-]+)\n((.|\n)*?)\n\s*\1', re.S)
+Attribute = namedtuple("Attribute", ("key", "value"))
+
# pylint: disable=missing-docstring,unused-argument
class DictTransformer(Transformer):
@@ -103,15 +106,12 @@ class DictTransformer(Transformer):
def one_line_block(self, args: List) -> Dict:
return self.block(args)
- def attribute(self, args: List) -> Dict:
+ def attribute(self, args: List) -> Attribute:
key = str(args[0])
if key.startswith('"') and key.endswith('"'):
key = key[1:-1]
value = self.to_string_dollar(args[1])
-
- return {
- key: value
- }
+ return Attribute(key, value)
def conditional(self, args: List) -> str:
args = self.strip_new_line_tokens(args)
@@ -128,26 +128,42 @@ class DictTransformer(Transformer):
return " ".join([str(arg) for arg in args])
def body(self, args: List) -> Dict[str, List]:
- # A body can have multiple attributes with the same name
- # For example multiple Statement attributes in a IAM resource body
- # So This returns a dict of attribute names to lists
- # The attribute values will always be lists even if they aren't repeated
- # and only contain a single entry
+ # See https://github.com/hashicorp/hcl/blob/main/hclsyntax/spec.md#bodies
+ # ---
+ # A body is a collection of associated attributes and blocks.
+ #
+ # An attribute definition assigns a value to a particular attribute
+ # name within a body. Each distinct attribute name may be defined no
+ # more than once within a single body.
+ #
+ # A block creates a child body that is annotated with a block type and
+ # zero or more block labels. Blocks create a structural hierarchy which
+ # can be interpreted by the calling application.
+ # ---
+ #
+ # There can be more than one child body with the same block type and
+ # labels. This means that all blocks (even when there is only one)
+ # should be transformed into lists of blocks.
args = self.strip_new_line_tokens(args)
+ attributes = set()
result: Dict[str, Any] = {}
for arg in args:
- for key, value in arg.items():
- key = str(key)
- if key not in result:
- result[key] = [value]
- else:
- if isinstance(result[key], list):
- if isinstance(value, list):
- result[key].extend(value)
- else:
- result[key].append(value)
+ if isinstance(arg, Attribute):
+ if arg.key in result:
+ raise RuntimeError("{} already defined".format(arg.key))
+ result[arg.key] = arg.value
+ attributes.add(arg.key)
+ else:
+ # This is a block.
+ for key, value in arg.items():
+ key = str(key)
+ if key in result:
+ if key in attributes:
+ raise RuntimeError("{} already defined".format(key))
+ result[key].append(value)
else:
- result[key] = [result[key], value]
+ result[key] = [value]
+
return result
def start(self, args: List) -> Dict:
| amplify-education/python-hcl2 | c9869c1373ea2401a4a43d8b429bd70fc33683ec | diff --git a/test/helpers/terraform-config-json/backend.json b/test/helpers/terraform-config-json/backend.json
index 7bb6df4..7c0bcd4 100644
--- a/test/helpers/terraform-config-json/backend.json
+++ b/test/helpers/terraform-config-json/backend.json
@@ -2,27 +2,19 @@
"provider": [
{
"aws": {
- "region": [
- "${var.region}"
- ]
+ "region": "${var.region}"
}
},
{
"aws": {
- "region": [
- "${var.backup_region}"
- ],
- "alias": [
- "backup"
- ]
+ "region": "${var.backup_region}",
+ "alias": "backup"
}
}
],
"terraform": [
{
- "required_version": [
- "0.12"
- ]
+ "required_version": "0.12"
},
{
"backend": [
@@ -32,21 +24,15 @@
],
"required_providers": [
{
- "aws": [
- {
- "source": "hashicorp/aws"
- }
- ],
- "null": [
- {
- "source": "hashicorp/null"
- }
- ],
- "template": [
- {
- "source": "hashicorp/template"
- }
- ]
+ "aws": {
+ "source": "hashicorp/aws"
+ },
+ "null": {
+ "source": "hashicorp/null"
+ },
+ "template": {
+ "source": "hashicorp/template"
+ }
}
]
}
diff --git a/test/helpers/terraform-config-json/cloudwatch.json b/test/helpers/terraform-config-json/cloudwatch.json
index c8733dd..72a344a 100644
--- a/test/helpers/terraform-config-json/cloudwatch.json
+++ b/test/helpers/terraform-config-json/cloudwatch.json
@@ -3,36 +3,24 @@
{
"aws_cloudwatch_event_rule": {
"aws_cloudwatch_event_rule": {
- "name": [
- "name"
- ],
- "event_pattern": [
- " {\n \"foo\": \"bar\"\n }"
- ]
+ "name": "name",
+ "event_pattern": " {\n \"foo\": \"bar\"\n }"
}
}
},
{
"aws_cloudwatch_event_rule": {
"aws_cloudwatch_event_rule2": {
- "name": [
- "name"
- ],
- "event_pattern": [
- "{\n \"foo\": \"bar\"\n}"
- ]
+ "name": "name",
+ "event_pattern": "{\n \"foo\": \"bar\"\n}"
}
}
},
{
"aws_cloudwatch_event_rule": {
"aws_cloudwatch_event_rule2": {
- "name": [
- "name"
- ],
- "event_pattern": [
- "${jsonencode(var.cloudwatch_pattern_deploytool)}"
- ]
+ "name": "name",
+ "event_pattern": "${jsonencode(var.cloudwatch_pattern_deploytool)}"
}
}
}
diff --git a/test/helpers/terraform-config-json/data_sources.json b/test/helpers/terraform-config-json/data_sources.json
index d1356ce..f1f939d 100644
--- a/test/helpers/terraform-config-json/data_sources.json
+++ b/test/helpers/terraform-config-json/data_sources.json
@@ -3,12 +3,8 @@
{
"terraform_remote_state": {
"map": {
- "for_each": [
- "${{for s3_bucket_key in data.aws_s3_bucket_objects.remote_state_objects.keys : regex(local.remote_state_regex,s3_bucket_key)[\"account_alias\"] => s3_bucket_key if length(regexall(local.remote_state_regex,s3_bucket_key)) > 0}}"
- ],
- "backend": [
- "s3"
- ]
+ "for_each": "${{for s3_bucket_key in data.aws_s3_bucket_objects.remote_state_objects.keys : regex(local.remote_state_regex,s3_bucket_key)[\"account_alias\"] => s3_bucket_key if length(regexall(local.remote_state_regex,s3_bucket_key)) > 0}}",
+ "backend": "s3"
}
}
}
diff --git a/test/helpers/terraform-config-json/iam.json b/test/helpers/terraform-config-json/iam.json
index 599b3cd..a6b5339 100644
--- a/test/helpers/terraform-config-json/iam.json
+++ b/test/helpers/terraform-config-json/iam.json
@@ -5,29 +5,19 @@
"policy": {
"statement": [
{
- "effect": [
- "Deny"
- ],
+ "effect": "Deny",
"principals": [
{
- "type": [
- "AWS"
- ],
+ "type": "AWS",
"identifiers": [
- [
- "*"
- ]
+ "*"
]
}
],
"actions": [
- [
- "s3:PutObjectAcl"
- ]
+ "s3:PutObjectAcl"
],
- "resources": [
- "${aws_s3_bucket.bucket.*.arn}"
- ]
+ "resources": "${aws_s3_bucket.bucket.*.arn}"
}
]
}
@@ -39,13 +29,9 @@
"statement": [
{
"actions": [
- [
- "s3:GetObject"
- ]
+ "s3:GetObject"
],
- "resources": [
- "${[for bucket_name in local.buckets_to_proxy : \"arn:aws:s3:::${bucket_name}/*\" if substr(bucket_name,0,1) == \"l\"]}"
- ]
+ "resources": "${[for bucket_name in local.buckets_to_proxy : \"arn:aws:s3:::${bucket_name}/*\" if substr(bucket_name,0,1) == \"l\"]}"
}
]
}
diff --git a/test/helpers/terraform-config-json/route_table.json b/test/helpers/terraform-config-json/route_table.json
index 7c41788..b07ed40 100644
--- a/test/helpers/terraform-config-json/route_table.json
+++ b/test/helpers/terraform-config-json/route_table.json
@@ -3,36 +3,20 @@
{
"aws_route": {
"tgw": {
- "count": [
- "${var.tgw_name == \"\" ? 0 : var.number_of_az}"
- ],
- "route_table_id": [
- "${aws_route_table.rt[count.index].id}"
- ],
- "destination_cidr_block": [
- "10.0.0.0/8"
- ],
- "transit_gateway_id": [
- "${data.aws_ec2_transit_gateway.tgw[0].id}"
- ]
+ "count": "${var.tgw_name == \"\" ? 0 : var.number_of_az}",
+ "route_table_id": "${aws_route_table.rt[count.index].id}",
+ "destination_cidr_block": "10.0.0.0/8",
+ "transit_gateway_id": "${data.aws_ec2_transit_gateway.tgw[0].id}"
}
}
},
{
"aws_route": {
"tgw-dot-index": {
- "count": [
- "${var.tgw_name == \"\" ? 0 : var.number_of_az}"
- ],
- "route_table_id": [
- "${aws_route_table.rt[count.index].id}"
- ],
- "destination_cidr_block": [
- "10.0.0.0/8"
- ],
- "transit_gateway_id": [
- "${data.aws_ec2_transit_gateway.tgw[0].id}"
- ]
+ "count": "${var.tgw_name == \"\" ? 0 : var.number_of_az}",
+ "route_table_id": "${aws_route_table.rt[count.index].id}",
+ "destination_cidr_block": "10.0.0.0/8",
+ "transit_gateway_id": "${data.aws_ec2_transit_gateway.tgw[0].id}"
}
}
}
diff --git a/test/helpers/terraform-config-json/s3.json b/test/helpers/terraform-config-json/s3.json
index 5b4982d..6fd2687 100644
--- a/test/helpers/terraform-config-json/s3.json
+++ b/test/helpers/terraform-config-json/s3.json
@@ -3,43 +3,27 @@
{
"aws_s3_bucket": {
"name": {
- "bucket": [
- "name"
- ],
- "acl": [
- "log-delivery-write"
- ],
+ "bucket": "name",
+ "acl": "log-delivery-write",
"lifecycle_rule": [
{
- "id": [
- "to_glacier"
- ],
- "prefix": [
- ""
- ],
- "enabled": [
- true
- ],
+ "id": "to_glacier",
+ "prefix": "",
+ "enabled": true,
"expiration": [
{
- "days": [
- 365
- ]
+ "days": 365
}
],
- "transition": [
- {
- "days": 30,
- "storage_class": "GLACIER"
- }
- ]
+ "transition": {
+ "days": 30,
+ "storage_class": "GLACIER"
+ }
}
],
"versioning": [
{
- "enabled": [
- true
- ]
+ "enabled": true
}
]
}
@@ -49,18 +33,10 @@
"module": [
{
"bucket_name": {
- "source": [
- "s3_bucket_name"
- ],
- "name": [
- "audit"
- ],
- "account": [
- "${var.account}"
- ],
- "region": [
- "${var.region}"
- ]
+ "source": "s3_bucket_name",
+ "name": "audit",
+ "account": "${var.account}",
+ "region": "${var.region}"
}
}
]
diff --git a/test/helpers/terraform-config-json/variables.json b/test/helpers/terraform-config-json/variables.json
index 56f5874..13afddb 100644
--- a/test/helpers/terraform-config-json/variables.json
+++ b/test/helpers/terraform-config-json/variables.json
@@ -8,44 +8,34 @@
},
{
"azs": {
- "default": [
- {
- "us-west-1": "us-west-1c,us-west-1b",
- "us-west-2": "us-west-2c,us-west-2b,us-west-2a",
- "us-east-1": "us-east-1c,us-east-1b,us-east-1a",
- "eu-central-1": "eu-central-1a,eu-central-1b,eu-central-1c",
- "sa-east-1": "sa-east-1a,sa-east-1c",
- "ap-northeast-1": "ap-northeast-1a,ap-northeast-1c,ap-northeast-1d",
- "ap-southeast-1": "ap-southeast-1a,ap-southeast-1b,ap-southeast-1c",
- "ap-southeast-2": "ap-southeast-2a,ap-southeast-2b,ap-southeast-2c"
- }
- ]
+ "default": {
+ "us-west-1": "us-west-1c,us-west-1b",
+ "us-west-2": "us-west-2c,us-west-2b,us-west-2a",
+ "us-east-1": "us-east-1c,us-east-1b,us-east-1a",
+ "eu-central-1": "eu-central-1a,eu-central-1b,eu-central-1c",
+ "sa-east-1": "sa-east-1a,sa-east-1c",
+ "ap-northeast-1": "ap-northeast-1a,ap-northeast-1c,ap-northeast-1d",
+ "ap-southeast-1": "ap-southeast-1a,ap-southeast-1b,ap-southeast-1c",
+ "ap-southeast-2": "ap-southeast-2a,ap-southeast-2b,ap-southeast-2c"
+ }
}
},
{
"options": {
- "default": [{}]
+ "default": {}
}
}
],
"locals": [
{
- "foo": [
- "${var.account}_bar"
- ],
- "bar": [
- {
- "baz": 1
- }
- ]
+ "foo": "${var.account}_bar",
+ "bar": {
+ "baz": 1
+ }
},
{
- "route53_forwarding_rule_shares": [
- "${{for forwarding_rule_key in keys(var.route53_resolver_forwarding_rule_shares) : \"${forwarding_rule_key}\" => {'aws_account_ids': '${[for account_name in var.route53_resolver_forwarding_rule_shares[forwarding_rule_key].aws_account_names : module.remote_state_subaccounts.map[account_name].outputs[\"aws_account_id\"]]}'}}}"
- ],
- "has_valid_forwarding_rules_template_inputs": [
- "${length(keys(var.forwarding_rules_template.copy_resolver_rules)) > 0 && length(var.forwarding_rules_template.replace_with_target_ips) > 0 && length(var.forwarding_rules_template.exclude_cidrs) > 0}"
- ]
+ "route53_forwarding_rule_shares": "${{for forwarding_rule_key in keys(var.route53_resolver_forwarding_rule_shares) : \"${forwarding_rule_key}\" => {'aws_account_ids': '${[for account_name in var.route53_resolver_forwarding_rule_shares[forwarding_rule_key].aws_account_names : module.remote_state_subaccounts.map[account_name].outputs[\"aws_account_id\"]]}'}}}",
+ "has_valid_forwarding_rules_template_inputs": "${length(keys(var.forwarding_rules_template.copy_resolver_rules)) > 0 && length(var.forwarding_rules_template.replace_with_target_ips) > 0 && length(var.forwarding_rules_template.exclude_cidrs) > 0}"
}
]
-}
+}
\ No newline at end of file
diff --git a/test/helpers/terraform-config-json/vars.auto.json b/test/helpers/terraform-config-json/vars.auto.json
index 60e5235..fee0e28 100644
--- a/test/helpers/terraform-config-json/vars.auto.json
+++ b/test/helpers/terraform-config-json/vars.auto.json
@@ -1,11 +1,7 @@
{
- "foo": [
- "bar"
- ],
+ "foo": "bar",
"arr": [
- [
- "foo",
- "bar"
- ]
+ "foo",
+ "bar"
]
}
\ No newline at end of file
| Incorrectly transforms attributes into lists
Hi,
I was excited to try this package but it seems to turn everything into a list. There is a test for this behaviour, which I think is wrong:
https://github.com/amplify-education/python-hcl2/blob/a4b29a76e34bbbd4bcac8d073f96392f451f79b3/test/helpers/terraform-config/vars.auto.tfvars#L1
https://github.com/amplify-education/python-hcl2/blob/a4b29a76e34bbbd4bcac8d073f96392f451f79b3/test/helpers/terraform-config-json/vars.auto.json#L2-L4
I think the JSON should be:
```json
"foo": "bar",
```
https://github.com/hashicorp/hcl2#information-model-and-syntax has examples with top-level strings.
I would like to get confirmation that this is a bug before I have a go at fixing it and trying to use this in my project; a before/after sketch follows this record. Thanks! | 0.0 | c9869c1373ea2401a4a43d8b429bd70fc33683ec | [
"test/unit/test_load.py::TestLoad::test_load_terraform",
"test/unit/test_load.py::TestLoad::test_load_terraform_from_cache"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-07-14 15:19:48+00:00 | mit | 1,077 |
|
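A before/after sketch of the python-hcl2 fix above, mirroring the updated `vars.auto` test fixture; the `.tfvars` file is assumed to contain `foo = "bar"` and `arr = ["foo", "bar"]`:
```python
import hcl2

with open('vars.auto.tfvars') as f:
    parsed = hcl2.load(f)

# Before the fix every attribute value was wrapped in a list:
#   {'foo': ['bar'], 'arr': [['foo', 'bar']]}
# After the fix attribute values keep their original shape:
assert parsed == {'foo': 'bar', 'arr': ['foo', 'bar']}
```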
andialbrecht__sqlparse-231 | diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 4e45f65..68960d5 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -135,7 +135,8 @@ def group_comparison(tlist):
T.Name, T.Number, T.Number.Float,
T.Number.Integer, T.Literal,
T.Literal.Number.Integer, T.Name.Placeholder)
- or isinstance(token, (sql.Identifier, sql.Parenthesis))
+ or isinstance(token, (sql.Identifier, sql.Parenthesis,
+ sql.Function))
or (token.ttype is T.Keyword
and token.value.upper() in ['NULL', ]))
_group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
| andialbrecht/sqlparse | ee5799fbb60e9739e42922861cd9f24990fc52dd | diff --git a/tests/test_grouping.py b/tests/test_grouping.py
index e846176..a6c4028 100644
--- a/tests/test_grouping.py
+++ b/tests/test_grouping.py
@@ -325,6 +325,29 @@ def test_comparison_with_strings(): # issue148
assert p.tokens[0].right.ttype == T.String.Single
+def test_comparison_with_functions(): # issue230
+ p = sqlparse.parse('foo = DATE(bar.baz)')[0]
+ assert len(p.tokens) == 1
+ assert isinstance(p.tokens[0], sql.Comparison)
+ assert len(p.tokens[0].tokens) == 5
+ assert p.tokens[0].left.value == 'foo'
+ assert p.tokens[0].right.value == 'DATE(bar.baz)'
+
+ p = sqlparse.parse('DATE(foo.bar) = DATE(bar.baz)')[0]
+ assert len(p.tokens) == 1
+ assert isinstance(p.tokens[0], sql.Comparison)
+ assert len(p.tokens[0].tokens) == 5
+ assert p.tokens[0].left.value == 'DATE(foo.bar)'
+ assert p.tokens[0].right.value == 'DATE(bar.baz)'
+
+ p = sqlparse.parse('DATE(foo.bar) = bar.baz')[0]
+ assert len(p.tokens) == 1
+ assert isinstance(p.tokens[0], sql.Comparison)
+ assert len(p.tokens[0].tokens) == 5
+ assert p.tokens[0].left.value == 'DATE(foo.bar)'
+ assert p.tokens[0].right.value == 'bar.baz'
+
+
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
p = sqlparse.parse('%s foo in bar LOOP foobar END LOOP' % start)[0]
| Functions are not grouped into a Comparison
For example, `foo = DATE(bar.baz)` is not grouped into a single Comparison token; a short sketch follows this record. | 0.0 | ee5799fbb60e9739e42922861cd9f24990fc52dd | [
"tests/test_grouping.py::test_comparison_with_functions"
]
| [
"tests/test_grouping.py::TestGrouping::test_alias",
"tests/test_grouping.py::TestGrouping::test_alias_case",
"tests/test_grouping.py::TestGrouping::test_alias_returns_none",
"tests/test_grouping.py::TestGrouping::test_assignment",
"tests/test_grouping.py::TestGrouping::test_comments",
"tests/test_grouping.py::TestGrouping::test_comparison_exclude",
"tests/test_grouping.py::TestGrouping::test_function",
"tests/test_grouping.py::TestGrouping::test_function_not_in",
"tests/test_grouping.py::TestGrouping::test_identifier_as_invalid",
"tests/test_grouping.py::TestGrouping::test_identifier_extended",
"tests/test_grouping.py::TestGrouping::test_identifier_function",
"tests/test_grouping.py::TestGrouping::test_identifier_invalid",
"tests/test_grouping.py::TestGrouping::test_identifier_list",
"tests/test_grouping.py::TestGrouping::test_identifier_list_case",
"tests/test_grouping.py::TestGrouping::test_identifier_list_other",
"tests/test_grouping.py::TestGrouping::test_identifier_list_with_inline_comments",
"tests/test_grouping.py::TestGrouping::test_identifier_name_wildcard",
"tests/test_grouping.py::TestGrouping::test_identifier_wildcard",
"tests/test_grouping.py::TestGrouping::test_identifiers",
"tests/test_grouping.py::TestGrouping::test_idlist_function",
"tests/test_grouping.py::TestGrouping::test_parenthesis",
"tests/test_grouping.py::TestGrouping::test_typecast",
"tests/test_grouping.py::TestGrouping::test_varchar",
"tests/test_grouping.py::TestGrouping::test_where",
"tests/test_grouping.py::TestStatement::test_get_type",
"tests/test_grouping.py::test_identifier_with_operators",
"tests/test_grouping.py::test_identifier_with_op_trailing_ws",
"tests/test_grouping.py::test_identifier_with_string_literals",
"tests/test_grouping.py::test_identifier_consumes_ordering",
"tests/test_grouping.py::test_comparison_with_keywords",
"tests/test_grouping.py::test_comparison_with_floats",
"tests/test_grouping.py::test_comparison_with_parenthesis",
"tests/test_grouping.py::test_comparison_with_strings",
"tests/test_grouping.py::test_forloops[FOR]",
"tests/test_grouping.py::test_forloops[FOREACH]",
"tests/test_grouping.py::test_nested_for",
"tests/test_grouping.py::test_begin",
"tests/test_grouping.py::test_nested_begin",
"tests/test_grouping.py::test_aliased_column_without_as",
"tests/test_grouping.py::test_qualified_function",
"tests/test_grouping.py::test_aliased_function_without_as",
"tests/test_grouping.py::test_aliased_literal_without_as"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2016-04-05 08:28:29+00:00 | bsd-3-clause | 1,078 |
|
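A short sketch of the sqlparse grouping fix above, taken directly from the regression test added in this record:
```python
import sqlparse
from sqlparse import sql

# With sql.Function added to the comparison grouping, a function call
# on either side of '=' now ends up inside a single Comparison token.
p = sqlparse.parse('foo = DATE(bar.baz)')[0]
comparison = p.tokens[0]
assert isinstance(comparison, sql.Comparison)
assert comparison.left.value == 'foo'
assert comparison.right.value == 'DATE(bar.baz)'
```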
andialbrecht__sqlparse-323 | diff --git a/CHANGELOG b/CHANGELOG
index 5ffe811..b9b80a0 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -8,6 +8,7 @@ Enhancements
Bug Fixes
* Fix some edge-cases when parsing invalid SQL statements.
+* Fix indentation of LIMIT (by romainr, pr321).
Internal Changes
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index 1fd07c1..d68b4ae 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -167,6 +167,7 @@ KEYWORDS = {
'COMMIT': tokens.Keyword.DML,
'COMMITTED': tokens.Keyword,
'COMPLETION': tokens.Keyword,
+ 'CONCURRENTLY': tokens.Keyword,
'CONDITION_NUMBER': tokens.Keyword,
'CONNECT': tokens.Keyword,
'CONNECTION': tokens.Keyword,
| andialbrecht/sqlparse | d67c442db4fd8b60a97440e84b9c21e80e4e958c | diff --git a/tests/test_regressions.py b/tests/test_regressions.py
index cf88419..cc553c2 100644
--- a/tests/test_regressions.py
+++ b/tests/test_regressions.py
@@ -343,3 +343,16 @@ def test_issue315_utf8_by_default():
if PY2:
tformatted = tformatted.decode('utf-8')
assert formatted == tformatted
+
+
+def test_issue322_concurrently_is_keyword():
+ s = 'CREATE INDEX CONCURRENTLY myindex ON mytable(col1);'
+ p = sqlparse.parse(s)[0]
+
+ assert len(p.tokens) == 12
+ assert p.tokens[0].ttype is T.Keyword.DDL # CREATE
+ assert p.tokens[2].ttype is T.Keyword # INDEX
+ assert p.tokens[4].ttype is T.Keyword # CONCURRENTLY
+ assert p.tokens[4].value == 'CONCURRENTLY'
+ assert isinstance(p.tokens[6], sql.Identifier)
+ assert p.tokens[6].value == 'myindex'
| Support CONCURRENTLY keyword in CREATE INDEX statements
When parsing a statement like `CREATE INDEX CONCURRENTLY name ON ...`, "CONCURRENTLY name" is returned as a single identifier. A parsing sketch follows this record. | 0.0 | d67c442db4fd8b60a97440e84b9c21e80e4e958c | [
"tests/test_regressions.py::test_issue322_concurrently_is_keyword"
]
| [
"tests/test_regressions.py::test_issue9",
"tests/test_regressions.py::test_issue13",
"tests/test_regressions.py::test_issue26[--hello]",
"tests/test_regressions.py::test_issue26[--",
"tests/test_regressions.py::test_issue26[--hello\\n]",
"tests/test_regressions.py::test_issue26[--]",
"tests/test_regressions.py::test_issue26[--\\n]",
"tests/test_regressions.py::test_issue34[create]",
"tests/test_regressions.py::test_issue34[CREATE]",
"tests/test_regressions.py::test_issue35",
"tests/test_regressions.py::test_issue38",
"tests/test_regressions.py::test_issue39",
"tests/test_regressions.py::test_issue40",
"tests/test_regressions.py::test_issue78[get_name-z-select",
"tests/test_regressions.py::test_issue78[get_real_name-y-select",
"tests/test_regressions.py::test_issue78[get_parent_name-x-select",
"tests/test_regressions.py::test_issue78[get_alias-z-select",
"tests/test_regressions.py::test_issue78[get_typecast-text-select",
"tests/test_regressions.py::test_issue83",
"tests/test_regressions.py::test_comment_encoding_when_reindent",
"tests/test_regressions.py::test_parse_sql_with_binary",
"tests/test_regressions.py::test_dont_alias_keywords",
"tests/test_regressions.py::test_format_accepts_encoding",
"tests/test_regressions.py::test_stream",
"tests/test_regressions.py::test_issue90",
"tests/test_regressions.py::test_except_formatting",
"tests/test_regressions.py::test_null_with_as",
"tests/test_regressions.py::test_issue190_open_file",
"tests/test_regressions.py::test_issue193_splitting_function",
"tests/test_regressions.py::test_issue194_splitting_function",
"tests/test_regressions.py::test_issue186_get_type",
"tests/test_regressions.py::test_issue212_py2unicode",
"tests/test_regressions.py::test_issue213_leadingws",
"tests/test_regressions.py::test_issue227_gettype_cte",
"tests/test_regressions.py::test_issue207_runaway_format",
"tests/test_regressions.py::test_token_next_doesnt_ignore_skip_cm",
"tests/test_regressions.py::test_issue284_as_grouping[SELECT",
"tests/test_regressions.py::test_issue284_as_grouping[AS]",
"tests/test_regressions.py::test_issue315_utf8_by_default"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2017-02-09 09:16:34+00:00 | bsd-3-clause | 1,079 |
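For reference, a minimal reproduction of the grouping this record's test patch asserts, assuming a sqlparse build that includes the keyword fix above (the token positions mirror the test):
```python
import sqlparse
from sqlparse import sql
from sqlparse import tokens as T

# With CONCURRENTLY registered as a keyword, the index name is grouped as
# its own Identifier instead of being merged into 'CONCURRENTLY myindex'.
stmt = sqlparse.parse('CREATE INDEX CONCURRENTLY myindex ON mytable(col1);')[0]
assert stmt.tokens[0].ttype is T.Keyword.DDL        # CREATE
assert stmt.tokens[4].ttype is T.Keyword            # CONCURRENTLY
assert isinstance(stmt.tokens[6], sql.Identifier)   # myindex
assert stmt.tokens[6].value == 'myindex'
```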
|
andialbrecht__sqlparse-445 | diff --git a/sqlparse/filters/others.py b/sqlparse/filters/others.py
index df4d861..b0bb898 100644
--- a/sqlparse/filters/others.py
+++ b/sqlparse/filters/others.py
@@ -26,6 +26,13 @@ class StripCommentsFilter(object):
if (prev_ is None or next_ is None or
prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
next_.is_whitespace or next_.match(T.Punctuation, ')')):
+ # Insert a whitespace to ensure the following SQL produces
+ # a valid SQL. For example:
+ #
+ # Before: select a--comment\nfrom foo
+ # After: select a from foo
+ if prev_ is not None and next_ is None:
+ tlist.tokens.insert(tidx, sql.Token(T.Whitespace, ' '))
tlist.tokens.remove(token)
else:
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
| andialbrecht/sqlparse | 488505f6c448e7eb0e4a1915bdc5b6130d44a68a | diff --git a/tests/test_format.py b/tests/test_format.py
index 72af62e..aff07c1 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -50,6 +50,19 @@ class TestFormat(object):
sql = 'select-- foo\nfrom -- bar\nwhere'
res = sqlparse.format(sql, strip_comments=True)
assert res == 'select from where'
+ sql = 'select *-- statement starts here\n\nfrom foo'
+ res = sqlparse.format(sql, strip_comments=True)
+ assert res == 'select * from foo'
+ sql = 'select * from foo-- statement starts here\nwhere'
+ res = sqlparse.format(sql, strip_comments=True)
+ assert res == 'select * from foo where'
+ sql = 'select a-- statement starts here\nfrom foo'
+ res = sqlparse.format(sql, strip_comments=True)
+ assert res == 'select a from foo'
+ sql = '--comment\nselect a-- statement starts here\n' \
+ 'from foo--comment\nf'
+ res = sqlparse.format(sql, strip_comments=True)
+ assert res == 'select a from foo f'
def test_strip_comments_invalid_option(self):
sql = 'select-- foo\nfrom -- bar\nwhere'
| strip_comments causing syntax error
If there is no space between a comment and the surrounding tokens, stripping the comment glues those tokens together and the output has a syntax error. Here's an example:
```python
import sqlparse
sql='''select * from table1--this is a comment
inner join table2 on table1.id = table2.id--this is a comment
where table1.a=1'''
sqlparse.format(sql,strip_comments=True)
```
This gives the following output, in which the comments are gone but the adjacent tokens have been glued together into invalid SQL (a sketch of the behaviour after the fix follows this record):
`select * from table1inner join table2 on table1.id = table2.idwhere table1.a=1` | 0.0 | 488505f6c448e7eb0e4a1915bdc5b6130d44a68a | [
"tests/test_format.py::TestFormat::test_strip_comments_single"
]
| [
"tests/test_format.py::TestFormat::test_keywordcase",
"tests/test_format.py::TestFormat::test_keywordcase_invalid_option",
"tests/test_format.py::TestFormat::test_identifiercase",
"tests/test_format.py::TestFormat::test_identifiercase_invalid_option",
"tests/test_format.py::TestFormat::test_identifiercase_quotes",
"tests/test_format.py::TestFormat::test_strip_comments_invalid_option",
"tests/test_format.py::TestFormat::test_strip_comments_multi",
"tests/test_format.py::TestFormat::test_strip_ws",
"tests/test_format.py::TestFormat::test_strip_ws_invalid_option",
"tests/test_format.py::TestFormat::test_preserve_ws",
"tests/test_format.py::TestFormat::test_notransform_of_quoted_crlf",
"tests/test_format.py::TestFormatReindentAligned::test_basic",
"tests/test_format.py::TestFormatReindentAligned::test_joins",
"tests/test_format.py::TestFormatReindentAligned::test_case_statement",
"tests/test_format.py::TestFormatReindentAligned::test_case_statement_with_between",
"tests/test_format.py::TestFormatReindentAligned::test_group_by",
"tests/test_format.py::TestFormatReindentAligned::test_group_by_subquery",
"tests/test_format.py::TestFormatReindentAligned::test_window_functions",
"tests/test_format.py::TestSpacesAroundOperators::test_basic",
"tests/test_format.py::TestSpacesAroundOperators::test_bools",
"tests/test_format.py::TestSpacesAroundOperators::test_nested",
"tests/test_format.py::TestSpacesAroundOperators::test_wildcard_vs_mult",
"tests/test_format.py::TestFormatReindent::test_option",
"tests/test_format.py::TestFormatReindent::test_stmts",
"tests/test_format.py::TestFormatReindent::test_keywords",
"tests/test_format.py::TestFormatReindent::test_keywords_between",
"tests/test_format.py::TestFormatReindent::test_parenthesis",
"tests/test_format.py::TestFormatReindent::test_where",
"tests/test_format.py::TestFormatReindent::test_join",
"tests/test_format.py::TestFormatReindent::test_identifier_list",
"tests/test_format.py::TestFormatReindent::test_identifier_list_with_wrap_after",
"tests/test_format.py::TestFormatReindent::test_identifier_list_comment_first",
"tests/test_format.py::TestFormatReindent::test_identifier_list_with_functions",
"tests/test_format.py::TestFormatReindent::test_long_identifier_list_with_functions",
"tests/test_format.py::TestFormatReindent::test_case",
"tests/test_format.py::TestFormatReindent::test_case2",
"tests/test_format.py::TestFormatReindent::test_nested_identifier_list",
"tests/test_format.py::TestFormatReindent::test_duplicate_linebreaks",
"tests/test_format.py::TestFormatReindent::test_keywordfunctions",
"tests/test_format.py::TestFormatReindent::test_identifier_and_functions",
"tests/test_format.py::TestOutputFormat::test_python",
"tests/test_format.py::TestOutputFormat::test_python_multiple_statements",
"tests/test_format.py::TestOutputFormat::test_php",
"tests/test_format.py::TestOutputFormat::test_sql",
"tests/test_format.py::TestOutputFormat::test_invalid_option",
"tests/test_format.py::test_format_column_ordering",
"tests/test_format.py::test_truncate_strings",
"tests/test_format.py::test_truncate_strings_invalid_option2[bar]",
"tests/test_format.py::test_truncate_strings_invalid_option2[-1]",
"tests/test_format.py::test_truncate_strings_invalid_option2[0]",
"tests/test_format.py::test_truncate_strings_doesnt_truncate_identifiers[select",
"tests/test_format.py::test_having_produces_newline",
"tests/test_format.py::test_format_right_margin_invalid_option[ten]",
"tests/test_format.py::test_format_right_margin_invalid_option[2]"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2018-11-22 00:14:00+00:00 | bsd-3-clause | 1,080 |
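A minimal sketch of the behaviour after the patch above, taken from its test cases: the filter now inserts a space where removing a trailing inline comment would otherwise glue two tokens together.
```python
import sqlparse

# Before the fix this collapsed to 'select afrom foo'.
assert sqlparse.format(
    'select a-- statement starts here\nfrom foo',
    strip_comments=True) == 'select a from foo'

# Comments already surrounded by whitespace keep working as before.
assert sqlparse.format(
    'select-- foo\nfrom -- bar\nwhere',
    strip_comments=True) == 'select from where'
```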
|
andialbrecht__sqlparse-633 | diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 586cd21..1ccfbdb 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -413,27 +413,28 @@ class Statement(TokenList):
Whitespaces and comments at the beginning of the statement
are ignored.
"""
- first_token = self.token_first(skip_cm=True)
- if first_token is None:
+ token = self.token_first(skip_cm=True)
+ if token is None:
# An "empty" statement that either has not tokens at all
# or only whitespace tokens.
return 'UNKNOWN'
- elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
- return first_token.normalized
+ elif token.ttype in (T.Keyword.DML, T.Keyword.DDL):
+ return token.normalized
- elif first_token.ttype == T.Keyword.CTE:
+ elif token.ttype == T.Keyword.CTE:
# The WITH keyword should be followed by either an Identifier or
# an IdentifierList containing the CTE definitions; the actual
# DML keyword (e.g. SELECT, INSERT) will follow next.
- fidx = self.token_index(first_token)
- tidx, token = self.token_next(fidx, skip_ws=True)
- if isinstance(token, (Identifier, IdentifierList)):
- _, dml_keyword = self.token_next(tidx, skip_ws=True)
-
- if dml_keyword is not None \
- and dml_keyword.ttype == T.Keyword.DML:
- return dml_keyword.normalized
+ tidx = self.token_index(token)
+ while tidx is not None:
+ tidx, token = self.token_next(tidx, skip_ws=True)
+ if isinstance(token, (Identifier, IdentifierList)):
+ tidx, token = self.token_next(tidx, skip_ws=True)
+
+ if token is not None \
+ and token.ttype == T.Keyword.DML:
+ return token.normalized
# Hmm, probably invalid syntax, so return unknown.
return 'UNKNOWN'
| andialbrecht/sqlparse | 907fb496f90f2719095a1f01fe24db1e5c0e15a8 | diff --git a/tests/test_regressions.py b/tests/test_regressions.py
index 4ffc69f..bc8b7dd 100644
--- a/tests/test_regressions.py
+++ b/tests/test_regressions.py
@@ -427,3 +427,12 @@ def test_splitting_at_and_backticks_issue588():
'grant foo to user1@`myhost`; grant bar to user1@`myhost`;')
assert len(splitted) == 2
assert splitted[-1] == 'grant bar to user1@`myhost`;'
+
+
+def test_comment_between_cte_clauses_issue632():
+ p, = sqlparse.parse("""
+ WITH foo AS (),
+ -- A comment before baz subquery
+ baz AS ()
+ SELECT * FROM baz;""")
+ assert p.get_type() == "SELECT"
| Statement.get_type() does not skip comments between subqueries.
A SQL query can contain comments between the CTE clauses of a multi-query WITH statement, but the `get_type()` implementation doesn't skip them:
```python
>>> query, = sqlparse.parse("""
WITH A AS (),
-- A comment about the B subquery...
B AS ()
SELECT * FROM B
""")
>>> query.get_type()
'UNKNOWN'
```
Without the comment:
```python
>>> query, = sqlparse.parse("""
WITH A AS (),
B AS ()
SELECT * FROM B
""")
>>> query.get_type()
'SELECT'
```
A sketch of the fixed behaviour follows this record. | 0.0 | 907fb496f90f2719095a1f01fe24db1e5c0e15a8 | [
"tests/test_regressions.py::test_comment_between_cte_clauses_issue632"
]
| [
"tests/test_regressions.py::test_issue9",
"tests/test_regressions.py::test_issue13",
"tests/test_regressions.py::test_issue26[--hello]",
"tests/test_regressions.py::test_issue26[--",
"tests/test_regressions.py::test_issue26[--hello\\n]",
"tests/test_regressions.py::test_issue26[--]",
"tests/test_regressions.py::test_issue26[--\\n]",
"tests/test_regressions.py::test_issue34[create]",
"tests/test_regressions.py::test_issue34[CREATE]",
"tests/test_regressions.py::test_issue35",
"tests/test_regressions.py::test_issue38",
"tests/test_regressions.py::test_issue39",
"tests/test_regressions.py::test_issue40",
"tests/test_regressions.py::test_issue78[get_name-z-select",
"tests/test_regressions.py::test_issue78[get_real_name-y-select",
"tests/test_regressions.py::test_issue78[get_parent_name-x-select",
"tests/test_regressions.py::test_issue78[get_alias-z-select",
"tests/test_regressions.py::test_issue78[get_typecast-text-select",
"tests/test_regressions.py::test_issue83",
"tests/test_regressions.py::test_comment_encoding_when_reindent",
"tests/test_regressions.py::test_parse_sql_with_binary",
"tests/test_regressions.py::test_dont_alias_keywords",
"tests/test_regressions.py::test_format_accepts_encoding",
"tests/test_regressions.py::test_stream",
"tests/test_regressions.py::test_issue90",
"tests/test_regressions.py::test_except_formatting",
"tests/test_regressions.py::test_null_with_as",
"tests/test_regressions.py::test_issue190_open_file",
"tests/test_regressions.py::test_issue193_splitting_function",
"tests/test_regressions.py::test_issue194_splitting_function",
"tests/test_regressions.py::test_issue186_get_type",
"tests/test_regressions.py::test_issue212_py2unicode",
"tests/test_regressions.py::test_issue213_leadingws",
"tests/test_regressions.py::test_issue227_gettype_cte",
"tests/test_regressions.py::test_issue207_runaway_format",
"tests/test_regressions.py::test_token_next_doesnt_ignore_skip_cm",
"tests/test_regressions.py::test_issue284_as_grouping[SELECT",
"tests/test_regressions.py::test_issue284_as_grouping[AS]",
"tests/test_regressions.py::test_issue315_utf8_by_default",
"tests/test_regressions.py::test_issue322_concurrently_is_keyword",
"tests/test_regressions.py::test_issue359_index_error_assignments[SELECT",
"tests/test_regressions.py::test_issue469_copy_as_psql_command",
"tests/test_regressions.py::test_issue485_split_multi",
"tests/test_regressions.py::test_issue489_tzcasts",
"tests/test_regressions.py::test_issue562_tzcasts",
"tests/test_regressions.py::test_as_in_parentheses_indents",
"tests/test_regressions.py::test_format_invalid_where_clause",
"tests/test_regressions.py::test_splitting_at_and_backticks_issue588"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-07-19 12:02:34+00:00 | bsd-3-clause | 1,081 |
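For context, a short sketch of the fixed behaviour using the statement from this record's regression test: `get_type()` now keeps scanning past comments between the CTE definitions until it reaches the DML keyword.
```python
import sqlparse

stmt = sqlparse.parse(
    'WITH foo AS (),\n'
    '  -- A comment before baz subquery\n'
    '  baz AS ()\n'
    'SELECT * FROM baz;'
)[0]
assert stmt.get_type() == 'SELECT'   # was 'UNKNOWN' before the fix
```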
|
andialbrecht__sqlparse-676 | diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 175ae8e..2fb0a4c 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -91,13 +91,20 @@ def group_tzcasts(tlist):
def match(token):
return token.ttype == T.Keyword.TZCast
- def valid(token):
+ def valid_prev(token):
return token is not None
+ def valid_next(token):
+ return token is not None and (
+ token.is_whitespace
+ or token.match(T.Keyword, 'AS')
+ or token.match(*sql.TypedLiteral.M_CLOSE)
+ )
+
def post(tlist, pidx, tidx, nidx):
return pidx, nidx
- _group(tlist, sql.Identifier, match, valid, valid, post)
+ _group(tlist, sql.Identifier, match, valid_prev, valid_next, post)
def group_typed_literal(tlist):
| andialbrecht/sqlparse | 9d2cb6fc950386e9e59f29faf0d3742c4b12572c | diff --git a/tests/test_regressions.py b/tests/test_regressions.py
index 38d1840..4ffc69f 100644
--- a/tests/test_regressions.py
+++ b/tests/test_regressions.py
@@ -401,6 +401,15 @@ def test_issue489_tzcasts():
assert p.tokens[-1].get_alias() == 'foo'
+def test_issue562_tzcasts():
+ # Test that whitespace between 'from' and 'bar' is retained
+ formatted = sqlparse.format(
+ 'SELECT f(HOUR from bar AT TIME ZONE \'UTC\') from foo', reindent=True
+ )
+ assert formatted == \
+ 'SELECT f(HOUR\n from bar AT TIME ZONE \'UTC\')\nfrom foo'
+
+
def test_as_in_parentheses_indents():
# did raise NoneType has no attribute is_group in _process_parentheses
formatted = sqlparse.format('(as foo)', reindent=True)
| Space removed inside the Presto `extract` function after query formatting.
```
sqlparse.format( "SELECT extract(HOUR from from_unixtime(hour_ts) AT TIME ZONE 'America/Los_Angeles') from table", reindent=True)
```
removes the space between `from` and `from_unixtime(hour_ts)` in version 0.3.1; it works fine in 0.3.0.
Related superset PR: https://github.com/apache/incubator-superset/pull/10165
Expected
```
SELECT extract(HOUR
from from_unixtime(hour_ts) AT TIME ZONE 'America/Los_Angeles')
from table
```
Actual
```
SELECT extract(HOUR
fromfrom_unixtime(hour_ts) AT TIME ZONE 'America/Los_Angeles')
from table
```
A sketch of the restored whitespace follows this record. | 0.0 | 9d2cb6fc950386e9e59f29faf0d3742c4b12572c | [
"tests/test_regressions.py::test_issue562_tzcasts"
]
| [
"tests/test_regressions.py::test_issue9",
"tests/test_regressions.py::test_issue13",
"tests/test_regressions.py::test_issue26[--hello]",
"tests/test_regressions.py::test_issue26[--",
"tests/test_regressions.py::test_issue26[--hello\\n]",
"tests/test_regressions.py::test_issue26[--]",
"tests/test_regressions.py::test_issue26[--\\n]",
"tests/test_regressions.py::test_issue34[create]",
"tests/test_regressions.py::test_issue34[CREATE]",
"tests/test_regressions.py::test_issue35",
"tests/test_regressions.py::test_issue38",
"tests/test_regressions.py::test_issue39",
"tests/test_regressions.py::test_issue40",
"tests/test_regressions.py::test_issue78[get_name-z-select",
"tests/test_regressions.py::test_issue78[get_real_name-y-select",
"tests/test_regressions.py::test_issue78[get_parent_name-x-select",
"tests/test_regressions.py::test_issue78[get_alias-z-select",
"tests/test_regressions.py::test_issue78[get_typecast-text-select",
"tests/test_regressions.py::test_issue83",
"tests/test_regressions.py::test_comment_encoding_when_reindent",
"tests/test_regressions.py::test_parse_sql_with_binary",
"tests/test_regressions.py::test_dont_alias_keywords",
"tests/test_regressions.py::test_format_accepts_encoding",
"tests/test_regressions.py::test_stream",
"tests/test_regressions.py::test_issue90",
"tests/test_regressions.py::test_except_formatting",
"tests/test_regressions.py::test_null_with_as",
"tests/test_regressions.py::test_issue190_open_file",
"tests/test_regressions.py::test_issue193_splitting_function",
"tests/test_regressions.py::test_issue194_splitting_function",
"tests/test_regressions.py::test_issue186_get_type",
"tests/test_regressions.py::test_issue212_py2unicode",
"tests/test_regressions.py::test_issue213_leadingws",
"tests/test_regressions.py::test_issue227_gettype_cte",
"tests/test_regressions.py::test_issue207_runaway_format",
"tests/test_regressions.py::test_token_next_doesnt_ignore_skip_cm",
"tests/test_regressions.py::test_issue284_as_grouping[SELECT",
"tests/test_regressions.py::test_issue284_as_grouping[AS]",
"tests/test_regressions.py::test_issue315_utf8_by_default",
"tests/test_regressions.py::test_issue322_concurrently_is_keyword",
"tests/test_regressions.py::test_issue359_index_error_assignments[SELECT",
"tests/test_regressions.py::test_issue469_copy_as_psql_command",
"tests/test_regressions.py::test_issue485_split_multi",
"tests/test_regressions.py::test_issue489_tzcasts",
"tests/test_regressions.py::test_as_in_parentheses_indents",
"tests/test_regressions.py::test_format_invalid_where_clause",
"tests/test_regressions.py::test_splitting_at_and_backticks_issue588"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-08-16 11:53:07+00:00 | bsd-3-clause | 1,082 |
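A sketch of the restored behaviour, based on this record's regression test; the exact reindented layout may vary between versions, so the check below only asserts that the whitespace after the inner `from` survives.
```python
import sqlparse

formatted = sqlparse.format(
    "SELECT f(HOUR from bar AT TIME ZONE 'UTC') from foo", reindent=True)

# Before the fix the inner 'from bar' was collapsed into 'frombar'.
assert 'from bar' in formatted
assert 'frombar' not in formatted
```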
|
andialbrecht__sqlparse-746 | diff --git a/AUTHORS b/AUTHORS
index 4617b7d..934bbe3 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -31,9 +31,11 @@ Alphabetical list of contributors:
* Florian Bauer <[email protected]>
* Fredy Wijaya <[email protected]>
* Gavin Wahl <[email protected]>
+* Georg Traar <[email protected]>
* Hugo van Kemenade <[email protected]>
* hurcy <[email protected]>
* Ian Robertson <[email protected]>
+* Igor Khrol <[email protected]>
* JacekPliszka <[email protected]>
* JavierPan <[email protected]>
* Jean-Martin Archer <[email protected]>
diff --git a/CHANGELOG b/CHANGELOG
index 0ede280..cbfbcf2 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -10,10 +10,12 @@ Enhancements:
* Splitting statements now allows to remove the semicolon at the end.
Some database backends love statements without semicolon (issue742).
+* Support TypedLiterals in get_parameters (pr649, by Khrol).
Bug Fixes
* Ignore dunder attributes when creating Tokens (issue672).
+* Allow operators to precede dollar-quoted strings (issue763).
Release 0.4.4 (Apr 18, 2023)
diff --git a/docs/source/extending.rst b/docs/source/extending.rst
index 0c10924..866303b 100644
--- a/docs/source/extending.rst
+++ b/docs/source/extending.rst
@@ -70,7 +70,7 @@ a keyword to the lexer:
lex.add_keywords(keywords.KEYWORDS)
# add a custom keyword dictionary
- lex.add_keywords({'BAR', sqlparse.tokens.Keyword})
+ lex.add_keywords({'BAR': sqlparse.tokens.Keyword})
# no configuration is passed here. The lexer is used as a singleton.
sqlparse.parse("select * from foo zorder by bar;")
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 57d257e..c486318 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -360,6 +360,7 @@ def group_functions(tlist):
tidx, token = tlist.token_next_by(t=T.Name, idx=tidx)
+@recurse(sql.Identifier)
def group_order(tlist):
"""Group together Identifier and Asc/Desc token"""
tidx, token = tlist.token_next_by(t=T.Keyword.Order)
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index b45f3e0..d3794fd 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -30,7 +30,7 @@ SQL_REGEX = [
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
- (r'((?<!\S)\$(?:[_A-ZÀ-Ü]\w*)?\$)[\s\S]*?\1', tokens.Literal),
+ (r'((?<![\w\"\$])\$(?:[_A-ZÀ-Ü]\w*)?\$)[\s\S]*?\1', tokens.Literal),
(r'\?', tokens.Name.Placeholder),
(r'%(\(\w+\))?s', tokens.Name.Placeholder),
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 1ccfbdb..41606dd 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -619,12 +619,14 @@ class Function(NameAliasMixin, TokenList):
def get_parameters(self):
"""Return a list of parameters."""
parenthesis = self.tokens[-1]
+ result = []
for token in parenthesis.tokens:
if isinstance(token, IdentifierList):
return token.get_identifiers()
- elif imt(token, i=(Function, Identifier), t=T.Literal):
- return [token, ]
- return []
+ elif imt(token, i=(Function, Identifier, TypedLiteral),
+ t=T.Literal):
+ result.append(token)
+ return result
class Begin(TokenList):
| andialbrecht/sqlparse | f101546dafa921edfea5b3107731504665b758ea | diff --git a/tests/test_grouping.py b/tests/test_grouping.py
index 03d16c5..e90243b 100644
--- a/tests/test_grouping.py
+++ b/tests/test_grouping.py
@@ -247,6 +247,14 @@ def test_grouping_identifier_list_with_order():
assert str(p.tokens[0].tokens[3]) == '2 desc'
+def test_grouping_nested_identifier_with_order():
+ # issue745
+ p = sqlparse.parse('(a desc)')[0]
+ assert isinstance(p.tokens[0], sql.Parenthesis)
+ assert isinstance(p.tokens[0].tokens[1], sql.Identifier)
+ assert str(p.tokens[0].tokens[1]) == 'a desc'
+
+
def test_grouping_where():
s = 'select * from foo where bar = 1 order by id desc'
p = sqlparse.parse(s)[0]
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 5feef5a..be416ef 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -133,6 +133,11 @@ def test_parse_nested_function():
assert type(t[0]) is sql.Function
+def test_parse_casted_params():
+ t = sqlparse.parse("foo(DATE '2023-11-14', TIMESTAMP '2023-11-15')")[0].tokens[0].get_parameters()
+ assert len(t) == 2
+
+
def test_parse_div_operator():
p = sqlparse.parse('col1 DIV 5 AS div_col1')[0].tokens
assert p[0].tokens[0].tokens[2].ttype is T.Operator
@@ -180,6 +185,14 @@ def test_psql_quotation_marks():
$PROC_2$ LANGUAGE plpgsql;""")
assert len(t) == 2
+ # operators are valid infront of dollar quoted strings
+ t = sqlparse.split("""UPDATE SET foo =$$bar;SELECT bar$$""")
+ assert len(t) == 1
+
+ # identifiers must be separated by whitespace
+ t = sqlparse.split("""UPDATE SET foo TO$$bar;SELECT bar$$""")
+ assert len(t) == 2
+
def test_double_precision_is_builtin():
s = 'DOUBLE PRECISION'
| The group_order() function fails to group an ordered identifier when it is nested inside a subquery
**Describe the bug**
The [`group_order()`](https://github.com/andialbrecht/sqlparse/blob/f101546dafa921edfea5b3107731504665b758ea/sqlparse/engine/grouping.py#L363-L371) function does not leverage the `@recurse` decorator and thus the identifier and `ASC`/`DESC` token aren't grouped together in the context of a subquery.
**To Reproduce**
The following correctly groups `bar DESC` as a single identifier,
```python
>>> from sqlparse import parse
>>>
>>> parse("SELECT * FROM foo ORDER BY bar DESC")[0]._pprint_tree()
|- 0 DML 'SELECT'
|- 1 Whitespace ' '
|- 2 Wildcard '*'
|- 3 Whitespace ' '
|- 4 Keyword 'FROM'
|- 5 Whitespace ' '
|- 6 Identifier 'foo'
| `- 0 Name 'foo'
|- 7 Whitespace ' '
|- 8 Keyword 'ORDER ...'
|- 9 Whitespace ' '
`- 10 Identifier 'bar DE...'
|- 0 Identifier 'bar'
| `- 0 Name 'bar'
|- 1 Whitespace ' '
`- 2 Order 'DESC'
```
whereas when wrapped within a subquery,
```python
>>> from sqlparse import parse
>>> parse("SELECT * FROM (SELECT * FROM foo ORDER BY bar DESC)")[0]._pprint_tree()
|- 0 DML 'SELECT'
|- 1 Whitespace ' '
|- 2 Wildcard '*'
|- 3 Whitespace ' '
|- 4 Keyword 'FROM'
|- 5 Whitespace ' '
`- 6 Parenthesis '(SELEC...'
|- 0 Punctuation '('
|- 1 DML 'SELECT'
|- 2 Whitespace ' '
|- 3 Wildcard '*'
|- 4 Whitespace ' '
|- 5 Keyword 'FROM'
|- 6 Whitespace ' '
|- 7 Identifier 'foo'
| `- 0 Name 'foo'
|- 8 Whitespace ' '
|- 9 Keyword 'ORDER ...'
|- 10 Whitespace ' '
|- 11 Identifier 'bar'
| `- 0 Name 'bar'
|- 12 Whitespace ' '
|- 13 Order 'DESC'
`- 14 Punctuation ')'
```
the `bar DESC` is not grouped.
**Expected behavior**
The `group_order()` function should leverage the `recurse(sql.Identifier)` decorator to ensure that nested orders are grouped appropriately. A sketch of the resulting grouping follows this record.
**Versions:**
- Python: 3.9
- sqlparse: 0.4.4 | 0.0 | f101546dafa921edfea5b3107731504665b758ea | [
"tests/test_grouping.py::test_grouping_nested_identifier_with_order",
"tests/test_parse.py::test_parse_casted_params",
"tests/test_parse.py::test_psql_quotation_marks"
]
| [
"tests/test_grouping.py::test_grouping_parenthesis",
"tests/test_grouping.py::test_grouping_comments",
"tests/test_grouping.py::test_grouping_assignment[foo",
"tests/test_grouping.py::test_grouping_typed_literal[x",
"tests/test_grouping.py::test_compare_expr[select",
"tests/test_grouping.py::test_grouping_identifiers",
"tests/test_grouping.py::test_simple_identifiers[1",
"tests/test_grouping.py::test_simple_identifiers[foo",
"tests/test_grouping.py::test_simple_identifiers[1/2",
"tests/test_grouping.py::test_simple_identifiers[1<2",
"tests/test_grouping.py::test_group_identifier_list[foo,",
"tests/test_grouping.py::test_group_identifier_list[sum(a),",
"tests/test_grouping.py::test_group_identifier_list[sum(a)",
"tests/test_grouping.py::test_group_identifier_list[sum(a)::integer,",
"tests/test_grouping.py::test_group_identifier_list[sum(a)/count(b)",
"tests/test_grouping.py::test_group_identifier_list[sum(a)::integer",
"tests/test_grouping.py::test_group_identifier_list[sum(a)::integer/count(b)",
"tests/test_grouping.py::test_grouping_identifier_wildcard",
"tests/test_grouping.py::test_grouping_identifier_name_wildcard",
"tests/test_grouping.py::test_grouping_identifier_invalid",
"tests/test_grouping.py::test_grouping_identifier_invalid_in_middle",
"tests/test_grouping.py::test_grouping_identifer_as[foo",
"tests/test_grouping.py::test_grouping_identifier_as_invalid",
"tests/test_grouping.py::test_grouping_identifier_function",
"tests/test_grouping.py::test_grouping_operation[foo+100]",
"tests/test_grouping.py::test_grouping_operation[foo",
"tests/test_grouping.py::test_grouping_operation[foo*100]",
"tests/test_grouping.py::test_grouping_identifier_list",
"tests/test_grouping.py::test_grouping_identifier_list_subquery",
"tests/test_grouping.py::test_grouping_identifier_list_case",
"tests/test_grouping.py::test_grouping_identifier_list_other",
"tests/test_grouping.py::test_grouping_identifier_list_with_inline_comments",
"tests/test_grouping.py::test_grouping_identifiers_with_operators",
"tests/test_grouping.py::test_grouping_identifier_list_with_order",
"tests/test_grouping.py::test_grouping_where",
"tests/test_grouping.py::test_grouping_where_union[select",
"tests/test_grouping.py::test_returning_kw_ends_where_clause",
"tests/test_grouping.py::test_into_kw_ends_where_clause",
"tests/test_grouping.py::test_grouping_typecast[select",
"tests/test_grouping.py::test_grouping_alias",
"tests/test_grouping.py::test_grouping_alias_case",
"tests/test_grouping.py::test_grouping_alias_ctas",
"tests/test_grouping.py::test_grouping_subquery_no_parens",
"tests/test_grouping.py::test_grouping_alias_returns_none[foo.bar]",
"tests/test_grouping.py::test_grouping_alias_returns_none[x,",
"tests/test_grouping.py::test_grouping_alias_returns_none[x",
"tests/test_grouping.py::test_grouping_idlist_function",
"tests/test_grouping.py::test_grouping_comparison_exclude",
"tests/test_grouping.py::test_grouping_function",
"tests/test_grouping.py::test_grouping_function_not_in",
"tests/test_grouping.py::test_grouping_varchar",
"tests/test_grouping.py::test_statement_get_type",
"tests/test_grouping.py::test_identifier_with_operators",
"tests/test_grouping.py::test_identifier_with_op_trailing_ws",
"tests/test_grouping.py::test_identifier_with_string_literals",
"tests/test_grouping.py::test_identifier_consumes_ordering",
"tests/test_grouping.py::test_comparison_with_keywords",
"tests/test_grouping.py::test_comparison_with_floats",
"tests/test_grouping.py::test_comparison_with_parenthesis",
"tests/test_grouping.py::test_comparison_with_strings[=]",
"tests/test_grouping.py::test_comparison_with_strings[!=]",
"tests/test_grouping.py::test_comparison_with_strings[>]",
"tests/test_grouping.py::test_comparison_with_strings[<]",
"tests/test_grouping.py::test_comparison_with_strings[<=]",
"tests/test_grouping.py::test_comparison_with_strings[>=]",
"tests/test_grouping.py::test_comparison_with_strings[~]",
"tests/test_grouping.py::test_comparison_with_strings[~~]",
"tests/test_grouping.py::test_comparison_with_strings[!~~]",
"tests/test_grouping.py::test_comparison_with_strings[LIKE]",
"tests/test_grouping.py::test_comparison_with_strings[NOT",
"tests/test_grouping.py::test_comparison_with_strings[ILIKE]",
"tests/test_grouping.py::test_like_and_ilike_comparison",
"tests/test_grouping.py::test_comparison_with_functions",
"tests/test_grouping.py::test_comparison_with_typed_literal",
"tests/test_grouping.py::test_forloops[FOR]",
"tests/test_grouping.py::test_forloops[FOREACH]",
"tests/test_grouping.py::test_nested_for",
"tests/test_grouping.py::test_begin",
"tests/test_grouping.py::test_keyword_followed_by_parenthesis",
"tests/test_grouping.py::test_nested_begin",
"tests/test_grouping.py::test_aliased_column_without_as",
"tests/test_grouping.py::test_qualified_function",
"tests/test_grouping.py::test_aliased_function_without_as",
"tests/test_grouping.py::test_aliased_literal_without_as",
"tests/test_grouping.py::test_grouping_as_cte",
"tests/test_grouping.py::test_grouping_create_table",
"tests/test_parse.py::test_parse_tokenize",
"tests/test_parse.py::test_parse_multistatement",
"tests/test_parse.py::test_parse_newlines[select\\n*from",
"tests/test_parse.py::test_parse_newlines[select\\r\\n*from",
"tests/test_parse.py::test_parse_newlines[select\\r*from",
"tests/test_parse.py::test_parse_within",
"tests/test_parse.py::test_parse_child_of",
"tests/test_parse.py::test_parse_has_ancestor",
"tests/test_parse.py::test_parse_float[.5]",
"tests/test_parse.py::test_parse_float[.51]",
"tests/test_parse.py::test_parse_float[1.5]",
"tests/test_parse.py::test_parse_float[12.5]",
"tests/test_parse.py::test_parse_placeholder[select",
"tests/test_parse.py::test_parse_modulo_not_placeholder",
"tests/test_parse.py::test_parse_access_symbol",
"tests/test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy",
"tests/test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy2",
"tests/test_parse.py::test_parse_keyword_like_identifier",
"tests/test_parse.py::test_parse_function_parameter",
"tests/test_parse.py::test_parse_function_param_single_literal",
"tests/test_parse.py::test_parse_nested_function",
"tests/test_parse.py::test_parse_div_operator",
"tests/test_parse.py::test_quoted_identifier",
"tests/test_parse.py::test_valid_identifier_names[foo]",
"tests/test_parse.py::test_valid_identifier_names[_foo]",
"tests/test_parse.py::test_valid_identifier_names[1_data]",
"tests/test_parse.py::test_valid_identifier_names[\\u696d\\u8005\\u540d\\u7a31]",
"tests/test_parse.py::test_double_precision_is_builtin",
"tests/test_parse.py::test_placeholder[?]",
"tests/test_parse.py::test_placeholder[:1]",
"tests/test_parse.py::test_placeholder[:foo]",
"tests/test_parse.py::test_placeholder[%s]",
"tests/test_parse.py::test_placeholder[%(foo)s]",
"tests/test_parse.py::test_scientific_numbers[6.67428E-8-expected0]",
"tests/test_parse.py::test_scientific_numbers[1.988e33-expected1]",
"tests/test_parse.py::test_scientific_numbers[1e-12-expected2]",
"tests/test_parse.py::test_scientific_numbers[e1-None]",
"tests/test_parse.py::test_single_quotes_are_strings",
"tests/test_parse.py::test_double_quotes_are_identifiers",
"tests/test_parse.py::test_single_quotes_with_linebreaks",
"tests/test_parse.py::test_sqlite_identifiers",
"tests/test_parse.py::test_simple_1d_array_index",
"tests/test_parse.py::test_2d_array_index",
"tests/test_parse.py::test_array_index_function_result",
"tests/test_parse.py::test_schema_qualified_array_index",
"tests/test_parse.py::test_aliased_array_index",
"tests/test_parse.py::test_array_literal",
"tests/test_parse.py::test_typed_array_definition",
"tests/test_parse.py::test_single_line_comments[select",
"tests/test_parse.py::test_names_and_special_names[foo]",
"tests/test_parse.py::test_names_and_special_names[@foo]",
"tests/test_parse.py::test_names_and_special_names[#foo]",
"tests/test_parse.py::test_names_and_special_names[##foo]",
"tests/test_parse.py::test_get_token_at_offset",
"tests/test_parse.py::test_pprint",
"tests/test_parse.py::test_wildcard_multiplication",
"tests/test_parse.py::test_stmt_tokens_parents",
"tests/test_parse.py::test_dbldollar_as_literal[$$foo$$-True]",
"tests/test_parse.py::test_dbldollar_as_literal[$_$foo$_$-True]",
"tests/test_parse.py::test_dbldollar_as_literal[$token$",
"tests/test_parse.py::test_dbldollar_as_literal[$_$",
"tests/test_parse.py::test_dbldollar_as_literal[$A$",
"tests/test_parse.py::test_non_ascii",
"tests/test_parse.py::test_get_real_name",
"tests/test_parse.py::test_from_subquery",
"tests/test_parse.py::test_parenthesis",
"tests/test_parse.py::test_configurable_keywords",
"tests/test_parse.py::test_configurable_regex"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-11-07 05:00:37+00:00 | bsd-3-clause | 1,083 |
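Two short checks mirroring this record's test patch, assuming a build with the fix above: `group_order` now recurses into nested groups, and `get_parameters()` collects typed literals.
```python
import sqlparse
from sqlparse import sql

# Ordering is now grouped even inside parentheses.
paren = sqlparse.parse('(a desc)')[0].tokens[0]
assert isinstance(paren, sql.Parenthesis)
assert isinstance(paren.tokens[1], sql.Identifier)
assert str(paren.tokens[1]) == 'a desc'

# Typed literals such as DATE '...' count as function parameters.
func = sqlparse.parse("foo(DATE '2023-11-14', TIMESTAMP '2023-11-15')")[0].tokens[0]
assert len(func.get_parameters()) == 2
```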
|
andialbrecht__sqlparse-764 | diff --git a/CHANGELOG b/CHANGELOG
index 0ede280..0b48e9f 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -14,6 +14,7 @@ Enhancements:
Bug Fixes
* Ignore dunder attributes when creating Tokens (issue672).
+* Allow operators to precede dollar-quoted strings (issue763).
Release 0.4.4 (Apr 18, 2023)
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index b45f3e0..d3794fd 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -30,7 +30,7 @@ SQL_REGEX = [
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
- (r'((?<!\S)\$(?:[_A-ZÀ-Ü]\w*)?\$)[\s\S]*?\1', tokens.Literal),
+ (r'((?<![\w\"\$])\$(?:[_A-ZÀ-Ü]\w*)?\$)[\s\S]*?\1', tokens.Literal),
(r'\?', tokens.Name.Placeholder),
(r'%(\(\w+\))?s', tokens.Name.Placeholder),
| andialbrecht/sqlparse | f101546dafa921edfea5b3107731504665b758ea | diff --git a/tests/test_parse.py b/tests/test_parse.py
index 5feef5a..6e4df7c 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -180,6 +180,14 @@ def test_psql_quotation_marks():
$PROC_2$ LANGUAGE plpgsql;""")
assert len(t) == 2
+ # operators are valid infront of dollar quoted strings
+ t = sqlparse.split("""UPDATE SET foo =$$bar;SELECT bar$$""")
+ assert len(t) == 1
+
+ # identifiers must be separated by whitespace
+ t = sqlparse.split("""UPDATE SET foo TO$$bar;SELECT bar$$""")
+ assert len(t) == 2
+
def test_double_precision_is_builtin():
s = 'DOUBLE PRECISION'
| Dollar quoted strings (PostgreSQL) cannot follow an operator (e.g. `=$$Hello$$`)
**Describe the bug**
Dollar-quoted strings (e.g. in PostgreSQL) are not properly detected when there is an operator directly preceding the dollar-quoted string (e.g. `var=$$text$$`). While according to the PostgreSQL docs ...
> A dollar-quoted string that follows a keyword or identifier must be separated from it by whitespace; otherwise the dollar quoting delimiter would be taken as part of the preceding identifier.
... this does not hold true for operators; e.g. `SET application_name=$$Hello$$;` is valid.
This seems to relate to
https://github.com/andialbrecht/sqlparse/blob/f101546dafa921edfea5b3107731504665b758ea/sqlparse/keywords.py#L33
and the negative lookbehind ensuring that there's no non-whitespace character before the starting `$`. This potentially should be changed to
```
(r'((?<![\w\"\$])\$(?:[_A-ZÀ-Ü]\w*)?\$)[\s\S]*?\1', tokens.Literal)
```
which would filter out any keywords or identifiers. A sketch of the resulting splitting behaviour follows this record.
**To Reproduce**
```python
sqlparse.split('''update test set a=$$test;test$$;''');
```
```
['update test set a=$$test;', 'test$$;']
```
**Expected behavior**
```
['update test set a=$$test;test$$;']
```
**Versions:**
- Python: 3.12.1
- sqlparse: 0.4.4
| 0.0 | f101546dafa921edfea5b3107731504665b758ea | [
"tests/test_parse.py::test_psql_quotation_marks"
]
| [
"tests/test_parse.py::test_parse_tokenize",
"tests/test_parse.py::test_parse_multistatement",
"tests/test_parse.py::test_parse_newlines[select\\n*from",
"tests/test_parse.py::test_parse_newlines[select\\r\\n*from",
"tests/test_parse.py::test_parse_newlines[select\\r*from",
"tests/test_parse.py::test_parse_within",
"tests/test_parse.py::test_parse_child_of",
"tests/test_parse.py::test_parse_has_ancestor",
"tests/test_parse.py::test_parse_float[.5]",
"tests/test_parse.py::test_parse_float[.51]",
"tests/test_parse.py::test_parse_float[1.5]",
"tests/test_parse.py::test_parse_float[12.5]",
"tests/test_parse.py::test_parse_placeholder[select",
"tests/test_parse.py::test_parse_modulo_not_placeholder",
"tests/test_parse.py::test_parse_access_symbol",
"tests/test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy",
"tests/test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy2",
"tests/test_parse.py::test_parse_keyword_like_identifier",
"tests/test_parse.py::test_parse_function_parameter",
"tests/test_parse.py::test_parse_function_param_single_literal",
"tests/test_parse.py::test_parse_nested_function",
"tests/test_parse.py::test_parse_div_operator",
"tests/test_parse.py::test_quoted_identifier",
"tests/test_parse.py::test_valid_identifier_names[foo]",
"tests/test_parse.py::test_valid_identifier_names[_foo]",
"tests/test_parse.py::test_valid_identifier_names[1_data]",
"tests/test_parse.py::test_valid_identifier_names[\\u696d\\u8005\\u540d\\u7a31]",
"tests/test_parse.py::test_double_precision_is_builtin",
"tests/test_parse.py::test_placeholder[?]",
"tests/test_parse.py::test_placeholder[:1]",
"tests/test_parse.py::test_placeholder[:foo]",
"tests/test_parse.py::test_placeholder[%s]",
"tests/test_parse.py::test_placeholder[%(foo)s]",
"tests/test_parse.py::test_scientific_numbers[6.67428E-8-expected0]",
"tests/test_parse.py::test_scientific_numbers[1.988e33-expected1]",
"tests/test_parse.py::test_scientific_numbers[1e-12-expected2]",
"tests/test_parse.py::test_scientific_numbers[e1-None]",
"tests/test_parse.py::test_single_quotes_are_strings",
"tests/test_parse.py::test_double_quotes_are_identifiers",
"tests/test_parse.py::test_single_quotes_with_linebreaks",
"tests/test_parse.py::test_sqlite_identifiers",
"tests/test_parse.py::test_simple_1d_array_index",
"tests/test_parse.py::test_2d_array_index",
"tests/test_parse.py::test_array_index_function_result",
"tests/test_parse.py::test_schema_qualified_array_index",
"tests/test_parse.py::test_aliased_array_index",
"tests/test_parse.py::test_array_literal",
"tests/test_parse.py::test_typed_array_definition",
"tests/test_parse.py::test_single_line_comments[select",
"tests/test_parse.py::test_names_and_special_names[foo]",
"tests/test_parse.py::test_names_and_special_names[@foo]",
"tests/test_parse.py::test_names_and_special_names[#foo]",
"tests/test_parse.py::test_names_and_special_names[##foo]",
"tests/test_parse.py::test_get_token_at_offset",
"tests/test_parse.py::test_pprint",
"tests/test_parse.py::test_wildcard_multiplication",
"tests/test_parse.py::test_stmt_tokens_parents",
"tests/test_parse.py::test_dbldollar_as_literal[$$foo$$-True]",
"tests/test_parse.py::test_dbldollar_as_literal[$_$foo$_$-True]",
"tests/test_parse.py::test_dbldollar_as_literal[$token$",
"tests/test_parse.py::test_dbldollar_as_literal[$_$",
"tests/test_parse.py::test_dbldollar_as_literal[$A$",
"tests/test_parse.py::test_non_ascii",
"tests/test_parse.py::test_get_real_name",
"tests/test_parse.py::test_from_subquery",
"tests/test_parse.py::test_parenthesis",
"tests/test_parse.py::test_configurable_keywords",
"tests/test_parse.py::test_configurable_regex"
]
| {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2024-02-05 06:38:08+00:00 | bsd-3-clause | 1,084 |
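A sketch of the splitting behaviour after the lookbehind change above, mirroring this record's tests:
```python
import sqlparse

# An operator may now directly precede a dollar-quoted string, so the
# embedded ';' no longer splits the statement in two.
assert len(sqlparse.split('update test set a=$$test;test$$;')) == 1

# A keyword or identifier still has to be separated by whitespace.
assert len(sqlparse.split('UPDATE SET foo TO$$bar;SELECT bar$$')) == 2
```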
|
andrasmaroy__pconf-32 | diff --git a/pconf/pconf.py b/pconf/pconf.py
index 72fdaeb..81848b3 100644
--- a/pconf/pconf.py
+++ b/pconf/pconf.py
@@ -33,8 +33,7 @@ class Pconf(object):
"""
results = {}
- hierarchy = cls.__hierarchy
- hierarchy.reverse()
+ hierarchy = cls.__hierarchy[::-1]
for storeMethod in hierarchy:
cls.merger.merge(results, storeMethod.get())
diff --git a/setup.py b/setup.py
index 4c2269c..669c551 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ def read(fname):
setup(
name="pconf",
- version="1.7.1",
+ version="1.7.2",
author="Andras Maroy",
author_email="[email protected]",
description=("Hierarchical python configuration with files, environment variables, command-line arguments."),
| andrasmaroy/pconf | fa6203d593f7c1ec862dd1647df12f5e2b522844 | diff --git a/tests/test_hierarchy.py b/tests/test_hierarchy.py
index e616d6c..34a8e56 100644
--- a/tests/test_hierarchy.py
+++ b/tests/test_hierarchy.py
@@ -42,6 +42,31 @@ class TestHierarchy(TestCase):
self.assertEqual(expected, results)
+ @patch("pconf.store.env.Env")
+ @patch("pconf.store.file.File")
+ def test_double_forward(self, mock_file, mock_env):
+ mocked_env = MagicMock()
+ mocked_env.get.return_value = self.TEST_ENV_RESULT
+ mock_env.return_value = mocked_env
+
+ mocked_file = MagicMock()
+ mocked_file.get.return_value = self.TEST_FILE_RESULT
+ mock_file.return_value = mocked_file
+
+ Pconf.env()
+ Pconf.file(self.TEST_FILE_PATH)
+ Pconf.get()
+ results = Pconf.get()
+
+ expected = {
+ "file": "result",
+ "env": "result",
+ "overlapping": "env",
+ "deep": {"stillhere": "stillhere", "overlapping": "env"},
+ }
+
+ self.assertEqual(expected, results)
+
@patch("pconf.store.env.Env")
@patch("pconf.store.file.File")
def test_backward(self, mock_file, mock_env):
@@ -65,3 +90,28 @@ class TestHierarchy(TestCase):
}
self.assertEqual(expected, results)
+
+ @patch("pconf.store.env.Env")
+ @patch("pconf.store.file.File")
+ def test_double_backward(self, mock_file, mock_env):
+ mocked_env = MagicMock()
+ mocked_env.get.return_value = self.TEST_ENV_RESULT
+ mock_env.return_value = mocked_env
+
+ mocked_file = MagicMock()
+ mocked_file.get.return_value = self.TEST_FILE_RESULT
+ mock_file.return_value = mocked_file
+
+ Pconf.file(self.TEST_FILE_PATH)
+ Pconf.env()
+ Pconf.get()
+ results = Pconf.get()
+
+ expected = {
+ "file": "result",
+ "env": "result",
+ "overlapping": "file",
+ "deep": {"stillhere": "stillhere", "overlapping": "file"},
+ }
+
+ self.assertEqual(expected, results)
| Calling Pconf.get() multiple times causes configuration flips because the store order is reversed in place
Calling Pconf.get() multiple times fails to return the same configuration: the order of the registered storeMethods flips on every call.
Scenario (with the environment variable `COMPUTERNAME=aws-ron` set):
```
Pconf.env()
Pconf.defaults({'COMPUTERNAME': 'localhost'})
print(Pconf.get()) # ==> {'COMPUTERNAME': 'aws-ron', ...}
print(Pconf.get()) # ==> {'COMPUTERNAME': 'localhost', ...}
print(Pconf.get()) # ==> {'COMPUTERNAME': 'aws-ron', ...}
print(Pconf.get()) # ==> {'COMPUTERNAME': 'localhost', ...}
...
```
Quick fix: do not reverse the hierarchy in place; use Python's slice notation to build a reversed copy instead (a sketch of the aliasing pitfall follows this record):
`hierarchy = cls.__hierarchy[::-1]`
https://github.com/ronsinai/pconf/blob/master/pconf/pconf.py#L36
https://github.com/ronsinai/pconf/blob/master/pconf/pconf.py#L37 | 0.0 | fa6203d593f7c1ec862dd1647df12f5e2b522844 | [
"tests/test_hierarchy.py::TestHierarchy::test_double_backward",
"tests/test_hierarchy.py::TestHierarchy::test_double_forward"
]
| [
"tests/test_hierarchy.py::TestHierarchy::test_backward",
"tests/test_hierarchy.py::TestHierarchy::test_forward"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-12-08 17:40:02+00:00 | mit | 1,085 |
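A pure-Python sketch of the aliasing pitfall behind this record's fix: `list.reverse()` mutates the shared class-level list in place, so every other `get()` call sees it flipped, while slicing returns a fresh reversed copy.
```python
hierarchy = ['env', 'defaults']

alias = hierarchy
alias.reverse()                  # in place: the shared list is now flipped
assert hierarchy == ['defaults', 'env']

hierarchy = ['env', 'defaults']
reversed_copy = hierarchy[::-1]  # slice: the original order is untouched
assert hierarchy == ['env', 'defaults']
assert reversed_copy == ['defaults', 'env']
```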
|
andreoliwa__nitpick-603 | diff --git a/src/nitpick/style/fetchers/github.py b/src/nitpick/style/fetchers/github.py
index 068dedf..4cb0cc5 100644
--- a/src/nitpick/style/fetchers/github.py
+++ b/src/nitpick/style/fetchers/github.py
@@ -50,14 +50,10 @@ class GitHubURL:
return token
@property
- def credentials(self) -> tuple[str, str] | tuple[()]:
- """Credentials encoded in this URL.
-
- A tuple of ``(api_token, '')`` if present, or empty tuple otherwise.
-
- """
+ def authorization_header(self) -> dict[str, str] | None:
+ """Authorization header encoded in this URL."""
token = self.token
- return (token, "") if token else ()
+ return {"Authorization": f"token {token}"} if token else None
@property
def git_reference_or_default(self) -> str:
@@ -166,5 +162,5 @@ class GitHubFetcher(HttpFetcher): # pylint: disable=too-few-public-methods
def _download(self, url: furl, **kwargs) -> str:
github_url = GitHubURL.from_furl(url)
- kwargs.setdefault("auth", github_url.credentials)
+ kwargs.setdefault("headers", github_url.authorization_header)
return super()._download(github_url.raw_content_url, **kwargs)
| andreoliwa/nitpick | 24a1cc345bdf367cca531d6020f125e609b6c945 | diff --git a/tests/test_style.py b/tests/test_style.py
index a9bb199..36ea501 100644
--- a/tests/test_style.py
+++ b/tests/test_style.py
@@ -1,6 +1,5 @@
"""Style tests."""
import warnings
-from base64 import b64encode
from pathlib import Path
from textwrap import dedent
from unittest import mock
@@ -426,8 +425,7 @@ def test_fetch_private_github_urls(tmp_path):
missing = "thing"{SUGGESTION_END}
"""
)
- token_on_basic_auth = b64encode(f"{file_token}:".encode()).decode().strip()
- assert responses.calls[0].request.headers["Authorization"] == f"Basic {token_on_basic_auth}"
+ assert responses.calls[0].request.headers["Authorization"] == f"token {file_token}"
project.flake8(offline=True).assert_no_errors()
@@ -460,8 +458,7 @@ def test_fetch_private_github_urls_no_branch(tmp_path):
"""
)
assert responses.calls[0].request.headers["Authorization"] == f"token {file_token}"
- token_on_basic_auth = b64encode(f"{file_token}:".encode()).decode().strip()
- assert responses.calls[1].request.headers["Authorization"] == f"Basic {token_on_basic_auth}"
+ assert responses.calls[1].request.headers["Authorization"] == f"token {file_token}"
project.flake8(offline=True).assert_no_errors()
@@ -480,10 +477,10 @@ def test_fetch_private_github_urls_no_branch(tmp_path):
"https://raw.githubusercontent.com/andreoliwa/nitpick/develop/initial.toml",
],
)
-def test_github_url_without_token_has_no_credentials(style_url):
+def test_github_url_without_token_has_no_authorization_header(style_url):
"""Check private GitHub URLs with a token in various places are parsed correctly."""
parsed = GitHubURL.from_furl(furl(style_url))
- assert parsed.credentials == ()
+ assert parsed.authorization_header is None
@pytest.mark.parametrize(
@@ -501,10 +498,10 @@ def test_github_url_without_token_has_no_credentials(style_url):
"https://[email protected]/andreoliwa/nitpick/develop/initial.toml",
],
)
-def test_github_url_with_fixed_userinfo_token_has_correct_credential(url):
+def test_github_url_with_fixed_userinfo_token_has_correct_authorization_header(url):
"""Check private GitHub URLs with a token in various places are parsed correctly."""
parsed = GitHubURL.from_furl(furl(url))
- assert parsed.credentials == ("token", "")
+ assert parsed.authorization_header == {"Authorization": "token token"}
@pytest.mark.parametrize(
@@ -522,11 +519,11 @@ def test_github_url_with_fixed_userinfo_token_has_correct_credential(url):
"https://[email protected]/andreoliwa/nitpick/develop/initial.toml",
],
)
-def test_github_url_with_variable_userinfo_token_has_correct_credential(url, monkeypatch):
+def test_github_url_with_variable_userinfo_token_has_correct_authorization_header(url, monkeypatch):
"""Check private GitHub URLs with a token in various places are parsed correctly."""
monkeypatch.setenv("TOKEN", "envvar-token")
parsed = GitHubURL.from_furl(furl(url))
- assert parsed.credentials == ("envvar-token", "")
+ assert parsed.authorization_header == {"Authorization": "token envvar-token"}
@pytest.mark.parametrize(
@@ -546,18 +543,18 @@ def test_github_url_with_variable_userinfo_token_has_correct_credential(url, mon
"github://$ENVVAR@andreoliwa/nitpick/initial.toml?token=$NOTUSED",
],
)
-def test_github_url_with_variable_query_token_has_correct_credential(url, monkeypatch):
+def test_github_url_with_variable_query_token_has_correct_authorization_header(url, monkeypatch):
"""Check private GitHub URLs with a token in various places are parsed correctly."""
monkeypatch.setenv("ENVVAR", "envvar-token")
parsed = GitHubURL.from_furl(furl(url))
- assert parsed.credentials == ("envvar-token", "")
+ assert parsed.authorization_header == {"Authorization": "token envvar-token"}
-def test_github_url_with_missing_envvar_has_empty_credential(monkeypatch):
+def test_github_url_with_missing_envvar_has_empty_authorization_header(monkeypatch):
"""Environment var that doesn't exist is replaced with empty string."""
monkeypatch.delenv("MISSINGVAR", raising=False)
parsed = GitHubURL.from_furl(furl("https://github.com/foo/bar/blob/branch/filename.toml?token=$MISSINGVAR"))
- assert parsed.credentials == ()
+ assert parsed.authorization_header is None
def test_github_url_query_token_retains_other_queryparams():
@@ -567,7 +564,7 @@ def test_github_url_query_token_retains_other_queryparams():
parsed = GitHubURL.from_furl(
furl("https://github.com/foo/bar/blob/branch/filename.toml?token=somevar&leavemealone=ok")
)
- assert parsed.credentials == ("somevar", "")
+ assert parsed.authorization_header == {"Authorization": "token somevar"}
assert ("leavemealone", "ok") in parsed.url.query.params.items()
| Allow GitHub App access tokens in GitHub URLs
## Expected behavior
nitpick should be able to fetch styles from private Github repos with access tokens derived from Github Apps.
## Current behavior
Currently only Personal Access Tokens (starting with `ghp_`) work correctly. Server-to-Server Tokens (starting with `ghs_`) don't work and lead to a 404 Not Found.
## Steps to reproduce
1. [Register a Github App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app)
1. [Create an Access Token](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/generating-a-user-access-token-for-a-github-app) for that Github App
1. Use that token in a Github URL in nitpick
## Possible Solution
Currently the authentication method is username-only Basic Authentication. If a `token` Authorization header is used instead, as is already done when fetching the default branch, then both Personal Access Tokens and Server-to-Server Tokens from GitHub Apps work; a sketch contrasting the two headers follows this record.
Other access token types may also work with this change, but I haven't tried them.
## Context
We want to use Nitpick in our CI, and there we want to use GitHub App access tokens instead of Personal Access Tokens.
## Your environment
- `nitpick` version used: 0.33.2
| 0.0 | 24a1cc345bdf367cca531d6020f125e609b6c945 | [
"tests/test_style.py::test_fetch_private_github_urls",
"tests/test_style.py::test_fetch_private_github_urls_no_branch",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[github://andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[gh://andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[github://andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[gh://andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[https://github.com/andreoliwa/nitpick/blob/develop/initial.toml]",
"tests/test_style.py::test_github_url_without_token_has_no_authorization_header[https://raw.githubusercontent.com/andreoliwa/nitpick/develop/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[github://token@andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[gh://token@andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[github://token@andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[gh://token@andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[https://[email protected]/andreoliwa/nitpick/blob/develop/initial.toml]",
"tests/test_style.py::test_github_url_with_fixed_userinfo_token_has_correct_authorization_header[https://[email protected]/andreoliwa/nitpick/develop/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[github://$TOKEN@andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[gh://$TOKEN@andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[github://$TOKEN@andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[gh://$TOKEN@andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[https://[email protected]/andreoliwa/nitpick/blob/develop/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_userinfo_token_has_correct_authorization_header[https://[email protected]/andreoliwa/nitpick/develop/initial.toml]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[github://andreoliwa/nitpick/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[gh://andreoliwa/nitpick/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[github://andreoliwa/nitpick@develop/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[gh://andreoliwa/nitpick@develop/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[https://github.com/andreoliwa/nitpick/blob/develop/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[https://raw.githubusercontent.com/andreoliwa/nitpick/develop/initial.toml?token=$ENVVAR]",
"tests/test_style.py::test_github_url_with_variable_query_token_has_correct_authorization_header[github://$ENVVAR@andreoliwa/nitpick/initial.toml?token=$NOTUSED]",
"tests/test_style.py::test_github_url_with_missing_envvar_has_empty_authorization_header",
"tests/test_style.py::test_github_url_query_token_retains_other_queryparams"
]
| [
"tests/test_style.py::test_multiple_styles_overriding_values[False]",
"tests/test_style.py::test_multiple_styles_overriding_values[True]",
"tests/test_style.py::test_include_styles_overriding_values[False]",
"tests/test_style.py::test_include_styles_overriding_values[True]",
"tests/test_style.py::test_minimum_version[False]",
"tests/test_style.py::test_minimum_version[True]",
"tests/test_style.py::test_relative_and_other_root_dirs[False]",
"tests/test_style.py::test_relative_and_other_root_dirs[True]",
"tests/test_style.py::test_symlink_subdir[False]",
"tests/test_style.py::test_symlink_subdir[True]",
"tests/test_style.py::test_relative_style_on_urls",
"tests/test_style.py::test_local_style_should_override_settings",
"tests/test_style.py::test_include_remote_style_from_local_style",
"tests/test_style.py::test_merge_styles_into_single_file[False]",
"tests/test_style.py::test_merge_styles_into_single_file[True]",
"tests/test_style.py::test_invalid_tool_nitpick_on_pyproject_toml[False]",
"tests/test_style.py::test_invalid_tool_nitpick_on_pyproject_toml[True]",
"tests/test_style.py::test_invalid_toml",
"tests/test_style.py::test_invalid_nitpick_files[False]",
"tests/test_style.py::test_invalid_nitpick_files[True]",
"tests/test_style.py::test_always_fetch_github_raw_url[github://andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_always_fetch_github_raw_url[gh://andreoliwa/nitpick/initial.toml]",
"tests/test_style.py::test_always_fetch_github_raw_url[github://andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_always_fetch_github_raw_url[gh://andreoliwa/nitpick@develop/initial.toml]",
"tests/test_style.py::test_always_fetch_github_raw_url[https://github.com/andreoliwa/nitpick/blob/develop/initial.toml]",
"tests/test_style.py::test_always_fetch_github_raw_url[https://raw.githubusercontent.com/andreoliwa/nitpick/develop/initial.toml]",
"tests/test_style.py::test_parsing_github_urls[https://github.com/andreoliwa/nitpick/blob/develop/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/develop/src/nitpick/__init__.py-develop-develop-]",
"tests/test_style.py::test_parsing_github_urls[gh://andreoliwa/nitpick/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/develop/src/nitpick/__init__.py--develop-]",
"tests/test_style.py::test_parsing_github_urls[github://andreoliwa/nitpick/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/develop/src/nitpick/__init__.py--develop-]",
"tests/test_style.py::test_parsing_github_urls[https://github.com/andreoliwa/nitpick/blob/v0.26.0/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/v0.26.0/src/nitpick/[email protected]]",
"tests/test_style.py::test_parsing_github_urls[gh://andreoliwa/[email protected]/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/v0.23.1/src/nitpick/[email protected]]",
"tests/test_style.py::test_parsing_github_urls[github://andreoliwa/nitpick@master/src/nitpick/__init__.py-https://github.com/andreoliwa/nitpick/blob/master/src/nitpick/__init__.py-master-master-@master]",
"tests/test_style.py::test_parsing_python_package_urls[py://nitpick/styles/nitpick-style.toml-nitpick.styles-nitpick-style.toml]",
"tests/test_style.py::test_parsing_python_package_urls[py://some_package/nitpick.toml-some_package-nitpick.toml]",
"tests/test_style.py::test_parsing_python_package_urls[pypackage://nitpick/styles/nitpick-style.toml-nitpick.styles-nitpick-style.toml]",
"tests/test_style.py::test_parsing_python_package_urls[pypackage://some_package/nitpick.toml-some_package-nitpick.toml]",
"tests/test_style.py::test_raw_content_url_of_python_package[py://tests/resources/empty-style.toml-tests/resources/empty-style.toml]",
"tests/test_style.py::test_raw_content_url_of_python_package[py://tests/resources/nested_package/empty_style.toml-tests/resources/nested_package/empty_style.toml]",
"tests/test_style.py::test_raw_content_url_of_python_package[pypackage://tests/resources/empty-style.toml-tests/resources/empty-style.toml]",
"tests/test_style.py::test_raw_content_url_of_python_package[pypackage://tests/resources/nested_package/empty_style.toml-tests/resources/nested_package/empty_style.toml]",
"tests/test_style.py::test_protocol_not_supported"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-06-09 04:44:09+00:00 | mit | 1,086 |
|
andreoliwa__nitpick-606 | diff --git a/README.rst b/README.rst
index 6d30ac9..61cbd01 100644
--- a/README.rst
+++ b/README.rst
@@ -350,6 +350,13 @@ Or install it with pip::
Run
~~~
+Nitpick_ will fail if no style is explicitly configured.
+Run this command to download and use the opinionated :gitref:`default style file <nitpick-style.toml>`:
+
+ nitpick init
+
+You can use it as a template to :ref:`configure-your-own-style`.
+
To fix and modify your files directly::
nitpick fix
@@ -358,15 +365,10 @@ To check for errors only::
nitpick check
-Nitpick is also a ``flake8`` plugin, so you can run this on a project
-with at least one Python (``.py``) file::
+Nitpick is also a flake8_ plugin, so you can run this on a project with at least one Python (``.py``) file::
flake8 .
-Nitpick will download and use the opinionated `default style file <nitpick-style.toml>`_.
-
-You can use it as a template to configure your own style.
-
Run as a pre-commit hook
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 4f39c68..ce2d29f 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -18,7 +18,9 @@ Possible configuration files (in order of precedence):
The first file found will be used; the other files will be ignored.
-Run the ``nipick init`` CLI command to create a config file (:ref:`cli_cmd_init`).
+If no style is configured, Nitpick will fail with an error message.
+
+Run ``nipick init`` to create a config file (:ref:`cli_cmd_init`).
To configure your own style, you can either use ``nitpick init``:
@@ -217,7 +219,7 @@ Multiple styles
You can also use multiple styles and mix local files and URLs.
-Example of usage: the ``[tool.nitpick]`` table on :gitref:`Nitpick's own pyproject.toml <pyproject.toml#L1-L7>`.
+Example of usage: the ``[tool.nitpick]`` table on :gitref:`Nitpick's own pyproject.toml <pyproject.toml>`.
.. code-block:: toml
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index 14907c3..cce7e2c 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -36,6 +36,13 @@ Or install it with pip::
Run
---
+Nitpick_ will fail if no style is explicitly configured.
+Run this command to download and use the opinionated :gitref:`default style file <nitpick-style.toml>`:
+
+ nitpick init
+
+You can use it as a template to :ref:`configure-your-own-style`.
+
To fix and modify your files directly::
nitpick fix
@@ -48,10 +55,6 @@ Nitpick is also a flake8_ plugin, so you can run this on a project with at least
flake8 .
-Nitpick_ will download and use the opinionated :gitref:`default style file <nitpick-style.toml>`.
-
-You can use it as a template to :ref:`configure-your-own-style`.
-
Run as a pre-commit hook
------------------------
diff --git a/src/nitpick/constants.py b/src/nitpick/constants.py
index 7562b60..4cfd309 100644
--- a/src/nitpick/constants.py
+++ b/src/nitpick/constants.py
@@ -54,6 +54,10 @@ CONFIG_FILES = (DOT_NITPICK_TOML, PYPROJECT_TOML)
# Config sections and keys
TOOL_KEY = "tool"
TOOL_NITPICK_KEY = f"{TOOL_KEY}.{PROJECT_NAME}"
+RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY = (
+ f" Run 'nitpick init' or configure a style manually ({', '.join(CONFIG_FILES)})."
+ f" See {READ_THE_DOCS_URL}configuration.html"
+)
# JMESPath expressions
TOOL_NITPICK_JMEX = jmespath.compile(TOOL_NITPICK_KEY)
diff --git a/src/nitpick/style/core.py b/src/nitpick/style/core.py
index 98c8157..4c04272 100644
--- a/src/nitpick/style/core.py
+++ b/src/nitpick/style/core.py
@@ -115,8 +115,8 @@ class StyleManager: # pylint: disable=too-many-instance-attributes
chosen_styles = [sorted(paths)[0].expanduser().resolve().as_uri()]
log_message = "Using local style found climbing the directory tree"
else:
- chosen_styles = [self.get_default_style_url()]
- log_message = "Using default remote Nitpick style"
+ yield Reporter().make_fuss(StyleViolations.NO_STYLE_CONFIGURED)
+ return
logger.info(f"{log_message}: {chosen_styles[0]}")
yield from self.include_multiple_styles(
diff --git a/src/nitpick/violations.py b/src/nitpick/violations.py
index db91fea..5242e7b 100644
--- a/src/nitpick/violations.py
+++ b/src/nitpick/violations.py
@@ -10,7 +10,7 @@ from typing import TYPE_CHECKING
import click
-from nitpick.constants import CONFIG_FILES, FLAKE8_PREFIX, READ_THE_DOCS_URL
+from nitpick.constants import FLAKE8_PREFIX, RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY
if TYPE_CHECKING:
from nitpick.plugins.info import FileInfo
@@ -68,6 +68,7 @@ class StyleViolations(ViolationEnum):
INVALID_DATA_TOOL_NITPICK = (1, " has an incorrect style. Invalid data in [{section}]:")
INVALID_TOML = (1, " has an incorrect style. Invalid TOML{exception}")
INVALID_CONFIG = (1, " has an incorrect style. Invalid config:")
+ NO_STYLE_CONFIGURED = (4, f"No style file configured.{RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY}")
class ProjectViolations(ViolationEnum):
@@ -75,9 +76,7 @@ class ProjectViolations(ViolationEnum):
NO_ROOT_DIR = (
101,
- "No root directory detected."
- f" Create a configuration file ({', '.join(CONFIG_FILES)}) manually, or run 'nitpick init'."
- f" See {READ_THE_DOCS_URL}configuration.html",
+ f"No root directory detected.{RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY}",
)
NO_PYTHON_FILE = (102, "No Python file was found on the root dir and subdir of {root!r}")
MISSING_FILE = (103, " should exist{extra}")
@@ -141,7 +140,7 @@ class Reporter: # pylint: disable=too-few-public-methods
if cls.fixed:
parts.append(f"✅ {cls.fixed} fixed")
if cls.manual:
- parts.append(f"❌ {cls.manual} to change manually")
+ parts.append(f"❌ {cls.manual} to fix manually")
if not parts:
return "No violations found. ✨ 🍰 ✨"
return f"Violations: {', '.join(parts)}."
| andreoliwa/nitpick | db69164046facd7d617213dc41a005582fe3e608 | diff --git a/tests/helpers.py b/tests/helpers.py
index f2c4542..bfab93e 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -386,7 +386,7 @@ class ProjectMock:
return self
if violations:
- expected.append(f"Violations: ❌ {violations} to change manually.")
+ expected.append(f"Violations: ❌ {violations} to fix manually.")
elif expected_str_or_lines:
# If the number of violations was not passed but a list of errors was,
# remove the violation count from the actual results.
diff --git a/tests/test_project.py b/tests/test_project.py
index 3136890..28aecae 100644
--- a/tests/test_project.py
+++ b/tests/test_project.py
@@ -13,6 +13,7 @@ from nitpick.constants import (
PACKAGE_JSON,
PRE_COMMIT_CONFIG_YAML,
PYPROJECT_TOML,
+ RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY,
SETUP_CFG,
SETUP_PY,
TOX_INI,
@@ -20,7 +21,6 @@ from nitpick.constants import (
from nitpick.core import Nitpick
from nitpick.exceptions import QuitComplainingError
from nitpick.project import Configuration, confirm_project_root, find_main_python_file
-from nitpick.style import StyleManager
from nitpick.violations import ProjectViolations
from tests.helpers import ProjectMock
@@ -123,12 +123,12 @@ def test_django_project_structure(tmp_path):
def test_when_no_config_file_the_default_style_is_requested(tmp_path, caplog):
- """There is a root dir (setup.py), but no config file."""
- project = ProjectMock(tmp_path, pyproject_toml=False, setup_py=True).api_check(offline=True)
- style_url = StyleManager.get_default_style_url()
+ """There is a root dir (setup.py), but no style file. The user should explicitly set the style, no default will be used."""
+ project = ProjectMock(tmp_path, pyproject_toml=False, setup_py=True)
+ error = f"NIP004 No style file configured.{RUN_NITPICK_INIT_OR_CONFIGURE_STYLE_MANUALLY}"
+ project.flake8().assert_single_error(error).cli_run(error, exit_code=1)
assert project.nitpick_instance.project.read_configuration() == Configuration(None, [], "")
assert "Config file: none found {}" in caplog.messages
- assert f"Using default remote Nitpick style: {style_url} {{}}" in caplog.messages
@pytest.mark.parametrize("config_file", [DOT_NITPICK_TOML, PYPROJECT_TOML])
diff --git a/tests/test_violations.py b/tests/test_violations.py
index eac967f..198e665 100644
--- a/tests/test_violations.py
+++ b/tests/test_violations.py
@@ -37,12 +37,12 @@ def test_reporter():
reporter.increment()
assert reporter.manual == 1
assert reporter.fixed == 0
- assert reporter.get_counts() == "Violations: ❌ 1 to change manually."
+ assert reporter.get_counts() == "Violations: ❌ 1 to fix manually."
reporter.increment(True)
assert reporter.manual == 1
assert reporter.fixed == 1
- assert reporter.get_counts() == "Violations: ✅ 1 fixed, ❌ 1 to change manually."
+ assert reporter.get_counts() == "Violations: ✅ 1 fixed, ❌ 1 to fix manually."
reporter.reset()
assert reporter.manual == 0
| Nitpick should fail when no style is explicitly configured
## Problem
I installed nitpick in my project, but I forgot to configure the `style =` key in `[tool.nitpick]`, so when I ran `nitpick fix` it used the default nitpick style, which created a `package.json`, a `.github/` directory, and other files I do not need.
## Possible solution
I would have expected the tool to fail and tell me to explicitly configure a style.
I'm creating this issue because I don't really see a use case where applying the default nitpick style implicitly makes sense. If there is one, I'm curious to hear about it.
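For reference, the patch above replaces the silent remote-default fallback with a new NIP004 violation. The condensed, standalone Python sketch below models that branch; the names are illustrative stand-ins for nitpick's Reporter/StyleViolations machinery, not its real API:

```python
# Standalone model of the patched fallback in src/nitpick/style/core.py.
NO_STYLE_CONFIGURED = (
    "NIP004 No style file configured. Run 'nitpick init' or configure a"
    " style manually (.nitpick.toml, pyproject.toml)."
)

def find_initial_styles(configured_styles, local_style_paths):
    if configured_styles:            # styles listed under [tool.nitpick]
        chosen = list(configured_styles)
    elif local_style_paths:          # a style found climbing the dir tree
        chosen = [sorted(local_style_paths)[0]]
    else:
        # Previously: download and use the default remote Nitpick style.
        yield NO_STYLE_CONFIGURED    # now: fail fast with NIP004
        return
    yield from chosen

print(list(find_initial_styles([], [])))  # -> [the NIP004 message]
```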
| 0.0 | db69164046facd7d617213dc41a005582fe3e608 | [
"tests/test_project.py::test_singleton",
"tests/test_project.py::test_no_root_dir_with_python_file",
"tests/test_project.py::test_no_root_dir_no_python_file",
"tests/test_project.py::test_multiple_root_dirs",
"tests/test_project.py::test_no_python_file_root_dir",
"tests/test_project.py::test_at_least_one_python_file[depth1.py-False]",
"tests/test_project.py::test_at_least_one_python_file[subdir/depth2.py-False]",
"tests/test_project.py::test_at_least_one_python_file[subdir/another/depth3.py-True]",
"tests/test_project.py::test_django_project_structure",
"tests/test_project.py::test_when_no_config_file_the_default_style_is_requested",
"tests/test_project.py::test_has_one_config_file[.nitpick.toml]",
"tests/test_project.py::test_has_one_config_file[pyproject.toml]",
"tests/test_project.py::test_has_multiple_config_files",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[.nitpick.toml]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[.pre-commit-config.yaml]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[pyproject.toml]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[setup.py]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[setup.cfg]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[requirements.txt]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[requirements_dev.txt]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[Pipfile]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[Pipfile.lock]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[tox.ini]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[package.json]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[Cargo.toml]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[Cargo.lock]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[go.mod]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[go.sum]",
"tests/test_project.py::test_use_current_dir_dont_climb_dirs_to_find_project_root[nitpick-style.toml]",
"tests/test_project.py::test_find_root_django",
"tests/test_violations.py::test_fuss_pretty[False]",
"tests/test_violations.py::test_fuss_pretty[True]",
"tests/test_violations.py::test_reporter",
"tests/test_violations.py::test_flatten_marshmallow_errors"
]
| []
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-08-19 16:22:11+00:00 | mit | 1,087 |
|
andreroggeri__pynubank-134 | diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index 553440c..c73ee2a 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -152,3 +152,16 @@ class Nubank:
barcode = boleto_response['data']['createTransferInBoleto']['boleto']['readableBarcode']
return barcode
+
+ def create_money_request(self, amount: float) -> str:
+ account_data = self._make_graphql_request('account_feed')
+ account_id = account_data['data']['viewer']['savingsAccount']['id']
+ payload = {
+ 'input': {
+ 'amount': amount, 'savingsAccountId': account_id
+ }
+ }
+
+ money_request_response = self._make_graphql_request('create_money_request', payload)
+
+ return money_request_response['data']['createMoneyRequest']['moneyRequest']['url']
diff --git a/pynubank/queries/account_feed.gql b/pynubank/queries/account_feed.gql
index 7e8fd90..5236319 100644
--- a/pynubank/queries/account_feed.gql
+++ b/pynubank/queries/account_feed.gql
@@ -1,6 +1,7 @@
{
viewer {
savingsAccount {
+ id
feed {
id
__typename
diff --git a/pynubank/queries/create_money_request.gql b/pynubank/queries/create_money_request.gql
new file mode 100644
index 0000000..f23ef4c
--- /dev/null
+++ b/pynubank/queries/create_money_request.gql
@@ -0,0 +1,12 @@
+mutation createMoneyRequest_createMoneyRequestMutation(
+ $input: CreateMoneyRequestInput!
+) {
+ createMoneyRequest(input: $input) {
+ moneyRequest {
+ amount
+ message
+ url
+ id
+ }
+ }
+}
| andreroggeri/pynubank | e21ae7a6bdc519be5ffc28d2a4719fe8cc03b871 | diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py
index 20b8897..07c467e 100644
--- a/tests/fixtures/__init__.py
+++ b/tests/fixtures/__init__.py
@@ -4,6 +4,7 @@ from .authentication import authentication_return
from .bills import bills_return
from .bills_details import bill_details_return
from .create_boleto import create_boleto_return
+from .create_money_request import create_money_request_return
from .events import events_return
from .gen_certificate import gen_certificate_return
from .proxy import proxy_return
diff --git a/tests/fixtures/account_statements.py b/tests/fixtures/account_statements.py
index 4315b35..5c400a4 100644
--- a/tests/fixtures/account_statements.py
+++ b/tests/fixtures/account_statements.py
@@ -3,53 +3,62 @@ import pytest
@pytest.fixture
def account_statements_return():
- return {'data': {'viewer': {'savingsAccount': {'feed': [
- {
- 'id': 'abcde-fghi-jklmn-opqrst-uvxw',
- '__typename': 'BillPaymentEvent',
- 'title': 'Pagamento da fatura',
- 'detail': 'Cartão Nubank - R$ 50,00',
- 'postDate': '2018-03-07'
- },
- {
- 'id': 'abcde-fghi-jklmn-opqrst-uvxy',
- '__typename': 'TransferOutReversalEvent',
- 'title': 'Transferência devolvida',
- 'detail': 'Juquinha da Silva Sauro - R$ 20,00',
- 'postDate': '2018-03-06'
- },
- {
- 'id': 'abcde-fghi-jklmn-opqrst-uvxz',
- '__typename': 'TransferOutEvent',
- 'title': 'Transferência enviada',
- 'detail': 'Juquinha da Silva Sauro - R$ 20,00',
- 'postDate': '2018-03-06',
- 'amount': 20.0,
- 'destinationAccount': {
- 'name': 'Juquinha da Silva Sauro'
+ return {
+ 'data': {
+ 'viewer': {
+ 'savingsAccount': {
+ 'id': 'abc123123',
+ 'feed': [
+ {
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxw',
+ '__typename': 'BillPaymentEvent',
+ 'title': 'Pagamento da fatura',
+ 'detail': 'Cartão Nubank - R$ 50,00',
+ 'postDate': '2018-03-07'
+ },
+ {
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxy',
+ '__typename': 'TransferOutReversalEvent',
+ 'title': 'Transferência devolvida',
+ 'detail': 'Juquinha da Silva Sauro - R$ 20,00',
+ 'postDate': '2018-03-06'
+ },
+ {
+ 'id': 'abcde-fghi-jklmn-opqrst-uvxz',
+ '__typename': 'TransferOutEvent',
+ 'title': 'Transferência enviada',
+ 'detail': 'Juquinha da Silva Sauro - R$ 20,00',
+ 'postDate': '2018-03-06',
+ 'amount': 20.0,
+ 'destinationAccount': {
+ 'name': 'Juquinha da Silva Sauro'
+ }
+ },
+ {
+ 'id': 'abcde-fghi-jklmn-opqrst-uvx1',
+ '__typename': 'TransferInEvent',
+ 'title': 'Transferência recebida',
+ 'detail': 'R$127.33',
+ 'postDate': '2018-03-06',
+ 'amount': 127.33
+ },
+ {
+ "id": "abcdefgh-ijkl-mnop-qrst-uvwxyz0123",
+ "__typename": "BarcodePaymentEvent",
+ "title": "Pagamento efetuado",
+ "detail": "AES ELETROPAULO",
+ "postDate": "2018-02-05",
+ "amount": 169.2
+ },
+ {
+ 'id': 'abcde-fghi-jklmn-opqrst-uvx2',
+ '__typename': 'WelcomeEvent',
+ 'title': 'Bem vindo à sua conta!',
+ 'detail': 'Waldisney Santos\nBanco 260 - Nu Pagamentos S.A.\nAgência 0001\nConta 000000-1',
+ 'postDate': '2017-12-18'
+ }
+ ]
+ }
}
- },
- {
- 'id': 'abcde-fghi-jklmn-opqrst-uvx1',
- '__typename': 'TransferInEvent',
- 'title': 'Transferência recebida',
- 'detail': 'R$127.33',
- 'postDate': '2018-03-06',
- 'amount': 127.33
- },
- {
- "id": "abcdefgh-ijkl-mnop-qrst-uvwxyz0123",
- "__typename": "BarcodePaymentEvent",
- "title": "Pagamento efetuado",
- "detail": "AES ELETROPAULO",
- "postDate": "2018-02-05",
- "amount": 169.2
- },
- {
- 'id': 'abcde-fghi-jklmn-opqrst-uvx2',
- '__typename': 'WelcomeEvent',
- 'title': 'Bem vindo à sua conta!',
- 'detail': 'Waldisney Santos\nBanco 260 - Nu Pagamentos S.A.\nAgência 0001\nConta 000000-1',
- 'postDate': '2017-12-18'
}
- ]}}}}
+ }
diff --git a/tests/fixtures/create_money_request.py b/tests/fixtures/create_money_request.py
new file mode 100644
index 0000000..36851b4
--- /dev/null
+++ b/tests/fixtures/create_money_request.py
@@ -0,0 +1,17 @@
+import pytest
+
+
[email protected]()
+def create_money_request_return():
+ return {
+ "data": {
+ "createMoneyRequest": {
+ "moneyRequest": {
+ "amount": 550.0,
+ "message": None,
+ "url": "https://some.tld/path1/path2",
+ "id": "123123123123"
+ }
+ }
+ }
+ }
diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 860c8b5..271ab93 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -1,4 +1,4 @@
-from unittest.mock import MagicMock
+from unittest.mock import MagicMock, Mock
from qrcode import QRCode
@@ -288,3 +288,15 @@ def test_should_generate_boleto(monkeypatch, create_boleto_return):
boleto = client.create_boleto(200.50)
assert boleto == create_boleto_return['data']['createTransferInBoleto']['boleto']['readableBarcode']
+
+
+def test_should_create_money_request(monkeypatch, create_money_request_return, account_statements_return):
+ monkeypatch.setattr(Discovery, '_update_proxy_urls', fake_update_proxy)
+ post_mock = Mock()
+ post_mock.side_effect = [account_statements_return, create_money_request_return]
+ monkeypatch.setattr(HttpClient, 'post', post_mock)
+ client = Nubank()
+
+ url = client.create_money_request(200)
+
+ assert url == create_money_request_return['data']['createMoneyRequest']['moneyRequest']['url']
| QR Code to Complete a Transfer
Hello!
I'm writing to request/suggest an interesting feature.
It would basically be a QR code to be scanned in order to complete a Nubank-to-Nubank transfer.
It would be something like:
Generate a QR code with the data of the person who will receive the transfer amount.
To make the transfer, it would be done directly in the app via App > Transferir > QRCode, setting the amount to be transferred.
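For reference, the patch above implements this as `Nubank.create_money_request`, which returns a shareable payment URL. A minimal usage sketch, assuming the patched library is installed (the CPF, password, and login UUID below are placeholders):

```python
from pynubank.nubank import Nubank
from qrcode import QRCode

nu = Nubank()
nu.authenticate_with_qr_code('12345678912', 'hunter12', 'some-uuid')

# Ask for R$ 200.00; the payer opens the returned URL to send the money.
url = nu.create_money_request(200.0)

# The link itself can be rendered as a QR code for scanning.
qr = QRCode()
qr.add_data(url)
qr.print_ascii()
```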
Thanks! | 0.0 | e21ae7a6bdc519be5ffc28d2a4719fe8cc03b871 | [
"tests/test_nubank_client.py::test_should_create_money_request"
]
| [
"tests/test_nubank_client.py::test_authenticate_with_qr_code_succeeds",
"tests/test_nubank_client.py::test_authenticate_with_cert",
"tests/test_nubank_client.py::test_authenticate_with_refresh_token",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_bills",
"tests/test_nubank_client.py::test_get_bill_details",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_get_qr_code",
"tests/test_nubank_client.py::test_should_generate_boleto"
]
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2020-07-21 00:40:30+00:00 | mit | 1,088 |
|
andreroggeri__pynubank-170 | diff --git a/pynubank/exception.py b/pynubank/exception.py
index 4326696..c5bc410 100644
--- a/pynubank/exception.py
+++ b/pynubank/exception.py
@@ -7,6 +7,11 @@ class NuException(Exception):
super().__init__(message)
+class NuMissingCreditCard(NuException):
+ def __init__(self):
+ super().__init__(f'Couldn\'t fetch bills due to missing credit card.')
+
+
class NuRequestException(NuException):
def __init__(self, response: Response):
super().__init__(f'The request made failed with HTTP status code {response.status_code}')
diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index 4ed2338..f83fd98 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -6,6 +6,7 @@ from qrcode import QRCode
from pynubank.utils.discovery import Discovery
from pynubank.utils.http import HttpClient
from pynubank.utils.graphql import prepare_request_body
+from pynubank.exception import NuMissingCreditCard
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
@@ -43,11 +44,23 @@ class Nubank:
}
return self.client.post(self.discovery.get_url('login'), json=payload)
+ def _find_url(self, known_keys: set, links: dict) -> str:
+ intersection = known_keys.intersection(links)
+ iterator = iter(intersection)
+ key = next(iterator, None)
+ return links.get(key, {}).get('href', None)
+
def _save_auth_data(self, auth_data: dict) -> None:
self.client.set_header('Authorization', f'Bearer {auth_data["access_token"]}')
- self.feed_url = auth_data['_links']['events']['href']
- self.query_url = auth_data['_links']['ghostflame']['href']
- self.bills_url = auth_data['_links']['bills_summary']['href']
+
+ links = auth_data['_links']
+ self.query_url = links['ghostflame']['href']
+
+ feed_url_keys = {'magnitude', 'events'}
+ bills_url_keys = {'bills_summary'}
+
+ self.feed_url = self._find_url(feed_url_keys, links)
+ self.bills_url = self._find_url(bills_url_keys, links)
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
@@ -108,8 +121,11 @@ class Nubank:
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
- request = self.client.get(self.bills_url)
- return request['bills']
+ if self.bills_url is not None:
+ request = self.client.get(self.bills_url)
+ return request['bills']
+ else:
+ raise NuMissingCreditCard
def get_bill_details(self, bill: dict):
return self.client.get(bill['_links']['self']['href'])
diff --git a/pynubank/utils/mock_http.py b/pynubank/utils/mock_http.py
index 19c5f30..b4ae558 100644
--- a/pynubank/utils/mock_http.py
+++ b/pynubank/utils/mock_http.py
@@ -33,6 +33,12 @@ class MockHttpClient(HttpClient):
self._results[('https://mocked-proxy-url/api/proxy/ghostflame_123',
str(prepare_request_body('create_money_request')))] = self._read_data('money')
+ def add_mock_url(self, url: str, graphql_object: str, response_json_name: str):
+ self._results[(url, graphql_object)] = self._read_data(response_json_name)
+
+ def remove_mock_url(self, route: tuple):
+ del self._results[route]
+
def get(self, url: str) -> dict:
result = self._find(url)
if result is None:
diff --git a/pynubank/utils/mocked_responses/discovery_login_alternative.json b/pynubank/utils/mocked_responses/discovery_login_alternative.json
new file mode 100644
index 0000000..986c4c8
--- /dev/null
+++ b/pynubank/utils/mocked_responses/discovery_login_alternative.json
@@ -0,0 +1,51 @@
+{
+ "access_token": "access_token_123",
+ "token_type": "bearer",
+ "_links": {
+ "change_password": {
+ "href": "https://mocked-proxy-url/api/proxy/change_password_123"
+ },
+ "enabled_features": {
+ "href": "https://mocked-proxy-url/api/proxy/enabled_features_123"
+ },
+ "revoke_token": {
+ "href": "https://mocked-proxy-url/api/proxy/revoke_token_123"
+ },
+ "userinfo": {
+ "href": "https://mocked-proxy-url/api/proxy/userinfo_123"
+ },
+ "events_page": {
+ "href": "https://mocked-proxy-url/api/proxy/events_page_123"
+ },
+ "postcode": {
+ "href": "https://mocked-proxy-url/api/proxy/post_code_123"
+ },
+ "app_flows": {
+ "href": "https://mocked-proxy-url/api/proxy/app_flows_123"
+ },
+ "revoke_all": {
+ "href": "https://mocked-proxy-url/api/proxy/revoke_all_123"
+ },
+ "customer": {
+ "href": "https://mocked-proxy-url/api/proxy/customer_123"
+ },
+ "account": {
+ "href": "https://mocked-proxy-url/api/proxy/account_123"
+ },
+ "purchases": {
+ "href": "https://mocked-proxy-url/api/proxy/purchases_123"
+ },
+ "ghostflame": {
+ "href": "https://mocked-proxy-url/api/proxy/ghostflame_123"
+ },
+ "user_change_password": {
+ "href": "https://mocked-proxy-url/api/proxy/user_change_password_123"
+ },
+ "magnitude": {
+ "href": "https://mocked-proxy-url/api/proxy/magnitude_123"
+ },
+ "savings_account": {
+ "href": "https://mocked-proxy-url/api/proxy/savings_account_123"
+ }
+ }
+}
| andreroggeri/pynubank | ba59c507753b41cf0b6aac2f79bbb2eac84d81d1 | diff --git a/requirements-test.txt b/requirements-test.txt
index 038e63b..467289f 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1,5 +1,5 @@
-r requirements.txt
-pytest==6.1.1
+pytest==6.1.2
pytest-cov==2.10.1
coveralls==2.1.2
tox==3.20.1
diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 2072af9..d4dddd8 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -1,10 +1,8 @@
-from unittest.mock import MagicMock, Mock
-
+import pytest
from qrcode import QRCode
-
from pynubank.nubank import Nubank
-from pynubank.utils.http import HttpClient
from pynubank import MockHttpClient
+from pynubank.exception import NuMissingCreditCard
def test_authenticate_with_qr_code_succeeds():
@@ -31,6 +29,22 @@ def test_authenticate_with_refresh_token():
assert nubank_client.client.get_header('Authorization') == 'Bearer access_token_123'
+def test_authenticate_with_cert_missing_credit_card():
+ mock_client = MockHttpClient()
+ mock_client.remove_mock_url(('https://mocked-proxy-url/api/proxy/events_123', ''))
+ mock_client.remove_mock_url(('https://mocked-proxy-url/api/token', ''))
+
+ mock_client.add_mock_url('https://mocked-proxy-url/api/proxy/magnitude_123', '', 'proxy_events')
+ mock_client.add_mock_url('https://mocked-proxy-url/api/token', '', 'discovery_login_alternative')
+
+ nubank_client = Nubank(client=mock_client)
+ nubank_client.authenticate_with_cert('1234', 'hunter12', 'some-file.p12')
+
+ assert nubank_client.feed_url == 'https://mocked-proxy-url/api/proxy/magnitude_123'
+ assert nubank_client.bills_url is None
+ assert nubank_client.client.get_header('Authorization') == 'Bearer access_token_123'
+
+
def test_get_card_feed():
nubank_client = Nubank(client=MockHttpClient())
nubank_client.authenticate_with_qr_code('12345678912', 'hunter12', 'some-uuid')
@@ -56,6 +70,21 @@ def test_get_card_feed():
assert events[0]['_links']['self']['href'] == 'https://prod-s0-webapp-proxy.nubank.com.br/api/proxy/_links_123'
+def test_get_bills_missing_credit_card():
+ mock_client = MockHttpClient()
+ mock_client.remove_mock_url(('https://mocked-proxy-url/api/proxy/events_123', ''))
+ mock_client.remove_mock_url(('https://mocked-proxy-url/api/token', ''))
+
+ mock_client.add_mock_url('https://mocked-proxy-url/api/proxy/magnitude_123', '', 'proxy_events')
+ mock_client.add_mock_url('https://mocked-proxy-url/api/token', '', 'discovery_login_alternative')
+
+ nubank_client = Nubank(client=mock_client)
+ nubank_client.authenticate_with_cert('1234', 'hunter12', 'some-file.p12')
+
+ with pytest.raises(NuMissingCreditCard):
+ nubank_client.get_bills()
+
+
def test_get_bills():
nubank_client = Nubank(client=MockHttpClient())
nubank_client.authenticate_with_qr_code('12345678912', 'hunter12', 'some-uuid')
| Problems with the _save_auth_data function in nubank.py
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/sergio/Documentos/pynubank/pynubank/nubank.py", line 99, in authenticate_with_cert
self._save_auth_data(response)
File "/home/sergio/Documentos/pynubank/pynubank/nubank.py", line 63, in _save_auth_data
self.feed_url = auth_data['_links']['events']['href']
KeyError: 'events'
This error occurs when trying to authenticate. I had to fix the _save_auth_data function in nubank.py, replacing the key names that changed in the request. The fixed function looks like this:
def _save_auth_data(self, auth_data: dict) -> None:
self.client.set_header('Authorization', f'Bearer {auth_data["access_token"]}')
self.feed_url = auth_data['_links']['magnitude']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['savings_account']['href']
| 0.0 | ba59c507753b41cf0b6aac2f79bbb2eac84d81d1 | [
"tests/test_nubank_client.py::test_authenticate_with_qr_code_succeeds",
"tests/test_nubank_client.py::test_authenticate_with_cert",
"tests/test_nubank_client.py::test_authenticate_with_refresh_token",
"tests/test_nubank_client.py::test_authenticate_with_cert_missing_credit_card",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_bills_missing_credit_card",
"tests/test_nubank_client.py::test_get_bills",
"tests/test_nubank_client.py::test_get_bill_details",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_get_account_investments_details",
"tests/test_nubank_client.py::test_get_qr_code",
"tests/test_nubank_client.py::test_should_generate_boleto",
"tests/test_nubank_client.py::test_should_create_money_request"
]
| []
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-10-16 16:46:10+00:00 | mit | 1,089 |
|
andreroggeri__pynubank-243 | diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index b0c1be9..931ad44 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -1,6 +1,5 @@
import calendar
import datetime
-import re
import uuid
from typing import Tuple
@@ -10,6 +9,7 @@ from pynubank.exception import NuMissingCreditCard
from pynubank.utils.discovery import Discovery
from pynubank.utils.graphql import prepare_request_body
from pynubank.utils.http import HttpClient
+from pynubank.utils.parsing import parse_float, parse_pix_transaction
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
@@ -20,7 +20,11 @@ PAYMENT_EVENT_TYPES = (
'DebitPurchaseReversalEvent',
'BillPaymentEvent',
'DebitWithdrawalFeeEvent',
- 'DebitWithdrawalEvent'
+ 'DebitWithdrawalEvent',
+ 'PixTransferOutEvent',
+ 'PixTransferInEvent',
+ 'PixTransferOutReversalEvent',
+ 'PixTransferFailedEvent',
)
@@ -157,6 +161,7 @@ class Nubank:
def get_account_statements(self):
feed = self.get_account_feed()
+ feed = map(parse_pix_transaction, feed)
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
@@ -179,7 +184,7 @@ class Nubank:
value = data['data']['viewer']['productFeatures']['savings']['screens']['detailedBalance']['monthBalanceSection']['yieldSection']['semantics']['label']
- return float(re.search(r'\d*,\d\d', value).group().replace(',', '.'))
+ return parse_float(value)
def create_boleto(self, amount: float) -> str:
customer_id_response = self._make_graphql_request('account_id')
diff --git a/pynubank/utils/parsing.py b/pynubank/utils/parsing.py
new file mode 100644
index 0000000..6dab3b4
--- /dev/null
+++ b/pynubank/utils/parsing.py
@@ -0,0 +1,28 @@
+import re
+
+TITLE_INFLOW_PIX = 'Transferência recebida'
+TITLE_OUTFLOW_PIX = 'Transferência enviada'
+TITLE_REVERSAL_PIX = 'Reembolso enviado'
+TITLE_FAILED_PIX = 'Transferência falhou'
+
+PIX_TRANSACTION_MAP = {
+ TITLE_INFLOW_PIX: 'PixTransferInEvent',
+ TITLE_OUTFLOW_PIX: 'PixTransferOutEvent',
+ TITLE_REVERSAL_PIX: 'PixTransferOutReversalEvent',
+ TITLE_FAILED_PIX: 'PixTransferFailedEvent',
+}
+
+
+def parse_float(value: str):
+ return float(re.search(r'(?:\d.)?\d+,\d*', value).group().replace('.', '').replace(',', '.'))
+
+
+def parse_pix_transaction(transaction: dict) -> dict:
+ if transaction['__typename'] != 'GenericFeedEvent':
+ return transaction
+
+ if transaction['title'] in PIX_TRANSACTION_MAP.keys():
+ transaction['__typename'] = PIX_TRANSACTION_MAP[transaction['title']]
+ transaction['amount'] = parse_float(transaction['detail'])
+
+ return transaction
| andreroggeri/pynubank | 85cb39b247d374c6db35fcfaf9b33414b771a159 | diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 688d5ec..d122511 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -251,7 +251,7 @@ def test_get_account_statements():
statements = nubank_client.get_account_statements()
- assert len(statements) == 12
+ assert len(statements) == 23
assert statements[0]['id'] == 'e409e495-4a16-4bad-9ddb-5c447c84fdcb'
assert statements[0]['__typename'] == 'TransferOutEvent'
assert statements[0]['title'] == 'Transferência enviada'
@@ -259,12 +259,12 @@ def test_get_account_statements():
assert statements[0]['postDate'] == '2021-04-14'
assert statements[0]['amount'] == 4496.9
- assert statements[11]['id'] == 'a9f96774-37f2-431e-9e6f-a081defacf25'
- assert statements[11]['__typename'] == 'BarcodePaymentEvent'
- assert statements[11]['title'] == 'Pagamento efetuado'
- assert statements[11]['detail'] == 'CONFIDENCE CORRETORA DE CAMBIO S A'
- assert statements[11]['postDate'] == '2020-12-08'
- assert statements[11]['amount'] == 4245.1
+ assert statements[22]['id'] == 'a9f96774-37f2-431e-9e6f-a081defacf25'
+ assert statements[22]['__typename'] == 'BarcodePaymentEvent'
+ assert statements[22]['title'] == 'Pagamento efetuado'
+ assert statements[22]['detail'] == 'CONFIDENCE CORRETORA DE CAMBIO S A'
+ assert statements[22]['postDate'] == '2020-12-08'
+ assert statements[22]['amount'] == 4245.1
def test_get_account_investments_details():
diff --git a/tests/test_parsing.py b/tests/test_parsing.py
new file mode 100644
index 0000000..f7c38c7
--- /dev/null
+++ b/tests/test_parsing.py
@@ -0,0 +1,60 @@
+from pynubank.utils.parsing import parse_pix_transaction
+
+base_transaction = {
+ "id": "12c77a49-21c2-427d-8662-beba354e8356",
+ "__typename": "GenericFeedEvent",
+ "title": "Transferência enviada",
+ "detail": "Waldisney da Silva\nR$ 3.668,40",
+ "postDate": "2021-03-24"
+}
+
+
+def test_should_do_nothing_with_transactions_that_arent_pix():
+ transaction = base_transaction.copy()
+ transaction['__typename'] = 'TransferInEvent'
+ transaction['amount'] = 3429
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == transaction['__typename']
+ assert parsed['amount'] == transaction['amount']
+
+
+def test_should_parse_inflow_pix_transaction():
+ transaction = base_transaction.copy()
+ transaction['title'] = 'Transferência recebida'
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == 'PixTransferInEvent'
+ assert parsed['amount'] == 3668.40
+
+
+def test_should_parse_outflow_pix_transaction():
+ transaction = base_transaction.copy()
+ transaction['title'] = 'Transferência enviada'
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == 'PixTransferOutEvent'
+ assert parsed['amount'] == 3668.40
+
+
+def test_should_parse_reversal_pix_transaction():
+ transaction = base_transaction.copy()
+ transaction['title'] = 'Reembolso enviado'
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == 'PixTransferOutReversalEvent'
+ assert parsed['amount'] == 3668.40
+
+
+def test_should_parse_failed_pix_transaction():
+ transaction = base_transaction.copy()
+ transaction['title'] = 'Transferência falhou'
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == 'PixTransferFailedEvent'
+ assert parsed['amount'] == 3668.40
| Transfers using Pix
Hi, Pix transfers are coming through like this, in case it's useful to anyone.
They don't show up because the library filters them out; the only type marker these events carry is the one below. I believe this will change in the future.
'__typename': 'GenericFeedEvent'
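The patch above handles this by adding `parse_pix_transaction`, which promotes these generic events to concrete Pix event types and parses the amount out of the detail text. A condensed standalone sketch of that mapping:

```python
import re

PIX_TRANSACTION_MAP = {
    'Transferência recebida': 'PixTransferInEvent',
    'Transferência enviada': 'PixTransferOutEvent',
    'Reembolso enviado': 'PixTransferOutReversalEvent',
    'Transferência falhou': 'PixTransferFailedEvent',
}

def parse_float(value: str) -> float:
    # "R$ 3.668,40" -> 3668.4
    match = re.search(r'(?:\d.)?\d+,\d*', value)
    return float(match.group().replace('.', '').replace(',', '.'))

def parse_pix_transaction(tx: dict) -> dict:
    if tx['__typename'] == 'GenericFeedEvent' and tx['title'] in PIX_TRANSACTION_MAP:
        tx['__typename'] = PIX_TRANSACTION_MAP[tx['title']]
        tx['amount'] = parse_float(tx['detail'])
    return tx

tx = {'__typename': 'GenericFeedEvent', 'title': 'Transferência enviada',
      'detail': 'Waldisney da Silva\nR$ 3.668,40', 'postDate': '2021-03-24'}
print(parse_pix_transaction(tx))  # __typename becomes PixTransferOutEvent, amount 3668.4
```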
| 0.0 | 85cb39b247d374c6db35fcfaf9b33414b771a159 | [
"tests/test_nubank_client.py::test_authenticate_with_qr_code_succeeds",
"tests/test_nubank_client.py::test_authenticate_with_cert",
"tests/test_nubank_client.py::test_authenticate_with_refresh_token",
"tests/test_nubank_client.py::test_authenticate_with_cert_missing_credit_card",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_bills_missing_credit_card",
"tests/test_nubank_client.py::test_get_bills",
"tests/test_nubank_client.py::test_get_bill_details",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_get_account_investments_details",
"tests/test_nubank_client.py::test_get_account_investments_yield",
"tests/test_nubank_client.py::test_get_customer",
"tests/test_nubank_client.py::test_get_qr_code",
"tests/test_nubank_client.py::test_should_generate_boleto",
"tests/test_nubank_client.py::test_should_create_money_request",
"tests/test_nubank_client.py::test_should_fetch_pix_keys",
"tests/test_nubank_client.py::test_should_create_pix_money_request",
"tests/test_nubank_client.py::test_should_revoke_certificate",
"tests/test_nubank_client.py::test_should_use_http_client_if_none_is_provided",
"tests/test_parsing.py::test_should_do_nothing_with_transactions_that_arent_pix",
"tests/test_parsing.py::test_should_parse_inflow_pix_transaction",
"tests/test_parsing.py::test_should_parse_outflow_pix_transaction",
"tests/test_parsing.py::test_should_parse_reversal_pix_transaction",
"tests/test_parsing.py::test_should_parse_failed_pix_transaction"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-06-04 00:58:44+00:00 | mit | 1,090 |
|
andreroggeri__pynubank-337 | diff --git a/pynubank/utils/parsing.py b/pynubank/utils/parsing.py
index c1bed33..75d4317 100644
--- a/pynubank/utils/parsing.py
+++ b/pynubank/utils/parsing.py
@@ -1,5 +1,6 @@
import re
+BRL = 'R$'
TITLE_INFLOW_PIX = 'Transferência recebida'
TITLE_OUTFLOW_PIX = 'Transferência enviada'
TITLE_REVERSAL_PIX = 'Reembolso enviado'
@@ -23,7 +24,7 @@ def parse_pix_transaction(transaction: dict) -> dict:
if transaction['__typename'] != 'GenericFeedEvent':
return transaction
- if transaction['title'] in PIX_TRANSACTION_MAP.keys():
+ if BRL in transaction['detail'] and transaction['title'] in PIX_TRANSACTION_MAP.keys():
transaction['__typename'] = PIX_TRANSACTION_MAP[transaction['title']]
transaction['amount'] = parse_float(transaction['detail'])
@@ -32,9 +33,9 @@ def parse_pix_transaction(transaction: dict) -> dict:
def parse_generic_transaction(transaction: dict) -> dict:
amount = None
- if transaction['node']['detail'] and 'R$' in transaction['node']['detail']:
+ if transaction['node']['detail'] and BRL in transaction['node']['detail']:
amount = parse_float(transaction['node']['detail'])
- elif transaction['node']['footer'] and 'R$' in transaction['node']['footer']:
+ elif transaction['node']['footer'] and BRL in transaction['node']['footer']:
amount = parse_float(transaction['node']['footer'])
if amount:
| andreroggeri/pynubank | 1817459911645eed3b4c94d6e42a115293e5c328 | diff --git a/tests/test_parsing.py b/tests/test_parsing.py
index 3476a84..bf29261 100644
--- a/tests/test_parsing.py
+++ b/tests/test_parsing.py
@@ -71,6 +71,17 @@ def test_should_parse_failed_pix_transaction():
assert parsed['amount'] == 3668.40
+def test_should_ignore_transactions_without_value():
+ transaction = base_generic_transaction.copy()
+ transaction['title'] = 'Transferência enviada'
+ transaction['detail'] = 'Something without money'
+
+ parsed = parse_pix_transaction(transaction)
+
+ assert parsed['__typename'] == 'GenericFeedEvent'
+ assert parsed.get('amount') is None
+
+
def test_parse_generic_transaction_should_retrieve_amount_from_detail_when_contains_rs():
transaction = create_edge_transaction()
transaction['node']['detail'] = 'R$ 123,56'
| Error in get_card_statements()
When trying to use the get_card_statements() method, the following error is returned:
> Traceback (most recent call last):
File "C:\Python\Python310\lib\code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 1, in <module>
File "C:\Python\Python310\lib\site-packages\pynubank\auth_mode.py", line 23, in wrapper
return function(*args, **kwargs)
File "C:\Python\Python310\lib\site-packages\pynubank\nubank.py", line 199, in get_account_statements
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
File "C:\Python\Python310\lib\site-packages\pynubank\utils\parsing.py", line 28, in parse_pix_transaction
transaction['amount'] = parse_float(transaction['detail'])
File "C:\Python\Python310\lib\site-packages\pynubank\utils\parsing.py", line 19, in parse_float
return float(re.search(r'(?:\d*\.)*\d+,\d{1,2}', value).group().replace('.', '').replace(',', '.'))
AttributeError: 'NoneType' object has no attribute 'group'
Em parsing.py na função parse_float(value: str), value recebe um valor de uma string sem o valor em R$. Isto veio de uma entrada antiga que tive com os dizeres "Estorno - Transferência enviada pelo Pix". Talvez seja uma condição atípica e não esperada pelo código. | 0.0 | 1817459911645eed3b4c94d6e42a115293e5c328 | [
"tests/test_parsing.py::test_should_ignore_transactions_without_value"
]
| [
"tests/test_parsing.py::test_should_do_nothing_with_transactions_that_arent_pix",
"tests/test_parsing.py::test_should_parse_inflow_pix_transaction",
"tests/test_parsing.py::test_should_parse_outflow_pix_transaction",
"tests/test_parsing.py::test_should_parse_reversal_pix_transaction",
"tests/test_parsing.py::test_should_parse_failed_pix_transaction",
"tests/test_parsing.py::test_parse_generic_transaction_should_retrieve_amount_from_detail_when_contains_rs",
"tests/test_parsing.py::test_parse_generic_transaction_should_ignore_amount_from_detail_when_doesnt_contains_rs",
"tests/test_parsing.py::test_parse_generic_transaction_should_retrieve_amount_from_footer_when_contains_rs",
"tests/test_parsing.py::test_parse_generic_transaction_should_ignore_amount_from_footer_when_doesnt_contains_rs",
"tests/test_parsing.py::test_parse_float[R$1,00-1.0]",
"tests/test_parsing.py::test_parse_float[R$0,01-0.01]",
"tests/test_parsing.py::test_parse_float[R$0,1-0.1]",
"tests/test_parsing.py::test_parse_float[R$1.000,20-1000.2]",
"tests/test_parsing.py::test_parse_float[R$83.120,11-83120.11]",
"tests/test_parsing.py::test_parse_float[R$9.183.120,11-9183120.11]",
"tests/test_parsing.py::test_parse_float[Proje\\xe7\\xe3o"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2022-07-25 12:36:50+00:00 | mit | 1,091 |
|
andrewDoing__hydrate-40 | diff --git a/README.md b/README.md
index df64bf4..084fde5 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
Hydrate crawls a kubernetes cluster and generates a high level description of your deployments.
## Setup
-Ensure you are using Python 3 or a newer version.
+Ensure you are using Python 3.6 or a newer version.
Include a "kubeconfig" file for your cluster in the same directory as hydrate.py,
or specify one with the -k argument.
Finally, install the dependencies.
@@ -12,8 +12,15 @@ pip install -r requirements.txt
## Basic Usage
```bash
-python -m hydrate [-h] [-n NAME] [-o path] [-v] [-d] run
+python -m hydrate [-h] [-n NAME] [-k FILE] [-o PATH] [-v] [-d] run
```
+The component.yaml file that is created is based on the specification detailed in the [Fabrikate](https://github.com/Microsoft/fabrikate "Fabrikate") repo.
+
+[Fabrikate Component Definition](https://github.com/microsoft/fabrikate/blob/master/docs/component.md "Component Definition")
+
+[Fabrikate Config Definition](https://github.com/microsoft/fabrikate/blob/master/docs/config.md "Config Definition")
+
+
### Positional arguments:
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index bd33118..99166bc 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -10,8 +10,6 @@ pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
- Python35:
- python.version: '3.5'
Python36:
python.version: '3.6'
Python37:
diff --git a/hydrate/component.py b/hydrate/component.py
index 96c1736..3b929d4 100644
--- a/hydrate/component.py
+++ b/hydrate/component.py
@@ -6,9 +6,9 @@ from copy import deepcopy
class Component():
"""Hold the information for fabrikate High-Level Deployment(HLD)."""
- def __init__(self, name, generator="static",
- source="<source repository url>", method="git",
- path=None):
+ def __init__(self, name, generator=None, source="<source repository url>",
+ method="git", path=None, version=None, branch=None,
+ hooks=None, repositories=None, subcomponents=None):
"""Instantiate a Component object.
Args:
@@ -22,11 +22,11 @@ class Component():
self.source = source
self.method = method
self.path = path
- self.version = None
- self.branch = None
- self.hooks = None
- self.repositories = None
- self.subcomponents = None
+ self.version = version
+ self.branch = branch
+ self.hooks = hooks
+ self.repositories = repositories
+ self.subcomponents = subcomponents
def __eq__(self, other):
"""Override the default __eq__."""
diff --git a/hydrate/scrape.py b/hydrate/scrape.py
index eec595d..88623d7 100644
--- a/hydrate/scrape.py
+++ b/hydrate/scrape.py
@@ -5,28 +5,55 @@ import re
from .component import Component
# URL to the Fabrikate Component Definitions
-COMP_DEFS_URL = "https://api.github.com/repos/microsoft/fabrikate-definitions/contents/definitions"
+FAB_DEFS_URL = "https://github.com/microsoft/fabrikate-definitions"
+FAB_DEFS_API = "https://api.github.com/repos/microsoft/fabrikate-definitions/contents/definitions"
def get_repo_components():
"""Return the Fabrikate Component List."""
- json_obj = json_get(COMP_DEFS_URL)
+ json_obj = json_get(FAB_DEFS_API)
if json_obj:
- components = parse_json(json_obj)
+ json_data = parse_json(json_obj)
+ components = construct_components(json_data)
components = remove_fabrikate_prefix(components)
return components
- raise Exception('JSON not retrieved. URL:{}'.format(COMP_DEFS_URL))
+ raise Exception('JSON not retrieved. URL:{}'.format(FAB_DEFS_API))
-def parse_json(json_list):
- """Parse json to get each component."""
+def get_path(html_url):
+ """Get the component path from the html_url."""
+ return re.sub(r'.*master/', '', html_url)
+
+
+def construct_components(json_data):
+ """Construct Component objects using a list of data tuples."""
components = []
- for entry in json_list:
- component = Component(entry["name"], source=entry["html_url"])
- components.append(component)
+ for defintion in json_data:
+ components.append(
+ Component(name=defintion["name"],
+ source=defintion["source"],
+ path=defintion["path"]))
return components
+def parse_json(json_list):
+ """Parse json to get information for each definition.
+
+ Returns:
+ dict
+
+ """
+ json_dicts = []
+ for entry in json_list:
+ json_data = {
+ 'name': entry["name"],
+ 'source': FAB_DEFS_URL,
+ 'path': get_path(entry["html_url"])
+ }
+ json_dicts.append(json_data)
+ return json_dicts
+
+
def remove_fabrikate_prefix(components):
"""Remove the fabrikate prefix from the Component names."""
for component in components:
| andrewDoing/hydrate | 012aed555c47e27008afae6bf75e11305c373bbb | diff --git a/tests/test_scrape.py b/tests/test_scrape.py
index c945d5f..2f11ee8 100644
--- a/tests/test_scrape.py
+++ b/tests/test_scrape.py
@@ -2,8 +2,10 @@
import pytest
from hydrate.component import Component
+from hydrate.scrape import FAB_DEFS_URL, FAB_DEFS_API
from hydrate.scrape import get_repo_components
from hydrate.scrape import parse_json
+from hydrate.scrape import construct_components
from hydrate.scrape import remove_fabrikate_prefix
from hydrate.scrape import json_get
@@ -11,31 +13,64 @@ from hydrate.scrape import json_get
[(1), (None)])
def test_get_repo_components(mocker, json_get_ret):
"""Test the get_repo_components function."""
+ mock_json_get = mocker.patch("hydrate.scrape.json_get",
+ return_value=json_get_ret)
mock_parse_json = mocker.patch("hydrate.scrape.parse_json",
return_value=json_get_ret)
+ mock_construct_components = mocker.patch(
+ "hydrate.scrape.construct_components",
+ return_value=json_get_ret)
mock_rm_fab_prefix = mocker.patch("hydrate.scrape.remove_fabrikate_prefix")
- mock_json_get = mocker.patch("hydrate.scrape.json_get")
- get_repo_components()
+
+ if json_get_ret:
+ get_repo_components()
+ else:
+ with pytest.raises(Exception, match=r".* URL:%s" % (FAB_DEFS_API)):
+ get_repo_components()
+
mock_json_get.assert_called_once()
if mock_json_get.return_value:
mock_parse_json.assert_called_once()
+ mock_construct_components.assert_called_once()
mock_rm_fab_prefix.assert_called_once()
else:
mock_parse_json.assert_not_called()
+ mock_construct_components.assert_not_called()
mock_rm_fab_prefix.assert_not_called()
-tst_json_list = [{"name": "Test1", "html_url": "www.test1.com"},
- {"name": "Test2", "html_url": "www.test2.com"}]
-exp_components = [Component("Test1", source="www.test1.com"),
- Component("Test2", source="www.test2.com")]
-
[email protected]('json_list, exp_components',
- [(tst_json_list, exp_components)])
-def test_parse_json(json_list, exp_components):
+tst_json_list = [{"name": "Test1",
+ "source": FAB_DEFS_URL,
+ "html_url": "www.test1.com"},
+ {"name": "Test2",
+ "source": FAB_DEFS_URL,
+ "html_url": "www.test2.com"}]
+exp_json_data = [{"name": "Test1",
+ "source": FAB_DEFS_URL,
+ "path": "www.test1.com"},
+ {"name": "Test2",
+ "source": FAB_DEFS_URL,
+ "path": "www.test2.com"}]
+
[email protected]('json_list, exp_json_data',
+ [(tst_json_list, exp_json_data)])
+def test_parse_json(json_list, exp_json_data):
"""Test parse_json function."""
- assert parse_json(json_list) == exp_components
+ assert parse_json(json_list) == exp_json_data
+
+
+exp_new_components = [Component("Test1",
+ source=FAB_DEFS_URL,
+ path="www.test1.com"),
+ Component("Test2",
+ source=FAB_DEFS_URL,
+ path="www.test2.com")]
[email protected]('tst_json_data, exp_components',
+ [(exp_json_data, exp_new_components)])
+def test_construct_components(tst_json_data, exp_components):
+ """Test construct_components function."""
+ assert construct_components(tst_json_data) == exp_components
tst_fab_comps = [Component("fabrikate-test-component"),
| Add path to fabrikate subcomponent
For components that have a source in a GitHub repo, add a path property with the corresponding path instead of including the full link to the source. | 0.0 | 012aed555c47e27008afae6bf75e11305c373bbb | [
"tests/test_scrape.py::test_get_repo_components[1]",
"tests/test_scrape.py::test_get_repo_components[None]",
"tests/test_scrape.py::test_parse_json[json_list0-exp_json_data0]",
"tests/test_scrape.py::test_construct_components[tst_json_data0-exp_components0]",
"tests/test_scrape.py::test_remove_fabriakte_prefix[components0-exp_components0]",
"tests/test_scrape.py::test_json_get[www.get-test-json.com-exp_json0-mock_resp0]",
"tests/test_scrape.py::test_json_get[www.get-test-json.com-None-mock_resp1]"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2019-06-28 00:21:55+00:00 | mit | 1,092 |
|
andrewDoing__hydrate-44 | diff --git a/hydrate/__main__.py b/hydrate/__main__.py
index c61bb5d..bb04e88 100644
--- a/hydrate/__main__.py
+++ b/hydrate/__main__.py
@@ -9,7 +9,7 @@ import sys
from pathlib import Path
from .cluster import Cluster
-from .component import Component, get_full_matches
+from .component import Component, match_components
from .hld import generate_HLD
from .scrape import get_repo_components
@@ -26,20 +26,20 @@ def main(args):
print("Connected!")
print("Collecting information from the cluster...")
- cluster_components = my_cluster.get_components()
+ cc = my_cluster.get_components()
print("Collecting Fabrikate Components from GitHub...")
- repo_components = get_repo_components()
+ rc = get_repo_components()
print("Comparing Fabrikate Components to Cluster Deployments...")
- full_matches = get_full_matches(repo_components, cluster_components)
+ subcomponents, category_indeces = match_components(rc, cc)
verbose_print("Creating Component object...")
my_component = Component(args.name, path="./manifests")
verbose_print("Creating the list of subcomponents...")
sub_list = []
- for component in full_matches:
+ for component in subcomponents:
component.delete_none_attrs()
sub_list.append(component)
@@ -47,19 +47,22 @@ def main(args):
print("Writing HLD...")
+ output_file = None
if args.dry_run:
verbose_print("Writing component.yaml to terminal...")
- generate_HLD(my_component, sys.stdout)
+ generate_HLD(my_component, sys.stdout, category_indeces)
+
else:
if args.output:
verbose_print("Writing component.yaml to {}.".format(args.output))
- output = os.path.join(args.output, "component.yaml")
- with open(output, "w") as of:
- generate_HLD(my_component, of)
+ output_file = os.path.join(args.output, "component.yaml")
+
else:
verbose_print("Writing to component.yaml...")
- with open("component.yaml", "w") as of:
- generate_HLD(my_component, of)
+ output_file = "component.yaml"
+
+ with open(output_file, "w") as of:
+ generate_HLD(my_component, of, category_indeces)
def parse_args():
diff --git a/hydrate/cluster.py b/hydrate/cluster.py
index e9d274b..cfedb27 100644
--- a/hydrate/cluster.py
+++ b/hydrate/cluster.py
@@ -1,6 +1,7 @@
"""Kubernetes Cluster API Class."""
from kubernetes import client, config
from .component import Component
+import re
class Cluster():
@@ -16,7 +17,8 @@ class Cluster():
self.kubeconfig = kubeconfig
self.apps_v1_api = None
self.core_v1_api = None
- self.namespaced_pods = {}
+ self.namespaced_pods = dict()
+ self.namespaced_deployments = dict()
def connect_to_cluster(self):
"""Connect to the cluster. Set API attributes."""
@@ -32,24 +34,49 @@ class Cluster():
"""
components = []
+ default_deps = self.get_namespaced_deployments("default")
namespaces = self.get_namespaces()
namespaces = self.remove_defaults(namespaces)
- # Scenario where cluster contains namespaces other than default ones
+ # Scenario where cluster applications live in namespaces
if namespaces:
- components = [namespace for namespace in namespaces]
- components = [get_first_word(comp) for comp in components]
- components = [Component(name) for name in components]
- # Scenario where cluster applications all live in the default namespace
- else:
- pods = self.get_namespaced_pods("default")
- components = self.process_cluster_objects(pods)
+ first_words = [get_first_word(name) for name in namespaces]
+ components.extend([Component(word) for word in first_words])
+ # Scenario where cluster applications live in default
+ if default_deps:
+ dep_names = [
+ re.sub(r'-deployment', '', dep) for dep in default_deps]
+ components.extend([Component(n) for n in dep_names])
+
return components
+ def get_statefulsets(self):
+ """Query the cluster for statefulsets."""
+ ret = self.apps_v1_api.list_stateful_set_for_all_namespaces()
+ with open("statefulsets.json", "w") as of:
+ of.write(dict(ret))
+
def get_namespaces(self):
"""Query the cluster for namespaces."""
ret = self.core_v1_api.list_namespace()
return [i.metadata.name for i in ret.items]
+ def get_namespaced_deployments(self, namespace):
+ """Store the list of deployments in the namespace.
+
+ Args:
+ namespace: The namespace to look in.
+
+ Return:
+ deployment_list: list of pods found in the namespace.
+ """
+ if namespace in self.namespaced_deployments:
+ return self.namespaced_deployments[namespace]
+ else:
+ ret = self.apps_v1_api.list_namespaced_deployment(namespace)
+ deployment_list = [i.metadata.name for i in ret.items]
+ self.namespaced_pods[namespace] = deployment_list
+ return deployment_list
+
def get_namespaced_pods(self, namespace):
"""Store the list of pods in the namespace.
diff --git a/hydrate/component.py b/hydrate/component.py
index 3b929d4..139404f 100644
--- a/hydrate/component.py
+++ b/hydrate/component.py
@@ -66,10 +66,37 @@ class Component():
delattr(self, key)
+def match_components(repo_components, cluster_components):
+ """Match cluster and repo components."""
+ subcomponents = []
+ category_indeces = []
+ rc = repo_components
+ cc = cluster_components
+ full_matches, fm_leftovers = get_full_matches(rc, cc)
+
+ # Indeces are determined by the length of the previous category
+ if full_matches:
+ subcomponents.extend(full_matches)
+ category_indeces.append((0, "Full Match Components"))
+
+ if fm_leftovers:
+ subcomponents.extend(fm_leftovers)
+ category_indeces.append((len(full_matches), "No Match Deployments"))
+
+ return subcomponents, category_indeces
+
+
def get_full_matches(repo_components, cluster_components):
- """Return the Fabrikate Components that fully match the cluster."""
+ """Determine which components fully match the cluster.
+
+ Returns:
+ full_matches: list of components
+ leftovers: list of components
+
+ """
full_matches = []
cluster_set = set()
+ leftovers = None
for cc in cluster_components:
cluster_set.add(cc.name)
for rc in repo_components:
@@ -81,5 +108,6 @@ def get_full_matches(repo_components, cluster_components):
if cluster_set:
print("Leftover deployments in cluster: {}".format(cluster_set))
+ leftovers = [cc for cc in cluster_components if cc.name in cluster_set]
- return full_matches
+ return full_matches, leftovers
diff --git a/hydrate/hld.py b/hydrate/hld.py
index 7f869f5..8ee8f79 100644
--- a/hydrate/hld.py
+++ b/hydrate/hld.py
@@ -1,18 +1,28 @@
"""Use to construct the High-Level Deployment."""
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml import YAML
yaml = YAML()
+OFFSET = 2
-def generate_HLD(component, output):
+
+def generate_HLD(component, output, comment_indeces=None):
"""Create HLD yaml file.
Args:
component: Component object
output: filestream
+ comment_indeces: List of tuples (index, comment text)
"""
component.delete_none_attrs()
- yaml.indent(mapping=2, sequence=4, offset=2)
+ yaml.indent(mapping=2, sequence=4, offset=OFFSET)
d = component.asdict()
+ if comment_indeces:
+ d = CommentedMap(d)
+ lst = CommentedSeq(d["subcomponents"])
+ for idx, comment in comment_indeces:
+ lst.yaml_set_comment_before_after_key(idx, comment, OFFSET)
+ d["subcomponents"] = lst
yaml.dump(d, output)
diff --git a/hydrate/scrape.py b/hydrate/scrape.py
index 88623d7..23f010f 100644
--- a/hydrate/scrape.py
+++ b/hydrate/scrape.py
@@ -6,7 +6,7 @@ from .component import Component
# URL to the Fabrikate Component Definitions
FAB_DEFS_URL = "https://github.com/microsoft/fabrikate-definitions"
-FAB_DEFS_API = "https://api.github.com/repos/microsoft/fabrikate-definitions/contents/definitions"
+FAB_DEFS_API = "https://api.github.com/repos/microsoft/fabrikate-definitions/contents/definitions" # noqa
def get_repo_components():
| andrewDoing/hydrate | 0f093f25dca2589b290bd3d3151b5b4335de314a | diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index 4e9ad86..db77bcb 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -43,13 +43,14 @@ class TestCluster():
mock_client.CoreV1Api.assert_called_once()
tst_namespaces = ["elasticsearch", "istio", "jaeger"]
- tst_pods = ["elasticsearch-pod", "istio-pod", "jaeger-pod"]
+ tst_deps = ["elasticsearch-dep", "istio-dep", "jaeger-dep"]
- @pytest.mark.parametrize("tst_namespaces, tst_pods",
- [(tst_namespaces, None),
- (None, tst_pods)])
+ @pytest.mark.parametrize("tst_namespaces, tst_deps",
+ [(tst_namespaces, tst_deps),
+ (tst_namespaces, None),
+ (None, tst_deps)])
def test_get_components(self, mocker, cluster_connection,
- tst_namespaces, tst_pods):
+ tst_namespaces, tst_deps):
"""Test the method get_components."""
mock_get_namespaces = mocker.patch(
"hydrate.cluster.Cluster.get_namespaces",
@@ -58,23 +59,22 @@ class TestCluster():
"hydrate.cluster.Cluster.remove_defaults",
return_value=tst_namespaces)
mock_get_first_word = mocker.patch("hydrate.cluster.get_first_word")
- mock_get_namespaced_pods = mocker.patch(
- "hydrate.cluster.Cluster.get_namespaced_pods",
- return_value=tst_pods)
- mock_process_cluster_objects = mocker.patch(
- "hydrate.cluster.Cluster.process_cluster_objects",
- return_value=tst_pods)
+ mock_get_namespaced_deployments = mocker.patch(
+ "hydrate.cluster.Cluster.get_namespaced_deployments",
+ return_value=tst_deps
+ )
+ mock_re_sub = mocker.patch("hydrate.cluster.re.sub")
components = cluster_connection.get_components()
assert components
+ mock_get_namespaced_deployments.assert_called_once()
mock_get_namespaces.assert_called_once()
mock_remove_defaults.assert_called_once()
if tst_namespaces:
mock_get_first_word.assert_called()
- else:
- mock_get_namespaced_pods.assert_called_once()
- mock_process_cluster_objects.assert_called_once()
+ if tst_deps:
+ mock_re_sub.assert_called()
tst_get_namespaces = ["elasticsearch", "istio", "jaeger"]
@pytest.mark.parametrize("tst_get_namespaces",
diff --git a/tests/test_component.py b/tests/test_component.py
index a99c307..b7b6f04 100644
--- a/tests/test_component.py
+++ b/tests/test_component.py
@@ -50,14 +50,23 @@ tst_repo_components = [Component("dep1-dep2"),
Component("dep4-dep5")]
exp_full_matches = [Component("dep1-dep2"),
Component("dep3")]
+exp_leftovers = [Component("dep4"),
+ Component("dep6")]
[email protected]('repo_components, cluster_components, expected',
[email protected]('''repo_components, cluster_components,
+ expected_fm, expected_leftos''',
[(tst_repo_components,
tst_cluster_components,
- exp_full_matches)])
-def test_get_full_matches(repo_components, cluster_components, expected):
+ exp_full_matches,
+ exp_leftovers)])
+def test_get_full_matches(repo_components, cluster_components,
+ expected_fm, expected_leftos):
"""Test get_full_matches()."""
- full_matches = get_full_matches(repo_components, cluster_components)
- for fmc, exp in zip_longest(full_matches, exp_full_matches):
+ fms, leftos = get_full_matches(repo_components, cluster_components)
+
+ for fmc, exp in zip_longest(fms, exp_full_matches):
assert fmc.name == exp.name
+
+ for lefto, exp_lefto in zip_longest(leftos, exp_leftovers):
+ assert lefto.name == exp_lefto.name
| Create Subcomponent Entries for No Match Deployments
As a user, I want the deployments living on my cluster to be reflected in the generated component.yaml, even if no matching Fabrikate component exists for them.
Steps for handling these cases:
1. Use comments in the component.yaml to make a section for these "no match" deployments, since they will likely require user input before they are ready to be used by Fabrikate (a sketch of this sectioning follows the list below).
2. Guide the user through adding a source and generator to the component.yaml
- Deployment lives on GitHub
- Deployment uses a helm chart
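A minimal sketch of the comment-sectioning idea (assumes `ruamel.yaml`; the document contents and section titles here are illustrative, mirroring the `CommentedMap`/`CommentedSeq` approach used in `hydrate/hld.py` above):
```python
import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq

yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)

doc = CommentedMap({"name": "cluster", "subcomponents": None})
subs = CommentedSeq([
    {"name": "elasticsearch"},       # fully matched a Fabrikate definition
    {"name": "mystery-deployment"},  # no match: user must fill in source/generator
])
# (index, text) pairs: index 1 opens the "No Match Deployments" section
for idx, comment in [(0, "Full Match Components"), (1, "No Match Deployments")]:
    subs.yaml_set_comment_before_after_key(idx, comment, 2)
doc["subcomponents"] = subs
yaml.dump(doc, sys.stdout)
```
The dumped YAML then carries `# Full Match Components` and `# No Match Deployments` headers, and the no-match section is where the user would be guided to add the missing source and generator entries by hand (e.g. pointing at a GitHub repo or helm chart).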
Part of #29 | 0.0 | 0f093f25dca2589b290bd3d3151b5b4335de314a | [
"tests/test_cluster.py::TestCluster::test_get_components[tst_namespaces0-tst_deps0]",
"tests/test_cluster.py::TestCluster::test_get_components[tst_namespaces1-None]",
"tests/test_cluster.py::TestCluster::test_get_components[None-tst_deps2]",
"tests/test_component.py::test_get_full_matches[repo_components0-cluster_components0-expected_fm0-expected_leftos0]"
]
| [
"tests/test_cluster.py::TestCluster::test_connect_to_cluster",
"tests/test_cluster.py::TestCluster::test_get_namespaces[tst_get_namespaces0]",
"tests/test_cluster.py::TestCluster::test_get_namespaced_pods[tst_pods0]",
"tests/test_cluster.py::TestCluster::test_process_cluster_objects[tst_objects0-exp_components0]",
"tests/test_cluster.py::TestCluster::test_remove_defaults[tst_namespaces0-exp_namespaces0]",
"tests/test_cluster.py::TestCluster::test_remove_defaults[tst_namespaces1-exp_namespaces1]",
"tests/test_cluster.py::test_get_first_word[fabrikate-elasticsearch---fabrikate]",
"tests/test_cluster.py::test_count_first_word[str_list0-expected0]",
"tests/test_cluster.py::test_sort_dict_by_value[d0-expected0]",
"tests/test_component.py::TestComponent::test_asdict",
"tests/test_component.py::TestComponent::test_delete_none_attrs"
]
| {
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-07-02 00:53:41+00:00 | mit | 1,093 |
|
andrewgodwin__urlman-12 | diff --git a/urlman.py b/urlman.py
index 738018e..7700384 100644
--- a/urlman.py
+++ b/urlman.py
@@ -51,7 +51,7 @@ class UrlsMetaclass(type):
return type.__new__(self, name, bases, attrs)
def __get__(self, instance, klass):
- return self(klass, instance)
+ return self(klass, instance, self.__name__)
class Urls(with_metaclass(UrlsMetaclass)):
@@ -62,11 +62,12 @@ class Urls(with_metaclass(UrlsMetaclass)):
format. If you need to you can also specify a handler function for a url.
"""
- def __init__(self, klass, instance):
+ def __init__(self, klass, instance, name):
self.klass = klass
self.instance = instance
self.context = {"self": self.instance}
self.context.update(self.urls)
+ self.__qualname__ = ".".join((klass.__qualname__, name))
def __getattr__(self, attr):
return self.get_url(attr)
| andrewgodwin/urlman | a750ba9a9922b32d46a90f8a2e69cea7f1103296 | diff --git a/test_urlman.py b/test_urlman.py
index 6fb6713..201eed9 100644
--- a/test_urlman.py
+++ b/test_urlman.py
@@ -135,3 +135,7 @@ def test_rest_framework_serializer(post):
"admin": post.urls.admin.full(),
}
assert relative_field.to_representation(post) == {"view": post.urls.view}
+
+
+def test_qualname(post):
+ assert post.urls.__qualname__ == "Post.urls"
| __qualname__ access by Sphinx 3.4.0 fails
I'm using Sphinx to document my project that uses `urlman`. As of the new Sphinx 3.4.0, Sphinx [uses](https://github.com/sphinx-doc/sphinx/issues/5538) `__qualname__` to resolve inheritance when running `autodoc`. This process causes `urlman` to throw an exception, making the docs unbuildable:
```
File "/home/pretalx_docs/.local/lib/python3.8/site-packages/sphinx/ext/autodoc/__init__.py", line 931, in generate
sourcename = self.get_sourcename()
File "/home/pretalx_docs/.local/lib/python3.8/site-packages/sphinx/ext/autodoc/__init__.py", line 575, in get_sourcename
getattr(self.object, '__qualname__', None)):
File "/home/pretalx_docs/.local/lib/python3.8/site-packages/urlman.py", line 69, in __getattr__
return self.get_url(attr)
File "/home/pretalx_docs/.local/lib/python3.8/site-packages/urlman.py", line 76, in get_url
raise ValueError("No URL called %r on %r" %
ValueError: No URL called '__qualname__' on 'NoneType'
```
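As an aside, a hedged sketch of the first mitigation suggested below, in which dunder lookups are never treated as URLs (`SafeUrls` and its stub `get_url` are hypothetical stand-ins; the fix actually merged, per the patch above, instead sets `__qualname__` explicitly in `__init__`):
```python
class SafeUrls:
    """Hypothetical variant of urlman's __getattr__ that ignores dunders."""

    def get_url(self, attr):  # stand-in for the real URL resolver
        return "/%s/" % attr

    def __getattr__(self, attr):
        if attr.startswith("__") and attr.endswith("__"):
            # Raise AttributeError so getattr(obj, '__some_dunder__', None)
            # degrades gracefully instead of hitting get_url().
            raise AttributeError(attr)
        return self.get_url(attr)


urls = SafeUrls()
assert urls.view == "/view/"                       # normal names still resolve
assert getattr(urls, "__wrapped__", None) is None  # missing dunders no longer error
```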
I'm not sure what the right solution is; I'm tempted to say that dunder attribute access should simply not be interpreted as a URL (the guard sketched above illustrates this option), or at least that lookup should fall back to the actual attributes when URL resolution fails. | 0.0 | a750ba9a9922b32d46a90f8a2e69cea7f1103296 | [
"test_urlman.py::test_qualname"
]
| [
"test_urlman.py::test_urlstring_standalone",
"test_urlman.py::test_basic",
"test_urlman.py::test_get_example_url",
"test_urlman.py::test_non_extistent_url",
"test_urlman.py::test_broken_url",
"test_urlman.py::test_recursion_url",
"test_urlman.py::test_full_default_url",
"test_urlman.py::test_full_scheme_url",
"test_urlman.py::test_full_hostname_url",
"test_urlman.py::test_full_port_url",
"test_urlman.py::test_full_params_url",
"test_urlman.py::test_full_query_url",
"test_urlman.py::test_full_fragement_url",
"test_urlman.py::test_callable",
"test_urlman.py::test_rest_framework_serializer"
]
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2020-12-25 08:03:05+00:00 | apache-2.0 | 1,094 |
|
angr__claripy-310 | diff --git a/claripy/backends/backend_z3.py b/claripy/backends/backend_z3.py
index 6a133e05..0ace4228 100644
--- a/claripy/backends/backend_z3.py
+++ b/claripy/backends/backend_z3.py
@@ -65,7 +65,7 @@ def _add_memory_pressure(p):
This is not a problem for CPython since its GC is based on reference counting.
"""
- global _is_pypy
+ global _is_pypy # pylint:disable=global-variable-not-assigned
if _is_pypy:
__pypy__.add_memory_pressure(p)
@@ -136,7 +136,6 @@ class BackendZ3(Backend):
def __init__(self, reuse_z3_solver=None, ast_cache_size=10000):
Backend.__init__(self, solver_required=True)
- self._enable_simplification_cache = False
# Per-thread Z3 solver
# This setting is treated as a global setting and is not supposed to be changed during runtime, unless you know
@@ -257,30 +256,12 @@ class BackendZ3(Backend):
self._tls.sym_cache = weakref.WeakValueDictionary()
return self._tls.sym_cache
- @property
- def _simplification_cache_key(self):
- try:
- return self._tls.simplification_cache_key
- except AttributeError:
- self._tls.simplification_cache_key = weakref.WeakValueDictionary()
- return self._tls.simplification_cache_key
-
- @property
- def _simplification_cache_val(self):
- try:
- return self._tls.simplification_cache_val
- except AttributeError:
- self._tls.simplification_cache_val = weakref.WeakValueDictionary()
- return self._tls.simplification_cache_val
-
def downsize(self):
Backend.downsize(self)
self._ast_cache.clear()
self._var_cache.clear()
self._sym_cache.clear()
- self._simplification_cache_key.clear()
- self._simplification_cache_val.clear()
def _name(self, o): #pylint:disable=unused-argument
l.warning("BackendZ3.name() called. This is weird.")
@@ -378,7 +359,7 @@ class BackendZ3(Backend):
#
@condom
- def _convert(self, obj): # pylint:disable=arguments-differ
+ def _convert(self, obj): # pylint:disable=arguments-renamed
if isinstance(obj, FSort):
return z3.FPSortRef(z3.Z3_mk_fpa_sort(self._context.ref(), obj.exp, obj.mantissa), self._context)
elif isinstance(obj, RM):
@@ -406,7 +387,7 @@ class BackendZ3(Backend):
l.debug("BackendZ3 encountered unexpected type %s", type(obj))
raise BackendError("unexpected type %s encountered in BackendZ3" % type(obj))
- def call(self, *args, **kwargs): # pylint;disable=arguments-differ
+ def call(self, *args, **kwargs): # pylint;disable=arguments-renamed
return Backend.call(self, *args, **kwargs)
@condom
@@ -975,22 +956,6 @@ class BackendZ3(Backend):
if expr._simplified:
return expr
- if self._enable_simplification_cache:
- try:
- k = self._simplification_cache_key[expr._cache_key]
- #print "HIT WEAK KEY CACHE"
- return k
- except KeyError:
- pass
- try:
- k = self._simplification_cache_val[expr._cache_key]
- #print "HIT WEAK VALUE CACHE"
- return k
- except KeyError:
- pass
-
- #print "MISS CACHE"
-
#l.debug("SIMPLIFYING EXPRESSION")
expr_raw = self.convert(expr)
@@ -1016,9 +981,6 @@ class BackendZ3(Backend):
o = self._abstract(s)
o._simplified = Base.FULL_SIMPLIFY
- if self._enable_simplification_cache:
- self._simplification_cache_val[expr._cache_key] = o
- self._simplification_cache_key[expr._cache_key] = o
return o
def _is_false(self, e, extra_constraints=(), solver=None, model_callback=None):
diff --git a/claripy/frontend_mixins/model_cache_mixin.py b/claripy/frontend_mixins/model_cache_mixin.py
index 57965461..b773b2ee 100644
--- a/claripy/frontend_mixins/model_cache_mixin.py
+++ b/claripy/frontend_mixins/model_cache_mixin.py
@@ -1,3 +1,4 @@
+from typing import Tuple
import weakref
import itertools
@@ -10,6 +11,7 @@ class ModelCache:
def __init__(self, model):
self.model = model
self.replacements = weakref.WeakKeyDictionary()
+ self.constraint_only_replacements = weakref.WeakKeyDictionary()
def __hash__(self):
if not hasattr(self, '_hash'):
@@ -25,6 +27,7 @@ class ModelCache:
def __setstate__(self, s):
self.model = s[0]
self.replacements = weakref.WeakKeyDictionary()
+ self.constraint_only_replacements = weakref.WeakKeyDictionary()
#
# Splitting support
@@ -50,12 +53,29 @@ class ModelCache:
a
)
- def eval_ast(self, ast):
- """Eval the ast, replacing symbols by their last value in the model.
+ def _leaf_op_existonly(self, a):
+ return (
+ all_operations.BVV(self.model[a.args[0]], a.length) if a.op == 'BVS' else
+ all_operations.BoolV(self.model[a.args[0]]) if a.op == 'BoolS' else
+ all_operations.FPV(self.model[a.args[0]], a.args[1]) if a.op == 'FPS' else
+ all_operations.StringV(self.model[a.args[0]]) if a.op == 'StringS' else
+ a
+ )
+
+ def eval_ast(self, ast, allow_unconstrained: bool=True):
"""
- # If there was no last value, it was not constrained, so we can use
- # anything.
- new_ast = ast.replace_dict(self.replacements, leaf_operation=self._leaf_op)
+ Eval the ast, replacing symbols by their last value in the model.
+
+ :param ast: The AST to evaluate.
+ :param allow_unconstrained: When set to True, we will treat non-existent variables as unconstrained variables
+ and will use arbitrary concrete values for them during evaluation. Otherwise, raise
+ KeyErrors for non-existent variables.
+ """
+
+ if allow_unconstrained:
+ new_ast = ast.replace_dict(self.replacements, leaf_operation=self._leaf_op)
+ else:
+ new_ast = ast.replace_dict(self.constraint_only_replacements, leaf_operation=self._leaf_op_existonly)
return backends.concrete.eval(new_ast, 1)[0]
def eval_constraints(self, constraints):
@@ -68,8 +88,19 @@ class ModelCache:
except errors.ClaripyZeroDivisionError:
return False
- def eval_list(self, asts):
- return tuple(self.eval_ast(c) for c in asts)
+ def eval_list(self, asts, allow_unconstrained: bool=True) -> Tuple:
+ """
+ Evaluate a list of ASTs.
+
+ :param asts: A list of ASTs to evaluate.
+ :param allow_unconstrained: When set to True, we will treat non-existent variables as unconstrained variables
+ and will use arbitrary concrete values for them during evaluation. Otherwise, raise
+ KeyErrors for non-existent variables.
+ :return: A tuple of evaluated results, one element per AST.
+ """
+
+ return tuple(self.eval_ast(c, allow_unconstrained=allow_unconstrained) for c in asts)
+
class ModelCacheMixin:
def __init__(self, *args, **kwargs):
@@ -221,30 +252,31 @@ class ModelCacheMixin:
# Z3 might give us solutions for variables that we did not ask for. so we create a new dict with solutions for
# only the variables that are under the solver's control
m_ = dict((k, v) for k, v in m.items() if k in self.variables)
- model = ModelCache(m_)
- self._models.add(model)
+ if m_:
+ model = ModelCache(m_)
+ self._models.add(model)
def _get_models(self, extra_constraints=()):
for m in self._models:
if m.eval_constraints(extra_constraints):
yield m
- def _get_batch_solutions(self, asts, n=None, extra_constraints=()):
+ def _get_batch_solutions(self, asts, n=None, extra_constraints=(), allow_unconstrained=True):
results = set()
for m in self._get_models(extra_constraints):
try:
- results.add(m.eval_list(asts))
- except ZeroDivisionError:
+ results.add(m.eval_list(asts, allow_unconstrained=allow_unconstrained))
+ except (ZeroDivisionError, KeyError):
continue
if len(results) == n:
break
return results
- def _get_solutions(self, e, n=None, extra_constraints=()):
+ def _get_solutions(self, e, n=None, extra_constraints=(), allow_unconstrained=True):
return tuple(v[0] for v in self._get_batch_solutions(
- [e], n=n, extra_constraints=extra_constraints
+ [e], n=n, extra_constraints=extra_constraints, allow_unconstrained=allow_unconstrained,
))
@@ -283,7 +315,11 @@ class ModelCacheMixin:
raise
if len(extra_constraints) == 0 and len(results) < n:
- self._eval_exhausted.update(e.cache_key for e in asts)
+ for e in asts:
+ # only mark an AST as eval-exhausted if e.variables is a subset of variables that the current solver
+ # knows about (from its constraints)
+ if self.variables.issuperset(e.variables):
+ self._eval_exhausted.add(e.cache_key)
return results
@@ -293,7 +329,9 @@ class ModelCacheMixin:
def min(self, e, extra_constraints=(), signed=False, **kwargs):
cached = [ ]
if e.cache_key in self._eval_exhausted or e.cache_key in self._min_exhausted:
- cached = self._get_solutions(e, extra_constraints=extra_constraints)
+ # we set allow_unconstrained to False because we expect all returned values for e are returned by Z3,
+ # instead of some arbitrarily assigned concrete values.
+ cached = self._get_solutions(e, extra_constraints=extra_constraints, allow_unconstrained=False)
if len(cached) > 0:
signed_key = lambda v: v if v < 2**(len(e)-1) else v - 2**len(e)
@@ -307,7 +345,7 @@ class ModelCacheMixin:
def max(self, e, extra_constraints=(), signed=False, **kwargs):
cached = [ ]
if e.cache_key in self._eval_exhausted or e.cache_key in self._max_exhausted:
- cached = self._get_solutions(e, extra_constraints=extra_constraints)
+ cached = self._get_solutions(e, extra_constraints=extra_constraints, allow_unconstrained=False)
if len(cached) > 0:
signed_key = lambda v: v if v < 2**(len(e)-1) else v - 2**len(e)
| angr/claripy | 59f1a0b87be4434871242c1fb04b0bbcf70ebacf | diff --git a/tests/test_expression.py b/tests/test_expression.py
index 509c7d36..6627f900 100644
--- a/tests/test_expression.py
+++ b/tests/test_expression.py
@@ -246,6 +246,9 @@ class TestExpression(unittest.TestCase):
ite = claripy.ite_dict(x, {1: 11, 2: 22, 3: 33, 4: 44, 5: 55, 6: 66, 7: 77, 8: 88, 9: 99}, claripy.BVV(0, 32))
self.assertEqual(sorted(s.eval(ite, 100)), [0, 11, 22, 33, 44, 55, 66, 77, 88, 99])
+ # calling it a second time should not fail. it was failing on master due to we incorrectly putting the AST
+ # into the eval-exhausted set.
+ self.assertEqual(sorted(s.eval(ite, 100)), [0, 11, 22, 33, 44, 55, 66, 77, 88, 99])
ss = s.branch()
ss.add(ite == 88)
diff --git a/tests/test_solver.py b/tests/test_solver.py
index bdc269f4..54aee73f 100644
--- a/tests/test_solver.py
+++ b/tests/test_solver.py
@@ -608,6 +608,22 @@ class StandardTests(TestCase):
print(s.max(x, extra_constraints=[x <= 18]))
assert s.max(x) == 19
+ def test_cached_max(self):
+ s = claripy.Solver()
+ x = claripy.BVS("x", 32)
+ assert not s.constraints
+ assert s.max(x) == 0xffffffff
+ assert len(s.constraints) == 1 # ConstraintExpansionMixin will add a new constraint
+ assert s.max(x) == 0xffffffff # calling it the second time, the cache should not give a different result
+
+ s = claripy.Solver()
+ y = claripy.BVS("y", 32)
+ s.add(y == 8)
+ assert s.eval(y, n=1)[0] == 8
+ assert len(s.constraints) == 1
+ assert s.max(x) == 0xffffffff
+ assert s.max(x) == 0xffffffff
+
#
# Multi-Solver test base classes
#
| Calling solver max twice changes the result.
### Description
Calling `s.max(x)` twice in a row yields different results. I imagine this might be due to caching, as I can see that the first invocation of `s.max(x)` calls out to `z3`'s backend, whereas the second one does not. This invocation also adds extra constraints to the solver.
Note: calling `min` twice also has odd effects, though since the initial result of `s.min(x)` is 0, the second call still happens to return the correct value.
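For intuition, here is a toy sketch of the suspected pitfall (hypothetical names and values, not claripy's actual internals): a cached model that never constrained `x` is asked for `x`, and filling unconstrained variables with an arbitrary default yields the bogus answer.
```python
# Toy model cache: only y was ever constrained, so x is absent.
model = {"y_0_32": 8}

def eval_var(name, allow_unconstrained=True):
    if name in model:
        return model[name]
    if allow_unconstrained:
        return 0          # arbitrary filler: plausibly where max(x) == 0 comes from
    raise KeyError(name)  # safer: refuse, forcing a real solver query

print(eval_var("x_0_32"))  # 0, which is wrong as an answer for max(x)
# eval_var("x_0_32", allow_unconstrained=False) would raise and fall back to Z3
```
The merged patch above takes the `KeyError` route via an `allow_unconstrained=False` flag when consulting cached models for `min`/`max`.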
### Steps to reproduce the bug
```python
>>> import claripy
>>> s = claripy.Solver()
>>> x = claripy.BVS("x", 32)
>>> s.max(x)
4294967295
>>> s.max(x)
0
```
### Environment
New venv with fresh clone of master: https://github.com/angr/claripy/commit/33483ea55d6074270f65e2e2b106016be814430f
### Additional context
Notice that `s.constraints` is changed by the max invocation:
```python
>>> import claripy
>>> s = claripy.Solver()
>>> x = claripy.BVS("x", 32)
>>> s.constraints
[]
>>> s.max(x)
4294967295
>>> s.constraints
[<Bool x_0_32 <= 0xffffffff>]
>>> s.max(x)
0
``` | 0.0 | 59f1a0b87be4434871242c1fb04b0bbcf70ebacf | [
"tests/test_expression.py::TestExpression::test_ite_Solver",
"tests/test_expression.py::TestExpression::test_ite_SolverComposite",
"tests/test_expression.py::TestExpression::test_ite_SolverHybrid",
"tests/test_solver.py::StandardTests::test_cached_max"
]
| [
"tests/test_expression.py::TestExpression::test_arith_shift",
"tests/test_expression.py::TestExpression::test_bool",
"tests/test_expression.py::TestExpression::test_bool_conversion",
"tests/test_expression.py::TestExpression::test_canonical",
"tests/test_expression.py::TestExpression::test_cardinality",
"tests/test_expression.py::TestExpression::test_depth",
"tests/test_expression.py::TestExpression::test_depth_repr",
"tests/test_expression.py::TestExpression::test_expression",
"tests/test_expression.py::TestExpression::test_extract",
"tests/test_expression.py::TestExpression::test_extract_concat_simplify",
"tests/test_expression.py::TestExpression::test_get_byte",
"tests/test_expression.py::TestExpression::test_if_stuff",
"tests/test_expression.py::TestExpression::test_ite_reverse",
"tests/test_expression.py::TestExpression::test_logic_shift_right",
"tests/test_expression.py::TestExpression::test_multiarg",
"tests/test_expression.py::TestExpression::test_rename",
"tests/test_expression.py::TestExpression::test_signed_concrete",
"tests/test_expression.py::TestExpression::test_signed_symbolic",
"tests/test_expression.py::TestExpression::test_smudging",
"tests/test_expression.py::TestExpression::test_true_false_cache",
"tests/test_solver.py::StandardTests::test_composite_discrepancy_with_reuse",
"tests/test_solver.py::StandardTests::test_composite_discrepancy_without_reuse",
"tests/test_solver.py::StandardTests::test_composite_solver_branching_optimizations",
"tests/test_solver.py::StandardTests::test_exhaustion",
"tests/test_solver.py::StandardTests::test_minmax_with_reuse",
"tests/test_solver.py::StandardTests::test_minmax_without_reuse",
"tests/test_solver.py::StandardTests::test_model",
"tests/test_solver.py::StandardTests::test_simplification_annotations",
"tests/test_solver.py::StandardTests::test_unsatness",
"tests/test_solver.py::StandardTests::test_zero_division_in_cache_mixin",
"tests/test_solver.py::TestSolver::test_ancestor_merge_with_reuse",
"tests/test_solver.py::TestSolver::test_ancestor_merge_without_reuse",
"tests/test_solver.py::TestSolver::test_combine_with_reuse",
"tests/test_solver.py::TestSolver::test_combine_without_reuse",
"tests/test_solver.py::TestSolver::test_solver_branching_with_reuse",
"tests/test_solver.py::TestSolver::test_solver_branching_without_reuse",
"tests/test_solver.py::TestSolver::test_solver_with_reuse",
"tests/test_solver.py::TestSolver::test_solver_without_reuse",
"tests/test_solver.py::TestSolver::test_unsat_core_with_reuse",
"tests/test_solver.py::TestSolver::test_unsat_core_without_reuse",
"tests/test_solver.py::TestSolverReplacement::test_ancestor_merge_with_reuse",
"tests/test_solver.py::TestSolverReplacement::test_ancestor_merge_without_reuse",
"tests/test_solver.py::TestSolverReplacement::test_combine_with_reuse",
"tests/test_solver.py::TestSolverReplacement::test_combine_without_reuse",
"tests/test_solver.py::TestSolverReplacement::test_replacement_solver_with_reuse",
"tests/test_solver.py::TestSolverReplacement::test_replacement_solver_without_reuse",
"tests/test_solver.py::TestSolverReplacement::test_solver_branching_with_reuse",
"tests/test_solver.py::TestSolverReplacement::test_solver_branching_without_reuse",
"tests/test_solver.py::TestSolverReplacement::test_solver_with_reuse",
"tests/test_solver.py::TestSolverReplacement::test_solver_without_reuse",
"tests/test_solver.py::TestHybrid::test_ancestor_merge_with_reuse",
"tests/test_solver.py::TestHybrid::test_ancestor_merge_without_reuse",
"tests/test_solver.py::TestHybrid::test_combine_with_reuse",
"tests/test_solver.py::TestHybrid::test_combine_without_reuse",
"tests/test_solver.py::TestHybrid::test_hybrid_solver_with_reuse",
"tests/test_solver.py::TestHybrid::test_hybrid_solver_without_reuse",
"tests/test_solver.py::TestHybrid::test_solver_branching_with_reuse",
"tests/test_solver.py::TestHybrid::test_solver_branching_without_reuse",
"tests/test_solver.py::TestHybrid::test_solver_with_reuse",
"tests/test_solver.py::TestHybrid::test_solver_without_reuse",
"tests/test_solver.py::TestHybrid::test_unsat_core_with_reuse",
"tests/test_solver.py::TestHybrid::test_unsat_core_without_reuse",
"tests/test_solver.py::TestComposite::test_ancestor_merge_with_reuse",
"tests/test_solver.py::TestComposite::test_ancestor_merge_without_reuse",
"tests/test_solver.py::TestComposite::test_combine_with_reuse",
"tests/test_solver.py::TestComposite::test_combine_without_reuse",
"tests/test_solver.py::TestComposite::test_composite_solver_with_reuse",
"tests/test_solver.py::TestComposite::test_composite_solver_without_reuse",
"tests/test_solver.py::TestComposite::test_solver_branching_with_reuse",
"tests/test_solver.py::TestComposite::test_solver_branching_without_reuse",
"tests/test_solver.py::TestComposite::test_solver_with_reuse",
"tests/test_solver.py::TestComposite::test_solver_without_reuse",
"tests/test_solver.py::TestComposite::test_unsat_core_with_reuse",
"tests/test_solver.py::TestComposite::test_unsat_core_without_reuse",
"tests/test_solver.py::TestSolverCacheless::test_ancestor_merge_with_reuse",
"tests/test_solver.py::TestSolverCacheless::test_ancestor_merge_without_reuse",
"tests/test_solver.py::TestSolverCacheless::test_combine_with_reuse",
"tests/test_solver.py::TestSolverCacheless::test_combine_without_reuse",
"tests/test_solver.py::TestSolverCacheless::test_solver_branching_with_reuse",
"tests/test_solver.py::TestSolverCacheless::test_solver_branching_without_reuse",
"tests/test_solver.py::TestSolverCacheless::test_solver_with_reuse",
"tests/test_solver.py::TestSolverCacheless::test_solver_without_reuse",
"tests/test_solver.py::TestSolverCacheless::test_unsat_core_with_reuse",
"tests/test_solver.py::TestSolverCacheless::test_unsat_core_without_reuse"
]
| {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2022-10-31 23:17:37+00:00 | bsd-2-clause | 1,095 |
|
antirotor__speedcopy-6 | diff --git a/speedcopy/__init__.py b/speedcopy/__init__.py
index c7b2c99..e681fd7 100644
--- a/speedcopy/__init__.py
+++ b/speedcopy/__init__.py
@@ -144,8 +144,8 @@ if not sys.platform.startswith("win32"):
os.symlink(os.readlink(src), dst)
else:
fs_src_type = FilesystemInfo().filesystem(src.encode('utf-8'))
- fs_dst_type = FilesystemInfo().filesystem(
- os.path.dirname(dst.encode('utf-8')))
+ dst_dir_path = os.path.normpath(os.path.dirname(dst.encode('utf-8'))) # noqa: E501
+ fs_dst_type = FilesystemInfo().filesystem(dst_dir_path)
supported_fs = ['CIFS', 'SMB2']
debug(">>> Source FS: {}".format(fs_src_type))
debug(">>> Destination FS: {}".format(fs_dst_type))
@@ -218,8 +218,8 @@ else:
ctypes.c_void_p)
copyfile.restype = ctypes.HRESULT
- source_file = os.path.normpath(src)
- dest_file = os.path.normpath(dst)
+ source_file = os.path.abspath(os.path.normpath(src))
+ dest_file = os.path.abspath(os.path.normpath(dst))
if source_file.startswith('\\\\'):
source_file = 'UNC\\' + source_file[2:]
if dest_file.startswith('\\\\'):
| antirotor/speedcopy | 78dee74fa0f0aef2c61f9f48f6389969f63272f7 | diff --git a/tests/test_speedcopy.py b/tests/test_speedcopy.py
index bea8ba4..ec69756 100644
--- a/tests/test_speedcopy.py
+++ b/tests/test_speedcopy.py
@@ -3,6 +3,7 @@ import speedcopy
import os
speedcopy.SPEEDCOPY_DEBUG = True
+_FILE_SIZE = 5 * 1024 * 1024
def setup_function(function):
@@ -13,11 +14,11 @@ def teadown_function(function):
speedcopy.unpatch_copyfile()
-def test_copy(tmpdir):
+def test_copy_abs(tmpdir):
src = tmpdir.join("source")
dst = tmpdir.join("destination")
with open(str(src), "wb") as f:
- f.write(os.urandom(5 * 1024 * 1024))
+ f.write(os.urandom(_FILE_SIZE))
f.close()
shutil.copyfile(str(src), str(dst))
@@ -25,6 +26,24 @@ def test_copy(tmpdir):
assert os.path.isfile(str(dst))
+def test_copy_rel(tmpdir):
+ cwd = os.getcwd()
+ os.chdir(str(tmpdir))
+
+ try:
+ src = "source"
+ dst = "destination"
+ with open(str(src), "wb") as f:
+ f.write(os.urandom(_FILE_SIZE))
+ f.close()
+
+ shutil.copyfile(str(src), str(dst))
+
+ assert os.path.isfile(str(dst))
+ finally:
+ os.chdir(cwd)
+
+
def test_patch():
assert shutil.copyfile == speedcopy.copyfile
| Copying doesn't work if non-absolute filenames supplied
```
speedcopy.copyfile("C:/Temp/asdf", "C:/Temp/fdsa")
```
should work exactly the same as
```
os.chdir("C:/Temp")
speedcopy.copyfile("asdf", "fdsa")
```
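A hedged sketch of the normalization that makes the two calls equivalent (the helper name `_normalize` is illustrative; the patch above applies the same `os.path.abspath(os.path.normpath(...))` combination before handing paths to the platform copy routine):
```python
import os

def _normalize(path):
    # Resolve relative paths against the current working directory.
    return os.path.abspath(os.path.normpath(path))

assert _normalize("asdf") == os.path.join(os.getcwd(), "asdf")
```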
| 0.0 | 78dee74fa0f0aef2c61f9f48f6389969f63272f7 | [
"tests/test_speedcopy.py::test_copy_rel"
]
| [
"tests/test_speedcopy.py::test_copy_abs",
"tests/test_speedcopy.py::test_patch",
"tests/test_speedcopy.py::test_unpatch"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2020-01-09 11:44:18+00:00 | apache-2.0 | 1,096 |
|
app-sre__sretoolbox-53 | diff --git a/sretoolbox/container/image.py b/sretoolbox/container/image.py
index 92bedf5..0721ab5 100644
--- a/sretoolbox/container/image.py
+++ b/sretoolbox/container/image.py
@@ -41,6 +41,7 @@ class NoTagForImageByDigest(Exception):
Raised when the Image was constructed with a by-digest URL and an
operation is attempted that requires a tag.
"""
+
def __init__(self, image):
super().__init__(
f"Can't determine a unique tag for Image: {str(image)}")
@@ -65,7 +66,7 @@ class Image:
MAX_CACHE_ITEM_SIZE = 50*1024
def __init__(self, url, tag_override=None, username=None, password=None,
- auth_server=None, response_cache=None):
+ auth_server=None, response_cache=None, auth_token=None):
image_data = self._parse_image_url(url)
self.scheme = image_data['scheme']
self.registry = image_data['registry']
@@ -73,6 +74,7 @@ class Image:
self.image = image_data['image']
self.response_cache = response_cache
+ self.auth_token = auth_token
if tag_override is None:
self.tag = image_data['tag']
else:
@@ -361,17 +363,23 @@ class Image:
@retry(exceptions=(HTTPError, requests.ConnectionError), max_attempts=5)
def _request_get(self, url, method=requests.get):
- # Try first without 'Authorization' header
+ # Use any cached tokens, they may still be valid
headers = {
'Accept':
- 'application/vnd.docker.distribution.manifest.v1+json,'
- 'application/vnd.docker.distribution.manifest.v2+json,'
- 'application/vnd.docker.distribution.manifest.v1+prettyjws,'
+ 'application/vnd.docker.distribution.manifest.v1+json,'
+ 'application/vnd.docker.distribution.manifest.v2+json,'
+ 'application/vnd.docker.distribution.manifest.v1+prettyjws,'
}
- response = method(url, headers=headers, auth=self.auth)
+ if self.auth_token:
+ headers['Authorization'] = self.auth_token
+ auth = None
+ else:
+ auth = self.auth
+
+ response = method(url, headers=headers, auth=auth)
- # Unauthorized, meaning we have to acquire a token
+ # Unauthorized, meaning we have to acquire a new token
if response.status_code == 401:
auth_specs = response.headers.get('Www-Authenticate')
if auth_specs is None:
@@ -379,8 +387,9 @@ class Image:
www_auth = self._parse_www_auth(auth_specs)
- # Try again, this time with the Authorization header
- headers['Authorization'] = self._get_auth(www_auth)
+ # Try again, with the new Authorization header
+ self.auth_token = self._get_auth(www_auth)
+ headers['Authorization'] = self.auth_token
response = method(url, headers=headers)
self._raise_for_status(response)
@@ -467,7 +476,8 @@ class Image:
return Image(url=str(self), tag_override=str(item),
username=self.username, password=self.password,
auth_server=self.auth_server,
- response_cache=self.response_cache)
+ response_cache=self.response_cache,
+ auth_token=self.auth_token)
def __iter__(self):
for tag in self.tags:
| app-sre/sretoolbox | 072f8c60b2121b89be2c0028c55b925bd54d4472 | diff --git a/tests/test_container.py b/tests/test_container.py
index 542d716..cc92830 100644
--- a/tests/test_container.py
+++ b/tests/test_container.py
@@ -15,7 +15,7 @@ import requests
import pytest
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
from sretoolbox.container import Image
@@ -158,11 +158,12 @@ class TestContainer:
assert e.typename == 'NoTagForImageByDigest'
def test_getitem(self):
- image = Image("quay.io/foo/bar:latest", response_cache={})
+ image = Image("quay.io/foo/bar:latest", response_cache={},
+ auth_token="atoken")
other = image['current']
assert image.response_cache is other.response_cache
-
-
+ assert other.auth_token is image.auth_token
+ assert other.tag == 'current'
@patch.object(Image, '_request_get', spec=Image)
@@ -229,3 +230,56 @@ class TestGetManifest:
"https://quay.io/v2/foo/bar/manifests/latest"
)
should_cache.assert_not_called()
+
+
[email protected](Image, '_parse_www_auth')
[email protected](Image, '_get_auth')
+class TestRequestGet:
+ def test_username_and_password_ok(self, getauth, parseauth):
+ r = requests.Response()
+ r.status_code = 200
+ method = MagicMock(return_value=r)
+ i = Image("quay.io/foo/bar:latest", username="user", password="pass")
+ i._request_get.__wrapped__(i, "http://www.google.com", method=method)
+ method.assert_called_once()
+ c = method.call_args_list[0]
+
+ assert c[0] == ('http://www.google.com', )
+ assert 'Authorization' not in c[1]['headers']
+ assert c[1]['auth'] == i.auth
+ getauth.assert_not_called()
+ parseauth.assert_not_called()
+
+ def test_username_and_password_reauthenticate(self, getauth, parseauth):
+ r = requests.Response()
+ r.status_code = 401
+ r.headers['Www-Authenticate'] = 'something something'
+ gets = [r]
+ r = requests.Response()
+ r.status_code = 200
+ gets.append(r)
+ method = MagicMock(side_effect=gets)
+ r = requests.Response()
+ r.status_code = 200
+ i = Image("quay.io/foo/bar:latest", username="user", password="pass")
+ getauth.return_value = "anauthtoken"
+ parseauth.return_value = "aparsedauth"
+ i._request_get.__wrapped__(i, "http://www.google.com", method=method)
+ parseauth.assert_called_once_with('something something')
+ assert method.call_count == 2
+ assert i.auth_token == 'anauthtoken'
+
+ def test_persistent_failure(self, getauth, parseauth):
+ r = requests.Response()
+ r.status_code = 401
+ r.headers['Www-Authenticate'] = 'something something'
+ method = MagicMock(return_value=r)
+ r = requests.Response()
+ r.status_code = 200
+ i = Image("quay.io/foo/bar:latest", username="user", password="pass")
+ getauth.return_value = "anauthtoken"
+ parseauth.return_value = "aparsedauth"
+ with pytest.raises(requests.exceptions.HTTPError):
+ i._request_get.__wrapped__(i, "http://www.google.com", method=method)
+ getauth.assert_called_once()
+ parseauth.assert_called_once()
| Cache registry token
We acquire a new token on every request:
https://github.com/app-sre/sretoolbox/blob/master/sretoolbox/container/image.py#L355
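A minimal sketch of caching and reusing the token (the `Client` helper is hypothetical; the merged patch above does the equivalent by storing `auth_token` on `Image` and re-acquiring it only on a 401):
```python
import requests

class Client:
    def __init__(self):
        self.auth_token = None  # cached "Bearer ..." value, if any

    def get(self, url):
        headers = {"Authorization": self.auth_token} if self.auth_token else {}
        resp = requests.get(url, headers=headers)
        if resp.status_code == 401:  # token missing or expired: refresh once
            self.auth_token = self._acquire_token(resp.headers.get("Www-Authenticate"))
            resp = requests.get(url, headers={"Authorization": self.auth_token})
        return resp

    def _acquire_token(self, www_auth):
        # Placeholder for the real challenge/response token exchange.
        return "Bearer <token>"
```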
We should probably make more efficient use of it, reusing the token across requests as in the sketch above. | 0.0 | 072f8c60b2121b89be2c0028c55b925bd54d4472 | [
"tests/test_container.py::TestContainer::test_getitem",
"tests/test_container.py::TestRequestGet::test_username_and_password_reauthenticate"
]
| [
"tests/test_container.py::TestContainer::test_parser[memcached-expected_struct0]",
"tests/test_container.py::TestContainer::test_parser[docker.io/memcached-expected_struct1]",
"tests/test_container.py::TestContainer::test_parser[library/memcached-expected_struct2]",
"tests/test_container.py::TestContainer::test_parser[quay.io/app-sre/qontract-reconcile-expected_struct3]",
"tests/test_container.py::TestContainer::test_parser[docker://docker.io/fedora:28-expected_struct4]",
"tests/test_container.py::TestContainer::test_parser[example-local.com:5000/my-repo/my-image:build-expected_struct5]",
"tests/test_container.py::TestContainer::test_parser[docker://docker.io/tnozicka/openshift-acme:v0.8.0-pre-alpha-expected_struct6]",
"tests/test_container.py::TestContainer::test_parser[quay.io/app-sre/pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e-expected_struct7]",
"tests/test_container.py::TestContainer::test_str[memcached-docker://docker.io/library/memcached:latest]",
"tests/test_container.py::TestContainer::test_str[docker.io/fedora-docker://docker.io/library/fedora:latest]",
"tests/test_container.py::TestContainer::test_str[docker://docker.io/app-sre/fedora-docker://docker.io/app-sre/fedora:latest]",
"tests/test_container.py::TestContainer::test_str[docker.io:8080/app-sre/fedora:30-docker://docker.io:8080/app-sre/fedora:30]",
"tests/test_container.py::TestContainer::test_str[quay.io/app-sre/qontract-reconcile:build-docker://quay.io/app-sre/qontract-reconcile:build]",
"tests/test_container.py::TestContainer::test_str[quay.io/app-sre/pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e-docker://quay.io/app-sre/pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e]",
"tests/test_container.py::TestContainer::test_str[pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e-docker://docker.io/library/pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e]",
"tests/test_container.py::TestContainer::test_str[registry.access.redhat.com/ubi8/ubi-minimal-docker://registry.access.redhat.com/ubi8/ubi-minimal:latest]",
"tests/test_container.py::TestContainer::test_str[registry.access.redhat.com/ubi8/ubi-minimal:a61f590-docker://registry.access.redhat.com/ubi8/ubi-minimal:a61f590]",
"tests/test_container.py::TestContainer::test_tag_override[memcached:20-latest-docker://docker.io/library/memcached:latest]",
"tests/test_container.py::TestContainer::test_tag_override[docker.io/fedora:31-30-docker://docker.io/library/fedora:30]",
"tests/test_container.py::TestContainer::test_tag_override[docker://docker.io/app-sre/fedora-25-docker://docker.io/app-sre/fedora:25]",
"tests/test_container.py::TestContainer::test_tag_override[docker.io:443/app-sre/fedora:30-31-docker://docker.io:443/app-sre/fedora:31]",
"tests/test_container.py::TestContainer::test_tag_override[quay.io/app-sre/qontract-reconcile:build-latest-docker://quay.io/app-sre/qontract-reconcile:latest]",
"tests/test_container.py::TestContainer::test_tag_override[quay.io/app-sre/pagerduty-operator-registry@sha256:bc1ed82a75f2ca160225b8281c50b7074e7678c2a1f61b1fb298e545b455925e-foo-docker://quay.io/app-sre/pagerduty-operator-registry:foo]",
"tests/test_container.py::TestContainer::test_no_tag",
"tests/test_container.py::TestGetManifest::test_empty_cache_should_cache",
"tests/test_container.py::TestGetManifest::test_empty_cache_should_not_cache",
"tests/test_container.py::TestGetManifest::test_already_cached",
"tests/test_container.py::TestGetManifest::test_no_cache",
"tests/test_container.py::TestRequestGet::test_username_and_password_ok",
"tests/test_container.py::TestRequestGet::test_persistent_failure"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2021-08-25 22:19:09+00:00 | apache-2.0 | 1,097 |
|
argoproj-labs__gordian-42 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index cab46c1..eafba89 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com),
and this project adheres to [Semantic Versioning](https://semver.org).
+
+## [3.7.0] - 2023-10-18
+### Added
+- Added support to pass in a path when calling get_files(), resolves issue #34
+
## [3.6.0] - 2023-10-06
### Fix
- Fetch repo content from target branch
diff --git a/gordian/repo.py b/gordian/repo.py
index ae72642..0c53a0b 100644
--- a/gordian/repo.py
+++ b/gordian/repo.py
@@ -77,9 +77,9 @@ class Repo:
return PlainTextFile(file, self)
- def get_files(self):
+ def get_files(self, path=''):
if not self.files:
- contents = self._get_repo_contents('')
+ contents = self._get_repo_contents(path)
while contents:
file = contents.pop(0)
| argoproj-labs/gordian | 5924837aac8e416abd170bebd8c3ccac614b1fd5 | diff --git a/tests/test_repo.py b/tests/test_repo.py
index 4398ec0..a341ea1 100644
--- a/tests/test_repo.py
+++ b/tests/test_repo.py
@@ -191,6 +191,16 @@ class TestRepo(unittest.TestCase):
repo._source_repo.delete_file.assert_called_once()
self.assertTrue(repo.dirty)
+ def test_get_files_with_path(self):
+ self.repo._set_target_branch('target')
+ self.repo.files = []
+ self.repo._source_repo = MagicMock()
+ repository_file = MagicMock(path='test/afile.txt', type='not_dir')
+ self.repo._source_repo.get_contents.side_effect = [[MagicMock(path='directory', type='dir')],[repository_file]]
+ self.repo.get_files('test')
+ self.repo._source_repo.get_contents.assert_has_calls([call('test', 'target'), call('directory', 'target')])
+ self.assertEquals(self.repo.files, [repository_file])
+
def test__get_github_client(self):
repo = Repo('test_repo', branch='', github=self.mock_git)
| Fetching of all files
It appears gordian fetches all the files of a repo even if you only need one of them. | 0.0 | 5924837aac8e416abd170bebd8c3ccac614b1fd5 | [
"tests/test_repo.py::TestRepo::test_get_files_with_path"
]
| [
"tests/test_repo.py::TestRepo::test__get_github_client",
"tests/test_repo.py::TestRepo::test_create_file",
"tests/test_repo.py::TestRepo::test_create_pr",
"tests/test_repo.py::TestRepo::test_create_pr_no_labels",
"tests/test_repo.py::TestRepo::test_default_github_url",
"tests/test_repo.py::TestRepo::test_delete_file",
"tests/test_repo.py::TestRepo::test_fork",
"tests/test_repo.py::TestRepo::test_get_existing_object",
"tests/test_repo.py::TestRepo::test_get_files",
"tests/test_repo.py::TestRepo::test_get_new_version_major",
"tests/test_repo.py::TestRepo::test_get_new_version_minor",
"tests/test_repo.py::TestRepo::test_get_new_version_patch",
"tests/test_repo.py::TestRepo::test_get_object_does_not_exist",
"tests/test_repo.py::TestRepo::test_init_with_passed_token",
"tests/test_repo.py::TestRepo::test_init_with_token_from_env",
"tests/test_repo.py::TestRepo::test_init_with_user_pass_env",
"tests/test_repo.py::TestRepo::test_make_branch_fork",
"tests/test_repo.py::TestRepo::test_make_branch_no_fork",
"tests/test_repo.py::TestRepo::test_new_files_object",
"tests/test_repo.py::TestRepo::test_no_fork",
"tests/test_repo.py::TestRepo::test_override_github_url",
"tests/test_repo.py::TestRepo::test_remove_dot_git_from_repo_name",
"tests/test_repo.py::TestRepo::test_set_target_branch",
"tests/test_repo.py::TestRepo::test_set_target_branch_reset_file_cache",
"tests/test_repo.py::TestRepo::test_set_target_branch_source_branch"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2023-05-16 21:53:32+00:00 | apache-2.0 | 1,098 |
|
arviz-devs__arviz-1076 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index bee1c84..16824f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@
* Fix `io_pymc3.py` to handle models with `potentials` (#1043)
* Fix several inconsistencies between schema and `from_pymc3` implementation
in groups `prior`, `prior_predictive` and `observed_data` (#1045)
+* Stabilize covariance matrix for `plot_kde_2d` (#1075)
### Deprecation
diff --git a/arviz/plots/backends/matplotlib/traceplot.py b/arviz/plots/backends/matplotlib/traceplot.py
index 437f925..e181417 100644
--- a/arviz/plots/backends/matplotlib/traceplot.py
+++ b/arviz/plots/backends/matplotlib/traceplot.py
@@ -1,5 +1,6 @@
"""Matplotlib traceplot."""
+import warnings
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
@@ -48,8 +49,8 @@ def plot_trace(
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE. Only affects continuous
variables.
- lines : tuple
- Tuple of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
+ lines : tuple or list
+ list of tuple of (var_name, {'coord': selection}, [line_positions]) to be overplotted as
vertical lines on the density and horizontal lines on the trace.
combined : bool
Flag for combining multiple chains into a single line. If False (default), chains will be
@@ -124,6 +125,21 @@ def plot_trace(
_, axes = plt.subplots(len(plotters), 2, squeeze=False, figsize=figsize, **backend_kwargs)
+ # Check the input for lines
+ if lines is not None:
+ all_var_names = set(plotter[0] for plotter in plotters)
+
+ invalid_var_names = set()
+ for line in lines:
+ if line[0] not in all_var_names:
+ invalid_var_names.add(line[0])
+ if invalid_var_names:
+ warnings.warn(
+ "A valid var_name should be provided, found {} expected from {}".format(
+ invalid_var_names, all_var_names
+ )
+ )
+
for idx, (var_name, selection, value) in enumerate(plotters):
value = np.atleast_2d(value)
@@ -219,6 +235,10 @@ def plot_trace(
line_values = [vlines]
else:
line_values = np.atleast_1d(vlines).ravel()
+ if not np.issubdtype(line_values.dtype, np.number):
+ raise ValueError(
+ "line-positions should be numeric, found {}".format(line_values)
+ )
axes[idx, 0].vlines(line_values, *ylims[0], colors="black", linewidth=1.5, alpha=0.75)
axes[idx, 1].hlines(
line_values, *xlims[1], colors="black", linewidth=1.5, alpha=trace_kwargs["alpha"]
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
index 3d1c189..a611401 100644
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -818,7 +818,9 @@ def _cov(data):
x -= avg[:, None]
prod = _dot(x, x.T.conj())
prod *= np.true_divide(1, ddof)
- return prod.squeeze()
+ prod = prod.squeeze()
+ prod += 1e-6 * np.eye(prod.shape[0])
+ return prod
else:
raise ValueError("{} dimension arrays are not supported".format(data.ndim))
| arviz-devs/arviz | 0eef3b95eff477541ba599f15687612652074b7e | diff --git a/arviz/tests/test_plots_matplotlib.py b/arviz/tests/test_plots_matplotlib.py
index a688a79..c0fa4ae 100644
--- a/arviz/tests/test_plots_matplotlib.py
+++ b/arviz/tests/test_plots_matplotlib.py
@@ -156,6 +156,21 @@ def test_plot_trace_max_subplots_warning(models):
assert axes.shape
[email protected]("kwargs", [{"var_names": ["mu", "tau"], "lines": [("hey", {}, [1])]}])
+def test_plot_trace_invalid_varname_warning(models, kwargs):
+ with pytest.warns(UserWarning, match="valid var.+should be provided"):
+ axes = plot_trace(models.model_1, **kwargs)
+ assert axes.shape
+
+
[email protected](
+ "bad_kwargs", [{"var_names": ["mu", "tau"], "lines": [("mu", {}, ["hey"])]}]
+)
+def test_plot_trace_bad_lines_value(models, bad_kwargs):
+ with pytest.raises(ValueError, match="line-positions should be numeric"):
+ plot_trace(models.model_1, **bad_kwargs)
+
+
@pytest.mark.parametrize("model_fits", [["model_1"], ["model_1", "model_2"]])
@pytest.mark.parametrize(
"args_expected",
@@ -701,7 +716,6 @@ def test_plot_posterior_point_estimates(models, point_estimate):
"kwargs", [{"insample_dev": False}, {"plot_standard_error": False}, {"plot_ic_diff": False}]
)
def test_plot_compare(models, kwargs):
-
model_compare = compare({"Model 1": models.model_1, "Model 2": models.model_2})
axes = plot_compare(model_compare, **kwargs)
| plot_trace lines is unclear and it may yield unexpected results
**Describe the bug**
The argument `lines` for the function `plot_trace` can give unexpected results. Moreover, the documentation is a bit nebulous.
**To Reproduce**
A toy example is defined
```python
import pymc3 as pm
import arviz as az
import numpy as np
# fake data
mu_real = 0
sigma_real = 1
n_samples = 150
Y = np.random.normal(loc=mu_real, scale=sigma_real, size=n_samples)
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sigma=10)
sigma = pm.HalfNormal('sigma', sigma=10)
likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=Y)
trace = pm.sample()
```
As per [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace), the argument `lines` accepts a tuple in the form `(var_name, {'coord': selection}, [line, positions])`. So, the command
```python
az.plot_trace(trace, lines=(('mu', {}, mu_real),))
```
yields correctly

I can also pass a list of tuples, or a mixed list of lists and tuples, and it will work fine:
```
az.plot_trace(trace, lines=[('mu', {}, mu_real)]) # list of tuples
az.plot_trace(trace, lines=[['mu', {}, mu_real]]) # list of lists
az.plot_trace(trace, lines=[['mu', {}, mu_real], ('sigma', {}, sigma_real)]) # list of lists and tuples
```
however, I cannot pass a single unwrapped tuple or list, because I will get a `KeyError: 0`:
```python
az.plot_trace(trace, lines=(['mu', {}, mu_real]))
az.plot_trace(trace, lines=(('mu', {}, mu_real)))
```
Also, I can pass a variable or coordinate name that does not exist and Arviz will not complain, but no lines will be plotted (here I would expect a warning):
```python
az.plot_trace(trace, lines=[('hey', {}, mu_real)])
az.plot_trace(trace, lines=[('mu', {'hey'}, mu_real)])
```

The weird behavior happens when I pass a string:
```python
az.plot_trace(trace, lines=[('mu', {}, 'hey')])
```

**Expected behavior**
The [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace) could be improved and the function could check its inputs. In addition to what is described above, the placeholder `[line, positions]` in `(var_name, {'coord': selection}, [line, positions])` should be something like `[line_positions]`; otherwise one may think (like myself :) ) that two values should be inserted (one for `line` and one for `positions`).
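For example, reusing `az`, `trace`, `mu_real` and `sigma_real` from the toy model above, the unambiguous form would be:
```python
# One numeric line position per variable, wrapped in a list.
az.plot_trace(trace, lines=[('mu', {}, [mu_real]), ('sigma', {}, [sigma_real])])
```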
**Additional context**
I am using Win10, fresh conda environment with PyMC3 and Arviz from master.
Possibly related https://github.com/pymc-devs/pymc3/issues/3495, https://github.com/pymc-devs/pymc3/issues/3497 | 0.0 | 0eef3b95eff477541ba599f15687612652074b7e | [
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_invalid_varname_warning[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_bad_lines_value[bad_kwargs0]"
]
| [
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_bad_kwargs",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_max_subplots_warning",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_rope_exception",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_single_value",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[hist]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_raises_valueerror",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[normal]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[minmax]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[rank]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_joint_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x0]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x1]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_inference_data",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[True]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[False]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[False-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[True-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_grid",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_layout",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_short_chain",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_uncombined",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_combined",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs10]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs11]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs12]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mode]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mean]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[median]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_elpd_one_model",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_annotate",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_bad_input",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_evolution",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_kind",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_divergences",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_incompatible_args",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_divergences"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | 2020-02-17 22:14:31+00:00 | apache-2.0 | 1,099 |
|
arvkevi__kneed-53 | diff --git a/kneed/knee_locator.py b/kneed/knee_locator.py
index 82412bc..c8c1954 100644
--- a/kneed/knee_locator.py
+++ b/kneed/knee_locator.py
@@ -81,12 +81,12 @@ class KneeLocator(object):
# Step 4: Identify local maxima/minima
# local maxima
- self.maxima_indices = argrelextrema(self.y_difference, np.greater)[0]
+ self.maxima_indices = argrelextrema(self.y_difference, np.greater_equal)[0]
self.x_difference_maxima = self.x_difference[self.maxima_indices]
self.y_difference_maxima = self.y_difference[self.maxima_indices]
# local minima
- self.minima_indices = argrelextrema(self.y_difference, np.less)[0]
+ self.minima_indices = argrelextrema(self.y_difference, np.less_equal)[0]
self.x_difference_minima = self.x_difference[self.minima_indices]
self.y_difference_minima = self.y_difference[self.minima_indices]
diff --git a/kneed/version.py b/kneed/version.py
index 3d18726..dd9b22c 100644
--- a/kneed/version.py
+++ b/kneed/version.py
@@ -1,1 +1,1 @@
-__version__ = "0.5.0"
+__version__ = "0.5.1"
| arvkevi/kneed | cb35c9b21ac551eb3af5fa1372745e6bd57f084d | diff --git a/tests/test_sample.py b/tests/test_sample.py
index f82e9e8..8c3f160 100644
--- a/tests/test_sample.py
+++ b/tests/test_sample.py
@@ -151,3 +151,19 @@ def test_list_input():
x, y = dg.figure2()
kl = KneeLocator(x.tolist(), y.tolist(), S=1.0, curve='concave', interp_method='polynomial')
assert math.isclose(kl.knee, 0.22, rel_tol=0.05)
+
+
+def test_flat_maxima():
+ """The global maxima has a sequentially equal value in the difference curve"""
+ x = [0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0]
+ y = [1, 0.787701317715959, 0.7437774524158126, 0.6559297218155198, 0.5065885797950219, 0.36749633967789164,
+ 0.2547584187408492, 0.16251830161054173, 0.10395314787701318, 0.06734992679355783, 0.043923865300146414,
+ 0.027818448023426062, 0.01903367496339678, 0.013177159590043924, 0.010248901903367497, 0.007320644216691069,
+ 0.005856515373352855, 0.004392386530014641]
+ # When S=0.0 the first local maximum is found.
+ kl = KneeLocator(x, y, curve='convex', direction='decreasing', S=0.0)
+ assert math.isclose(kl.knee, 1.0, rel_tol=0.05)
+
+ # When S=1.0 the global maximum is found.
+ kl = KneeLocator(x, y, curve='convex', direction='decreasing', S=1.0)
+ assert math.isclose(kl.knee, 8.0, rel_tol=0.05)
| KneeLocator fails if there are flat extrema
This simple example fails:
```python
from kneed import KneeLocator
x = [0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0]
y = [1, 0.787701317715959, 0.7437774524158126, 0.6559297218155198, 0.5065885797950219, 0.36749633967789164, 0.2547584187408492, 0.16251830161054173, 0.10395314787701318, 0.06734992679355783, 0.043923865300146414, 0.027818448023426062, 0.01903367496339678, 0.013177159590043924, 0.010248901903367497, 0.007320644216691069, 0.005856515373352855, 0.004392386530014641]
k = KneeLocator(x, y, curve='convex', direction='decreasing')
```
Output:
`UserWarning: No knee/elbow found`
However, if we obtain the normalized knee plot, it is clear that there is a "flat optimum".

It seems that the algorithm should be able to find that point between 0.4 and 0.5.
I've been able to work around this issue by modifying `knee_locator.py` in the calculation of `self.maxima_indices` and `self.minima_indices`, by using `np.greater_equal` and `np.less_equal` rather than `np.greater` and `np.less`, but I'm not sure if this is a proper solution.
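For reference, a minimal sketch of what that workaround changes (the plateau array below is an illustrative example, not taken from the report; `scipy` and `numpy` are already kneed dependencies):
```python
import numpy as np
from scipy.signal import argrelextrema

# a difference curve whose global maximum is a flat plateau
y_difference = np.array([0.0, 0.3, 0.5, 0.5, 0.2])

print(argrelextrema(y_difference, np.greater)[0])        # [] -- plateau missed
print(argrelextrema(y_difference, np.greater_equal)[0])  # [2 3] -- plateau found
```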
Thanks! | 0.0 | cb35c9b21ac551eb3af5fa1372745e6bd57f084d | [
"tests/test_sample.py::test_flat_maxima"
]
| [
"tests/test_sample.py::test_figure2[interp1d]",
"tests/test_sample.py::test_figure2[polynomial]",
"tests/test_sample.py::test_NoisyGaussian[interp1d]",
"tests/test_sample.py::test_NoisyGaussian[polynomial]",
"tests/test_sample.py::test_concave_increasing[interp1d]",
"tests/test_sample.py::test_concave_increasing[polynomial]",
"tests/test_sample.py::test_concave_decreasing[interp1d]",
"tests/test_sample.py::test_concave_decreasing[polynomial]",
"tests/test_sample.py::test_convex_increasing[interp1d]",
"tests/test_sample.py::test_convex_increasing[polynomial]",
"tests/test_sample.py::test_convex_decreasing[interp1d]",
"tests/test_sample.py::test_convex_decreasing[polynomial]",
"tests/test_sample.py::test_concave_increasing_truncated[interp1d]",
"tests/test_sample.py::test_concave_increasing_truncated[polynomial]",
"tests/test_sample.py::test_concave_decreasing_truncated[interp1d]",
"tests/test_sample.py::test_concave_decreasing_truncated[polynomial]",
"tests/test_sample.py::test_convex_increasing_truncated[interp1d]",
"tests/test_sample.py::test_convex_increasing_truncated[polynomial]",
"tests/test_sample.py::test_convex_decreasing_truncated[interp1d]",
"tests/test_sample.py::test_convex_decreasing_truncated[polynomial]",
"tests/test_sample.py::test_convex_decreasing_bumpy[interp1d-26]",
"tests/test_sample.py::test_convex_decreasing_bumpy[polynomial-28]",
"tests/test_sample.py::test_gamma_online_offline[True-482]",
"tests/test_sample.py::test_gamma_online_offline[False-22]",
"tests/test_sample.py::test_sensitivity",
"tests/test_sample.py::test_sine",
"tests/test_sample.py::test_list_input"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-11-27 16:42:04+00:00 | bsd-3-clause | 1,100 |
|
arxanchain__py-common-16 | diff --git a/README.md b/README.md
index 8221932..40254e2 100644
--- a/README.md
+++ b/README.md
@@ -21,20 +21,50 @@ $ python setup.py install # install py-common
## Usage
-**Note:** Before using the py-common in your operating system, you need to make a two-step preparation:
+**Note:** Before using the py-common in your application, you need to make the following preparations:
-1. Build executables with sdk-go-common cryption tools. To build these tools, you may need to install **golang** package **sdk-go-common**. For more details please refer to [sdk-go-common](https://github.com/arxanchain/sdk-go-common/tree/master/crypto/tools/README.md)
+### 1.Configure your encryption and signing libraries
-2. Copy executables **crypto-util** and **sign-util** into your py-common installation path `cryption/utils`.
+1. Build executables with sdk-go-common encryption tools. To build these executables, you need to install **golang** and download **sdk-go-common**. For more details please refer to [sdk-go-common](https://github.com/arxanchain/sdk-go-common/tree/master/crypto/tools/README.md).
-If you have no idea where your py-common is installed, use the following command to check out.
+2. Copy the executables **crypto-util** and **sign-util** into your py-common installation path `cryption/utils`.
+
+If you have no idea where your py-common is installed, use the following command to check it out (you need to leave the py-common code repo before running this command).
```sh
$ python -c 'import imp;print imp.find_module("cryption")[1]'
/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption
```
-In this case, you should copy executables into path `/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption/utils/`.
+In this case, you should create directory `/usr/local/lib/python2.7/site-packages/py_common-1.5.0-py2.7.egg/cryption/utils/`, and copy the executables into this path.
+
+### 2. Configure you certificates
+
+To communicate with the server, you need to download a TLS certificate, register api-key and download the corresponding private key file from your ArxanChain BaaS Chainconsole. Refer to [API cert management](http://www.arxanfintech.com/infocenter/html/chainconsole/manual.html#api) for more details.
+
+After downloading the two files, use the following command to convert your private key file into PEM format.
+
+```sh
+$ openssl ec -in apikey.key -outform PEM -out apikey.key
+```
+
+Then copy (rename as follows) your TLS certificate and PEM private key file into your py-common installation path as follows. Please pay special attention to the absolute path of your certificate `./py_common-1.5.0-py2.7.egg/cryption/ecc/certs`, which will be used to create a wallet client.
+
+```
+.
+├── py_common-1.5.0-py2.7.egg
+| └── cryption
+| ├── ecc
+| | └── certs
+| | ├── tls
+| | | └── tls.cert
+| | └── users
+| | └── pWEzB4yMM1518346407
+| | └── pWEzB4yMM1518346407.key
+| └── utils
+| ├── sign-util
+| └── crypto-util
+```
### Run unit test
diff --git a/rest/api/api.py b/rest/api/api.py
index 775245b..6bdad9a 100644
--- a/rest/api/api.py
+++ b/rest/api/api.py
@@ -34,12 +34,15 @@ APIKEY = "pWEzB4yMM1518346407"
def set_body(body, apikey, cert_path):
"""Set body encdypted.
- :param body: body dictionary to be encrypted
+ :param body: body dictionary or string to be encrypted
:param apikey: api key generated from server
:param cert_path: path of private key file and cert file
:Returns: crypted cipher text
"""
- return sign_and_encrypt(json.dumps(body), apikey, cert_path)
+ if isinstance(body, dict):
+ body = json.dumps(body)
+
+ return sign_and_encrypt(body, apikey, cert_path)
def set_sign_body(body, secret_key, did, nonce, apikey, cert_path):
"""Set body signed.
@@ -69,7 +72,12 @@ def do_post(url, headers, body, files=None):
:param body: body dictionary
:Returns: response
"""
- return requests.post(url, headers=headers, data=body, files=files)
+ return requests.post(
+ url,
+ headers=headers,
+ data=body,
+ files=files
+ )
def do_put(url, headers, body):
"""Start POST request.
@@ -91,7 +99,9 @@ def require_ok(resp, apikey, cert_path):
"""
client_err_msg = ""
if resp.status_code != STATUS_CODE_OK:
- logging.error("Status code: {}, Client Error, body: {}".format(resp.status_code, resp.text))
+ logging.error("Status code: {}, Client Error, body: {}".format(
+ resp.status_code,
+ resp.text))
if len(resp.text) <= 0:
client_err_msg = "Respond error: Body empty"
@@ -101,11 +111,16 @@ def require_ok(resp, apikey, cert_path):
result = {}
plain_body = ""
try:
- plain_body = decrypt_and_verify(resp.text, apikey, cert_path)
+ plain_body = decrypt_and_verify(
+ resp.text,
+ apikey,
+ cert_path
+ )
result = json.loads(plain_body)
except Exception:
logging.error("cannot decrypt_and_verify response body: {}".format(resp.text))
client_err_msg = resp.text
+
result["ClientErrMsg"] = client_err_msg
return result
@@ -120,18 +135,49 @@ def do_request(req_params, apikey, cert_path, request_func):
:param request_func: request function to be used
:Returns: time duration, response
"""
+
if len(cert_path) <= 0:
cert_path = CERT_PATH
if len(apikey) <= 0:
apikey = APIKEY
beg_time = time.time()
+
if request_func == do_get and "body" in req_params:
del req_params["body"]
else:
- req_body = set_body(req_params["body"], apikey, cert_path)
+ req_body = set_body(
+ req_params["body"],
+ apikey,
+ cert_path
+ )
req_params["body"] = req_body
- resp = require_ok(request_func(**req_params),
+
+ resp = require_ok(
+ request_func(**req_params),
apikey, cert_path)
+
time_dur = time.time() - beg_time
return time_dur, resp
+
+def do_prepare(prepared, apikey, cert_path):
+ """ Do requst with the given request object.
+ And calculate total time used.
+
+ :param requests.PreparedRequest object used to do the request
+ :param apikey: the api key authorized by the server
+ :param cert_path: path of private key file and cert file
+ :Returns: time duration, response
+ """
+ prepared.body = set_body(prepared.body, apikey, cert_path)
+ prepared.headers['Content-Length'] = str(len(prepared.body))
+ beg_time = time.time()
+ resp = require_ok(
+ requests.session().send(prepared),
+ apikey,
+ cert_path
+ )
+ time_dur = time.time() - beg_time
+
+ return time_dur, resp
+
| arxanchain/py-common | 963a267c3aa42571f778c7fb5efa29c4f6aa09a3 | diff --git a/test/test_api.py b/test/test_api.py
index 60eace4..57328fe 100644
--- a/test/test_api.py
+++ b/test/test_api.py
@@ -20,12 +20,13 @@ import json
import sys
import httpretty
import mock
+import requests
ROOTPATH = os.path.join(
os.path.dirname(__file__),
"../"
)
sys.path.append(ROOTPATH)
-from rest.api.api import set_body, set_sign_body, do_get, do_post, do_put, require_ok, do_request
+from rest.api.api import set_body, set_sign_body, do_get, do_post, do_put, require_ok, do_request, do_prepare
class Response(object):
def __init__(self, status_code, text):
@@ -38,6 +39,7 @@ class ApiTest(unittest.TestCase):
def setUp(self):
# Every test needs access to the request factory.
self.header = {}
+ self.url = "http://127.0.0.1"
self.status_not_found = 404
self.resp_not_found = "404 Not Found"
self.nonce = "nonce"
@@ -152,4 +154,53 @@ class ApiTest(unittest.TestCase):
)
self.assertEqual(self.resp_not_found, result["ClientErrMsg"])
-
+
+ def test_do_prepare_succ(self):
+ mock_send = mock.Mock(return_value=Response(self.status_ok, json.dumps(self.resp)))
+ mock_run_cmd = mock.Mock(side_effect=[self.cipher, json.dumps(self.resp)])
+ request_func = do_post
+ with mock.patch('cryption.crypto.run_cmd', mock_run_cmd):
+ with mock.patch('requests.Session.send', mock_send):
+ poeid_filepart = (
+ "",
+ "poe id",
+ )
+ files = {"poe_id": poeid_filepart}
+
+ _, result = do_prepare(
+ requests.Request(
+ "POST",
+ url=self.url,
+ files=files
+ ).prepare(),
+ self.apikey,
+ self.cert_path
+ )
+
+ self.assertEqual(0, result["ErrCode"])
+
+ def test_do_prepare_fail(self):
+ mock_send = mock.Mock(return_value=Response(self.status_not_found, self.resp_not_found))
+ mock_run_cmd = mock.Mock(side_effect=[self.cipher, {}])
+ with mock.patch('cryption.crypto.run_cmd', mock_run_cmd):
+ with mock.patch('requests.Session.send', mock_send):
+ poeid_filepart = (
+ "",
+ "poe id",
+ )
+ files = {
+ "poe_id": poeid_filepart,
+ }
+
+ _, result = do_prepare(
+ requests.Request(
+ "POST",
+ url=self.url,
+ files=files
+ ).prepare(),
+ self.apikey,
+ self.cert_path
+ )
+
+ self.assertEqual(self.resp_not_found, result["ClientErrMsg"])
+
| README add certs tree description
```
| └── certs
| ├── tls
| | └── tls.cert
| └── users
| └── pWEzB4yMM1518346407
| └── pWEzB4yMM1518346407.key
``` | 0.0 | 963a267c3aa42571f778c7fb5efa29c4f6aa09a3 | [
"test/test_api.py::ApiTest::test_do_prepare_fail",
"test/test_api.py::ApiTest::test_do_prepare_succ"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2018-03-13 09:02:29+00:00 | apache-2.0 | 1,101 |
|
ashmastaflash__kal-wrapper-21 | diff --git a/kalibrate/fn.py b/kalibrate/fn.py
index df44306..ac51a43 100644
--- a/kalibrate/fn.py
+++ b/kalibrate/fn.py
@@ -1,5 +1,6 @@
-from . import sanity
+"""Utility functions for Kalibrate output paring."""
import decimal
+from . import sanity
@@ -18,11 +19,11 @@ def build_kal_scan_band_string(kal_bin, band, args):
"device": "-d",
"error": "-e"}
if not sanity.scan_band_is_valid(band):
- err_txt = "Unsupported band designation: %" % band
+ err_txt = "Unsupported band designation: %s" % band
raise ValueError(err_txt)
base_string = "%s -v -s %s" % (kal_bin, band)
base_string += options_string_builder(option_mapping, args)
- return(base_string)
+ return base_string
def build_kal_scan_channel_string(kal_bin, channel, args):
@@ -32,12 +33,14 @@ def build_kal_scan_channel_string(kal_bin, channel, args):
"error": "-e"}
base_string = "%s -v -c %s" % (kal_bin, channel)
base_string += options_string_builder(option_mapping, args)
- return(base_string)
+ return base_string
def herz_me(val):
"""Return integer value for Hz, translated from (MHz|kHz|Hz)."""
result = 0
+ if isinstance(val, bytes):
+ val = str(val)
if val.endswith("MHz"):
stripped = val.replace("MHz", "")
strip_fl = float(stripped)
@@ -49,24 +52,26 @@ def herz_me(val):
elif val.endswith("Hz"):
stripped = val.replace("Hz", "")
result = float(stripped)
- return(result)
+ return result
def determine_final_freq(base, direction, modifier):
"""Return integer for frequency."""
result = 0
+ if isinstance(direction, bytes):
+ direction = direction.decode("utf-8")
if direction == "+":
result = base + modifier
elif direction == "-":
result = base - modifier
- return(result)
+ return result
def to_eng(num_in):
"""Return number in engineering notation."""
x = decimal.Decimal(str(num_in))
eng_not = x.normalize().to_eng_string()
- return(eng_not)
+ return eng_not
def determine_scan_band(kal_out):
@@ -83,6 +88,7 @@ def determine_device(kal_out):
device = ""
while device == "":
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if "Using device " in line:
device = str(line.split(' ', 2)[-1])
if device == "":
@@ -97,7 +103,7 @@ def determine_scan_gain(kal_out):
def determine_sample_rate(kal_out):
"""Return sample rate from scan results."""
- return(extract_value_from_output("Exact sample rate", -2, kal_out))
+ return extract_value_from_output("Exact sample rate", -2, kal_out)
def extract_value_from_output(canary, split_offset, kal_out):
@@ -111,8 +117,9 @@ def extract_value_from_output(canary, split_offset, kal_out):
retval = ""
while retval == "":
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if canary in line:
- retval = str(line.split()[split_offset])
+ retval = line.split()[split_offset]
if retval == "":
retval = None
return retval
@@ -126,12 +133,13 @@ def determine_avg_absolute_error(kal_out):
def determine_chan_detect_threshold(kal_out):
"""Return channel detect threshold from kal output."""
- channel_detect_threshold = ""
- while channel_detect_threshold == "":
+ channel_detect_threshold = None
+ while not channel_detect_threshold:
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if "channel detect threshold: " in line:
channel_detect_threshold = str(line.split()[-1])
- if channel_detect_threshold == "":
+ if not channel_detect_threshold:
print("Unable to parse sample rate")
channel_detect_threshold = None
return channel_detect_threshold
@@ -139,18 +147,17 @@ def determine_chan_detect_threshold(kal_out):
def determine_band_channel(kal_out):
"""Return band, channel, target frequency from kal output."""
- band = ""
- channel = ""
- tgt_freq = ""
- while band == "":
+ band = None
+ channel = None
+ tgt_freq = None
+ while band is None:
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if "Using " in line and " channel " in line:
- band = str(line.split()[1])
- channel = str(line.split()[3])
- tgt_freq = str(line.split()[4]).replace(
+ band = line.split()[1]
+ channel = line.split()[3]
+ tgt_freq = line.split()[4].replace(
"(", "").replace(")", "")
- if band == "":
- band = None
return(band, channel, tgt_freq)
@@ -163,13 +170,14 @@ def parse_kal_scan(kal_out):
sample_rate = determine_sample_rate(kal_out)
chan_detect_threshold = determine_chan_detect_threshold(kal_out)
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if "chan:" in line:
- p_line = line.split(' ')
- chan = str(p_line[1])
- modifier = str(p_line[3])
- power = str(p_line[5])
- mod_raw = str(p_line[4]).replace(')\tpower:', '')
- base_raw = str((p_line[2]).replace('(', ''))
+ p_line = line.split(" ")
+ chan = p_line[1]
+ modifier = p_line[3]
+ power = p_line[5]
+ mod_raw = p_line[4].replace(')\tpower:', '')
+ base_raw = p_line[2].replace('(', '')
mod_freq = herz_me(mod_raw)
base_freq = herz_me(base_raw)
final_freq = to_eng(determine_final_freq(base_freq, modifier,
@@ -207,6 +215,7 @@ def get_measurements_from_kal_scan(kal_out):
"""Return a list of all measurements from kalibrate channel scan."""
result = []
for line in kal_out.splitlines():
+ line = line.decode("utf-8")
if "offset " in line:
p_line = line.split(' ')
result.append(p_line[-1])
| ashmastaflash/kal-wrapper | 52219f922914432ff03c4bfb3845cfc759cb18ea | diff --git a/test/unit/test_fn.py b/test/unit/test_fn.py
index 6b34d26..79aa232 100644
--- a/test/unit/test_fn.py
+++ b/test/unit/test_fn.py
@@ -1,6 +1,8 @@
+"""Test functions."""
+import pprint
from kalibrate import fn
-kal_scan_sample = """Found 1 device(s):
+kal_scan_sample = b"""Found 1 device(s):
0: Generic RTL2832U OEM
Using device 0: Generic RTL2832U OEM
@@ -16,7 +18,7 @@ GSM-850:
"""
-kal_freq_offset_sample = """Found 1 device(s):
+kal_freq_offset_sample = b"""Found 1 device(s):
0: Generic RTL2832U OEM
Using device 0: Generic RTL2832U OEM
@@ -180,6 +182,7 @@ class TestFn:
control_cdt = "261365.030729"
control_device = "0: Generic RTL2832U OEM"
kal_normalized = fn.parse_kal_scan(kal_scan_sample)
+ pprint.pprint(kal_normalized)
assert kal_normalized[0]["channel"] == control_channel
assert kal_normalized[0]["base_freq"] == control_base_freq
assert kal_normalized[0]["mod_freq"] == control_mod_freq
| Fix Py3 compat issue
Lots of string vs bytes comparison issues. | 0.0 | 52219f922914432ff03c4bfb3845cfc759cb18ea | [
"test/unit/test_fn.py::TestFn::test_parse_kal_scan",
"test/unit/test_fn.py::TestFn::test_parse_channel_scan"
]
| [
"test/unit/test_fn.py::TestFn::test_build_kal_scan_band_string_noargs",
"test/unit/test_fn.py::TestFn::test_build_kal_scan_band_string_args",
"test/unit/test_fn.py::TestFn::test_herz_me",
"test/unit/test_fn.py::TestFn::test_determine_final_freq",
"test/unit/test_fn.py::TestFn::test_to_eng"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-12-28 16:53:34+00:00 | bsd-2-clause | 1,102 |
|
asmeurer__removestar-41 | diff --git a/removestar/__main__.py b/removestar/__main__.py
index c5ae9dc..a724953 100755
--- a/removestar/__main__.py
+++ b/removestar/__main__.py
@@ -21,6 +21,7 @@ import sys
from . import __version__
from .helper import get_diff_text
+from .output import get_colored_diff, red
from .removestar import fix_code
@@ -30,7 +31,7 @@ class RawDescriptionHelpArgumentDefaultsHelpFormatter(
pass
-def main():
+def main(): # noqa: PLR0912
parser = argparse.ArgumentParser(
description=__doc__,
prog="removestar",
@@ -97,7 +98,7 @@ def main():
continue
if not os.path.isfile(file):
- print(f"Error: {file}: no such file or directory", file=sys.stderr)
+ print(red(f"Error: {file}: no such file or directory"), file=sys.stderr)
continue
with open(file, encoding="utf-8") as f:
@@ -114,7 +115,7 @@ def main():
)
except (RuntimeError, NotImplementedError) as e:
if not args.quiet:
- print(f"Error with {file}: {e}", file=sys.stderr)
+ print(red(f"Error with {file}: {e}"), file=sys.stderr)
continue
if new_code != code:
@@ -122,12 +123,24 @@ def main():
if args.in_place:
with open(file, "w", encoding="utf-8") as f:
f.write(new_code)
+ if not args.quiet:
+ print(
+ get_colored_diff(
+ get_diff_text(
+ io.StringIO(code).readlines(),
+ io.StringIO(new_code).readlines(),
+ file,
+ )
+ )
+ )
else:
print(
- get_diff_text(
- io.StringIO(code).readlines(),
- io.StringIO(new_code).readlines(),
- file,
+ get_colored_diff(
+ get_diff_text(
+ io.StringIO(code).readlines(),
+ io.StringIO(new_code).readlines(),
+ file,
+ )
)
)
diff --git a/removestar/output.py b/removestar/output.py
new file mode 100644
index 0000000..973fade
--- /dev/null
+++ b/removestar/output.py
@@ -0,0 +1,54 @@
+def bold(line):
+ return "\033[1m" + line + "\033[0m" # bold, reset
+
+
+def red(line):
+ return "\033[31m" + line + "\033[0m" # red, reset
+
+
+def yellow(line):
+ return "\033[33m" + line + "\033[0m" # yellow, reset
+
+
+def cyan(line):
+ return "\033[36m" + line + "\033[0m" # cyan, reset
+
+
+def green(line):
+ return "\033[32m" + line + "\033[0m" # green, reset
+
+
+def get_colored_diff(contents):
+ """Inject the ANSI color codes to the diff."""
+ # taken from https://github.com/psf/black/blob/main/src/black/output.py
+ # Copyright (c) 2018 Łukasz Langa
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+ lines = contents.split("\n")
+ for i, line in enumerate(lines):
+ if line.startswith(("+++", "---")):
+ line = bold(line) # bold, reset # noqa: PLW2901
+ elif line.startswith("@@"):
+ line = cyan(line) # cyan, reset # noqa: PLW2901
+ elif line.startswith("+"):
+ line = green(line) # green, reset # noqa: PLW2901
+ elif line.startswith("-"):
+ line = red(line) # red, reset # noqa: PLW2901
+ lines[i] = line
+ return "\n".join(lines)
diff --git a/removestar/removestar.py b/removestar/removestar.py
index 9180be7..3c6742e 100644
--- a/removestar/removestar.py
+++ b/removestar/removestar.py
@@ -9,6 +9,8 @@ from pathlib import Path
from pyflakes.checker import _MAGIC_GLOBALS, Checker, ModuleScope
from pyflakes.messages import ImportStarUsage, ImportStarUsed
+from .output import green, yellow
+
# quit and exit are not included in old versions of pyflakes
MAGIC_GLOBALS = set(_MAGIC_GLOBALS).union({"quit", "exit"})
@@ -65,13 +67,15 @@ def fix_code(
if not mods:
if not quiet:
print(
- f"Warning: {file}: could not find import for '{name}'",
+ yellow(f"Warning: {file}: could not find import for '{name}'"),
file=sys.stderr,
)
continue
if len(mods) > 1 and not quiet:
print(
- f"Warning: {file}: '{name}' comes from multiple modules: {', '.join(map(repr, mods))}. Using '{mods[-1]}'.",
+ yellow(
+ f"Warning: {file}: '{name}' comes from multiple modules: {', '.join(map(repr, mods))}. Using '{mods[-1]}'."
+ ),
file=sys.stderr,
)
@@ -157,22 +161,28 @@ def replace_imports(
if comment and is_noqa_comment_allowing_star_import(comment):
if verbose:
print(
- f"{verbose_prefix}Retaining 'from {mod} import *' due to noqa comment",
+ green(
+ f"{verbose_prefix}Retaining 'from {mod} import *' due to noqa comment"
+ ),
file=sys.stderr,
)
return original_import
if verbose:
print(
- f"{verbose_prefix}Replacing 'from {mod} import *' with '{new_import.strip()}'",
+ green(
+ f"{verbose_prefix}Replacing 'from {mod} import *' with '{new_import.strip()}'"
+ ),
file=sys.stderr,
)
if not new_import and comment:
if not quiet:
print(
- f"{warning_prefix}The removed star import statement for '{mod}' "
- f"had an inline comment which may not make sense without the import",
+ yellow(
+ f"{warning_prefix}The removed star import statement for '{mod}' "
+ f"had an inline comment which may not make sense without the import"
+ ),
file=sys.stderr,
)
return f"{comment}\n"
@@ -184,7 +194,7 @@ def replace_imports(
new_code, subs_made = star_import.subn(star_import_replacement, code)
if subs_made == 0 and not quiet:
print(
- f"{warning_prefix}Could not find the star imports for '{mod}'",
+ yellow(f"{warning_prefix}Could not find the star imports for '{mod}'"),
file=sys.stderr,
)
code = new_code
| asmeurer/removestar | 3692c33e7d54900669891bbaea714dfbcf751478 | diff --git a/tests/test_removestar.py b/tests/test_removestar.py
index 9c24839..a944ad3 100644
--- a/tests/test_removestar.py
+++ b/tests/test_removestar.py
@@ -7,6 +7,7 @@ from pathlib import Path
import pytest
from pyflakes.checker import Checker
+from removestar.output import get_colored_diff, green, red, yellow
from removestar.removestar import (
ExternalModuleError,
fix_code,
@@ -1317,8 +1318,8 @@ def test_replace_imports():
{"code": code_mod4, "repls": {".mod1": ["a"], ".mod2": ["b", "c"]}},
code_mod4_fixed,
[
- "Replacing 'from .mod1 import *' with 'from .mod1 import a'",
- "Replacing 'from .mod2 import *' with 'from .mod2 import b, c'",
+ green("Replacing 'from .mod1 import *' with 'from .mod1 import a'"),
+ green("Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"),
],
),
(
@@ -1329,8 +1330,12 @@ def test_replace_imports():
},
code_mod4_fixed,
[
- "directory/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'",
- "directory/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'",
+ green(
+ "directory/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'"
+ ),
+ green(
+ "directory/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"
+ ),
],
),
(
@@ -1340,9 +1345,9 @@ def test_replace_imports():
},
code_mod_commented_star_fixed,
[
- "Replacing 'from .mod3 import *' with 'from .mod3 import name'",
- "Retaining 'from .mod1 import *' due to noqa comment",
- "Retaining 'from .mod2 import *' due to noqa comment",
+ green("Replacing 'from .mod3 import *' with 'from .mod3 import name'"),
+ green("Retaining 'from .mod1 import *' due to noqa comment"),
+ green("Retaining 'from .mod2 import *' due to noqa comment"),
],
),
(
@@ -1353,9 +1358,15 @@ def test_replace_imports():
},
code_mod_commented_star_fixed,
[
- "directory/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'",
- "directory/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment",
- "directory/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment",
+ green(
+ "directory/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'"
+ ),
+ green(
+ "directory/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment"
+ ),
+ green(
+ "directory/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment"
+ ),
],
),
],
@@ -1388,8 +1399,12 @@ def test_replace_imports_warnings(capsys):
)
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
- "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod1'",
- "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod2'",
+ yellow(
+ "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod1'"
+ ),
+ yellow(
+ "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod2'"
+ ),
}
assert (
@@ -1400,8 +1415,8 @@ def test_replace_imports_warnings(capsys):
)
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
- "Warning: Could not find the star imports for '.mod1'",
- "Warning: Could not find the star imports for '.mod2'",
+ yellow("Warning: Could not find the star imports for '.mod1'"),
+ yellow("Warning: Could not find the star imports for '.mod2'"),
}
assert (
@@ -1423,7 +1438,9 @@ def test_replace_imports_warnings(capsys):
)
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
- "Warning: module/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
+ yellow(
+ "Warning: module/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import"
+ ),
}
assert (
@@ -1434,7 +1451,9 @@ def test_replace_imports_warnings(capsys):
)
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
- "Warning: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
+ yellow(
+ "Warning: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import"
+ ),
}
assert (
@@ -1656,9 +1675,12 @@ Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod
Warning: {directory}/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import
""".splitlines()
)
+ colored_warnings = {yellow(warning) for warning in warnings}
- error = f"Error with {directory}/mod_bad.py: SyntaxError: invalid syntax (mod_bad.py, line 1)"
- assert set(p.stderr.splitlines()) == warnings.union({error})
+ error = red(
+ f"Error with {directory}/mod_bad.py: SyntaxError: invalid syntax (mod_bad.py, line 1)"
+ )
+ assert set(p.stderr.splitlines()) == colored_warnings.union({error})
diffs = [
f"""\
@@ -1780,7 +1802,7 @@ Warning: {directory}/mod_commented_unused_star.py: The removed star import state
]
unchanged = ["__init__.py", "mod_bad.py", "mod_unfixable.py"]
for d in diffs:
- assert d in p.stdout, p.stdout
+ assert get_colored_diff(d) in p.stdout, p.stdout
for mod_path in unchanged:
assert f"--- original/{directory}/{mod_path}" not in p.stdout
cmp = dircmp(directory, directory_orig)
@@ -1793,7 +1815,7 @@ Warning: {directory}/mod_commented_unused_star.py: The removed star import state
)
assert p.stderr == ""
for d in diffs:
- assert d in p.stdout
+ assert get_colored_diff(d) in p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
@@ -1826,10 +1848,13 @@ Warning: {directory}/mod_commented_unused_star.py: The removed star import state
{directory}/submod_recursive/submod2.py: Replacing 'from . import *' with 'from . import a'
""".splitlines()
)
+ colored_changes = {green(change) for change in changes}
- assert set(p.stderr.splitlines()) == changes.union({error}).union(warnings)
+ assert set(p.stderr.splitlines()) == colored_changes.union({error}).union(
+ colored_warnings
+ )
for d in diffs:
- assert d in p.stdout, p.stdout
+ assert get_colored_diff(d) in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
@@ -1844,12 +1869,15 @@ Error with {directory}/mod6.py: Static determination of external module imports
Error with {directory}/mod7.py: Static determination of external module imports is not supported.
""".splitlines()
)
- assert set(p.stderr.splitlines()) == {error}.union(static_error).union(warnings)
+ colored_static_error = {red(err) for err in static_error}
+ assert set(p.stderr.splitlines()) == {error}.union(colored_static_error).union(
+ colored_warnings
+ )
for d in diffs:
if "mod6" in d:
- assert d not in p.stdout
+ assert get_colored_diff(d) not in p.stdout
else:
- assert d in p.stdout, p.stdout
+ assert get_colored_diff(d) in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
@@ -1869,9 +1897,9 @@ Error with {directory}/mod7.py: Static determination of external module imports
assert p.stderr == ""
for d in diffs:
if "mod6" in d:
- assert d not in p.stdout
+ assert get_colored_diff(d) not in p.stdout
else:
- assert d in p.stdout, p.stdout
+ assert get_colored_diff(d) in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
@@ -1934,6 +1962,7 @@ Error with {directory}/mod7.py: Static determination of external module imports
encoding="utf-8",
)
assert (
- p.stderr == f"Error: {directory}/notarealfile.py: no such file or directory\n"
+ p.stderr
+ == red(f"Error: {directory}/notarealfile.py: no such file or directory") + "\n"
)
assert p.stdout == ""
| Coloured output
The output right now is bland, but most of the pre-commit tools offer [rich](https://github.com/Textualize/rich) output for ease of reading. | 0.0 | 3692c33e7d54900669891bbaea714dfbcf751478 | [
"tests/test_removestar.py::test_names_to_replace",
"tests/test_removestar.py::test_star_imports",
"tests/test_removestar.py::test_get_names",
"tests/test_removestar.py::test_get_names_from_dir[True]",
"tests/test_removestar.py::test_get_names_from_dir[False]",
"tests/test_removestar.py::test_get_names_dynamically",
"tests/test_removestar.py::test_fix_code",
"tests/test_removestar.py::test_get_mod_filename[True]",
"tests/test_removestar.py::test_get_mod_filename[False]",
"tests/test_removestar.py::test_replace_imports",
"tests/test_removestar.py::test_replace_imports_verbose_messages[mod4",
"tests/test_removestar.py::test_replace_imports_verbose_messages[mod_commented_star",
"tests/test_removestar.py::test_replace_imports_warnings",
"tests/test_removestar.py::test_replace_imports_line_wrapping",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa-same",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa-upper",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa-lower",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False-#",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa:F401-same",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa:F401-upper",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#noqa:F401-lower",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#\\tnoqa:\\tF401\\t-same",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#\\tnoqa:\\tF401\\t-upper",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[True-#\\tnoqa:\\tF401\\t-lower",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False-#-same",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False-#-upper",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False-#-lower",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False--same",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False--upper",
"tests/test_removestar.py::test_is_noqa_comment_allowing_star_import[False--lower",
"tests/test_removestar.py::test_cli"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2023-09-04 19:53:55+00:00 | mit | 1,103 |
|
asottile__add-trailing-comma-12 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 7a571a7..882cb9d 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -7,6 +7,7 @@ import collections
import io
import sys
+from tokenize_rt import ESCAPED_NL
from tokenize_rt import src_to_tokens
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
@@ -20,8 +21,8 @@ Literal = collections.namedtuple('Literal', ('node', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
Fix = collections.namedtuple('Fix', ('braces', 'multi_arg', 'initial_indent'))
-NEWLINES = frozenset(('NEWLINE', 'NL'))
-NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
+NEWLINES = frozenset((ESCAPED_NL, 'NEWLINE', 'NL'))
+NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
INDENT_TOKENS = frozenset(('INDENT', UNIMPORTANT_WS))
START_BRACES = frozenset(('(', '{', '['))
END_BRACES = frozenset((')', '}', ']'))
diff --git a/setup.py b/setup.py
index 39c86ff..828d6a9 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ setup(
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
- install_requires=['tokenize-rt'],
+ install_requires=['tokenize-rt>=2'],
py_modules=['add_trailing_comma'],
entry_points={
'console_scripts': ['add-trailing-comma = add_trailing_comma:main'],
| asottile/add-trailing-comma | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 9f67fb3..d41af5c 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -501,6 +501,11 @@ def test_fix_unhugs_py3_only(src, expected):
' 1, 2, 3, 4,\n'
' ],\n'
']',
+ # Regression test for #11
+ 'foo.\\\n'
+ ' bar(\n'
+ ' 5,\n'
+ ' )',
),
)
def test_noop_trailing_brace(src):
| escaped newlines are throwing off indent detection
This should be a noop:
```python
x = y.\
foo(
bar,
)
```
However, this is the current behaviour:
```diff
x = y.\
foo(
bar,
- )
+)
```
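For context, a minimal sketch of why the indent walk goes wrong (assuming tokenize-rt >= 2, which the accompanying patch pins; the `ESCAPED_NL` token it exposes represents the backslash continuation):
```python
from tokenize_rt import ESCAPED_NL, src_to_tokens

src = 'x = y.\\\n    foo(\n        bar,\n    )\n'
for token in src_to_tokens(src):
    if token.name == ESCAPED_NL:
        # the continuation is its own token, distinct from NL/NEWLINE, so
        # indent detection that ignores it walks past the real line start
        print(token)
```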
Might need help from https://github.com/asottile/tokenize-rt/issues/1 | 0.0 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | [
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-14 15:44:54+00:00 | mit | 1,104 |
|
asottile__add-trailing-comma-154 | diff --git a/add_trailing_comma/_plugins/literals.py b/add_trailing_comma/_plugins/literals.py
index c5771be..53bdcf6 100644
--- a/add_trailing_comma/_plugins/literals.py
+++ b/add_trailing_comma/_plugins/literals.py
@@ -26,12 +26,11 @@ def _fix_literal(
*,
one_el_tuple: bool,
) -> None:
- if tokens[i].src in START_BRACES: # pragma: no branch (<py38)
- fix_brace(
- tokens, find_simple(i, tokens),
- add_comma=True,
- remove_comma=not one_el_tuple,
- )
+ fix_brace(
+ tokens, find_simple(i, tokens),
+ add_comma=True,
+ remove_comma=not one_el_tuple,
+ )
@register(ast.Set)
@@ -90,6 +89,27 @@ def _fix_tuple(
)
+def _fix_tuple_py38(
+ i: int,
+ tokens: List[Token],
+ *,
+ one_el_tuple: bool,
+) -> None: # pragma: no cover (<py38)
+ if tokens[i].src in START_BRACES:
+ fix = find_simple(i, tokens)
+
+ # for tuples we *must* find a comma, otherwise it is not a tuple
+ if fix is None or not fix.multi_arg:
+ return
+
+ fix_brace(
+ tokens,
+ fix,
+ add_comma=True,
+ remove_comma=not one_el_tuple,
+ )
+
+
@register(ast.Tuple)
def visit_Tuple(
state: State,
@@ -105,5 +125,5 @@ def visit_Tuple(
func = functools.partial(_fix_tuple, one_el_tuple=is_one_el)
yield ast_to_offset(node), func
else: # pragma: no cover (py38+)
- func = functools.partial(_fix_literal, one_el_tuple=is_one_el)
+ func = functools.partial(_fix_tuple_py38, one_el_tuple=is_one_el)
yield ast_to_offset(node), func
| asottile/add-trailing-comma | d370e91067c0ee7f4a87b86e4f8f2c483fd59c21 | diff --git a/tests/features/literals_test.py b/tests/features/literals_test.py
index 74f543e..f5dda44 100644
--- a/tests/features/literals_test.py
+++ b/tests/features/literals_test.py
@@ -15,6 +15,12 @@ from add_trailing_comma._main import _fix_src
' pass\n'
'[x] = {y}',
pytest.param('x[1, 2, 3, 4]', id='multi-slice'),
+ pytest.param(
+ 'x = (\n'
+ ' object\n'
+ '), object\n',
+ id='regression test for #153',
+ ),
),
)
def test_noop_literals(src):
| Tool may change variables from type `T` to `tuple[T]`
Hey, I found this while using this as a pre-commit hook (big fan of the project & your tools btw! :)).
It seems the tool may turn a type into a `tuple[type]` in a situation like this:
```py
x = (
object # < comma is added here
), object
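```

To make the regression concrete, here is a quick REPL-style check (hypothetical, not from the issue) of what that stray comma does to the value:

```python
x = (
    object
), object
print(x)  # (<class 'object'>, <class 'object'>) -- a plain 2-tuple

y = (
    object,  # the comma the tool used to insert
), object
print(y)  # ((<class 'object'>,), <class 'object'>) -- first element is now a 1-tuple
```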
| 0.0 | d370e91067c0ee7f4a87b86e4f8f2c483fd59c21 | [
"tests/features/literals_test.py::test_noop_literals[regression"
]
| [
"tests/features/literals_test.py::test_noop_literals[(1,",
"tests/features/literals_test.py::test_noop_literals[[1,",
"tests/features/literals_test.py::test_noop_literals[{1,",
"tests/features/literals_test.py::test_noop_literals[{1:",
"tests/features/literals_test.py::test_noop_literals[if",
"tests/features/literals_test.py::test_noop_literals[multi-slice]",
"tests/features/literals_test.py::test_fixes_literals[x",
"tests/features/literals_test.py::test_fixes_literals[(\\n",
"tests/features/literals_test.py::test_fixes_literals[multi-line",
"tests/features/literals_test.py::test_fixes_literals[single",
"tests/features/literals_test.py::test_fixes_literals[nested",
"tests/features/literals_test.py::test_fixes_py35_plus_literals[x",
"tests/features/literals_test.py::test_noop_tuple_literal_without_braces"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2021-11-20 17:56:59+00:00 | mit | 1,105 |
|
asottile__add-trailing-comma-199 | diff --git a/README.md b/README.md
index bb6195f..b32fb42 100644
--- a/README.md
+++ b/README.md
@@ -178,6 +178,17 @@ Note that this would cause a **`SyntaxError`** in earlier python versions.
pass
```
+### trailing comma for with statement
+
+```diff
+ with (
+ open('f1', 'r') as f1,
+- open('f2', 'w') as f2
++ open('f2', 'w') as f2,
+ ):
+ pass
+```
+
### trailing comma for match statement
```diff
diff --git a/add_trailing_comma/_plugins/_with.py b/add_trailing_comma/_plugins/_with.py
new file mode 100644
index 0000000..ffbaf81
--- /dev/null
+++ b/add_trailing_comma/_plugins/_with.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+import ast
+import sys
+from typing import Iterable
+
+from tokenize_rt import Offset
+from tokenize_rt import Token
+
+from add_trailing_comma._ast_helpers import ast_to_offset
+from add_trailing_comma._data import register
+from add_trailing_comma._data import State
+from add_trailing_comma._data import TokenFunc
+from add_trailing_comma._token_helpers import find_simple
+from add_trailing_comma._token_helpers import fix_brace
+
+
+if sys.version_info >= (3, 9): # pragma: >=3.9 cover
+ def _fix_with(i: int, tokens: list[Token]) -> None:
+ i += 1
+ if tokens[i].name == 'UNIMPORTANT_WS':
+ i += 1
+ if tokens[i].src == '(':
+ fix = find_simple(i, tokens)
+ # only fix if outer parens are for the with items (next is ':')
+ if fix is not None and tokens[fix.braces[-1] + 1].src == ':':
+ fix_brace(tokens, fix, add_comma=True, remove_comma=True)
+
+ @register(ast.With)
+ def visit_With(
+ state: State,
+ node: ast.With,
+ ) -> Iterable[tuple[Offset, TokenFunc]]:
+ yield ast_to_offset(node), _fix_with
| asottile/add-trailing-comma | 4850deff3e74a0e7d0384c30ecea72a3fa594c4b | diff --git a/tests/features/with_test.py b/tests/features/with_test.py
new file mode 100644
index 0000000..2f64fa2
--- /dev/null
+++ b/tests/features/with_test.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import sys
+
+import pytest
+
+from add_trailing_comma._main import _fix_src
+
+
[email protected](
+ 'src',
+ (
+ pytest.param(
+ 'from threading import Lock\n'
+ 'with (Lock() as l):\n'
+ ' pass',
+ id='simple named context manager',
+ ),
+ pytest.param(
+ 'with (\n'
+ ' open("wat")\n'
+ ') as f, open("2") as f2: pass',
+ id='parenthesized expression',
+ ),
+ pytest.param(
+ 'with open("/tmp/t.py") as f: pass',
+ id='old style',
+ ),
+ pytest.param(
+ 'with open("/tmp/t.py") as f, \\\n'
+ ' open("/tmp/y.py") as g: pass',
+ id='escaped newline',
+ ),
+ pytest.param(
+ 'with (open("/tmp/t.py") as f): pass',
+ id='single item',
+ ),
+ pytest.param(
+ 'with (open("/tmp/t.py") as t, open("/tmp/y.py") as y): pass',
+ id='single line',
+ ),
+ ),
+)
+def test_noop(src):
+ assert _fix_src(src, min_version=(2, 7)) == src
+
+
[email protected](sys.version_info < (3, 9), reason='py39+')
[email protected](
+ ('src', 'expected'),
+ (
+ pytest.param(
+ # Make sure that whitespace is not expected after "with"
+ 'with(\n'
+ ' open("/tmp/t.txt") as file1,\n'
+ ' open("/tmp/t.txt") as file2\n'
+ '): pass',
+
+ 'with(\n'
+ ' open("/tmp/t.txt") as file1,\n'
+ ' open("/tmp/t.txt") as file2,\n'
+ '): pass',
+ id='simple usecase',
+ ),
+ pytest.param(
+ 'from threading import lock\n'
+ 'with (lock() as l,\n'
+ ' open("/tmp/t.txt")):\n'
+ ' pass',
+
+ 'from threading import lock\n'
+ 'with (\n'
+ ' lock() as l,\n'
+ ' open("/tmp/t.txt"),\n'
+ '):\n'
+ ' pass',
+ id='unhug',
+ ),
+ pytest.param(
+ 'with (open(\n'
+ ' "a",\n'
+ ' some_other_really_long_parameter=True,\n'
+ ') as a, a.lock): pass',
+
+ 'with (\n'
+ ' open(\n'
+ ' "a",\n'
+ ' some_other_really_long_parameter=True,\n'
+ ' ) as a, a.lock,\n'
+ '): pass',
+ id='lower level linebreaks',
+ ),
+ pytest.param(
+ 'with (a as b, c as d,): pass\n',
+ 'with (a as b, c as d): pass\n',
+ id='remove unnecessary comma',
+ ),
+ pytest.param(
+ 'with (a as b,): pass\n',
+ 'with (a as b): pass\n',
+ id='remove unnecessary comma one item',
+ ),
+ ),
+)
+def test_py39_multiwith(src, expected):
+ assert _fix_src(src, min_version=(2, 7)) == expected
| --py39-plus: multi-with-statements
Add support for trailing commas for this:
```diff
with (
foo as bar,
- baz as womp
+ baz as womp,
):
# ...
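```

As a quick, hypothetical check of why the new fixer sits behind a `sys.version_info >= (3, 9)` guard — the parenthesized with-items form simply does not parse on older interpreters:

```python
# Parenthesized with-items (including the trailing comma) are a syntax
# error before CPython 3.9; the 3.9+ PEG parser accepts them.
import sys

SRC = (
    'with (\n'
    '    open("f1") as f1,\n'
    '    open("f2") as f2,\n'
    '):\n'
    '    pass\n'
)

compile(SRC, '<example>', 'exec')
print('parses fine on', sys.version_info[:2])
```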
| 0.0 | 4850deff3e74a0e7d0384c30ecea72a3fa594c4b | [
"tests/features/with_test.py::test_py39_multiwith[simple",
"tests/features/with_test.py::test_py39_multiwith[unhug]",
"tests/features/with_test.py::test_py39_multiwith[lower",
"tests/features/with_test.py::test_py39_multiwith[remove"
]
| [
"tests/features/with_test.py::test_noop[simple",
"tests/features/with_test.py::test_noop[parenthesized",
"tests/features/with_test.py::test_noop[old",
"tests/features/with_test.py::test_noop[escaped",
"tests/features/with_test.py::test_noop[single"
]
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2022-09-16 11:23:23+00:00 | mit | 1,106 |
|
asottile__add-trailing-comma-20 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index d8f1939..9ef7e2f 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -64,16 +64,12 @@ class FindNodes(ast.NodeVisitor):
self.literals = {}
self.has_new_syntax = False
- def _visit_literal(self, node, key='elts', is_multiline=False, **kwargs):
- orig = node.lineno
-
+ def _visit_literal(self, node, key='elts', **kwargs):
for elt in getattr(node, key):
- if elt.lineno > orig:
- is_multiline = True
if _is_star_arg(elt): # pragma: no cover (PY35+)
self.has_new_syntax = True
- if is_multiline:
+ if getattr(node, key):
key = Offset(node.lineno, node.col_offset)
self.literals[key] = Literal(node, **kwargs)
self.generic_visit(node)
@@ -87,13 +83,10 @@ class FindNodes(ast.NodeVisitor):
self._visit_literal(node, key='values')
def visit_Tuple(self, node):
- # tuples lie about things, so we pretend they are all multiline
- # and tell the later machinery to backtrack
- self._visit_literal(node, is_multiline=True, backtrack=True)
+        # tuples lie about things so we tell the later machinery to backtrack
+ self._visit_literal(node, backtrack=True)
def visit_Call(self, node):
- orig = node.lineno
-
argnodes = node.args + node.keywords
py2_starargs = getattr(node, 'starargs', None)
if py2_starargs: # pragma: no cover (<PY35)
@@ -103,7 +96,6 @@ class FindNodes(ast.NodeVisitor):
argnodes.append(py2_kwargs)
arg_offsets = set()
- is_multiline = False
has_starargs = bool(py2_starargs or py2_kwargs)
for argnode in argnodes:
if (
@@ -115,8 +107,6 @@ class FindNodes(ast.NodeVisitor):
offset = _to_offset(argnode)
# multiline strings have invalid position, ignore them
if offset.utf8_byte_offset != -1: # pragma: no branch (cpy bug)
- if offset.line > orig:
- is_multiline = True
arg_offsets.add(offset)
# If the sole argument is a generator, don't add a trailing comma as
@@ -125,7 +115,7 @@ class FindNodes(ast.NodeVisitor):
len(argnodes) == 1 and isinstance(argnodes[0], ast.GeneratorExp)
)
- if is_multiline and not only_a_generator:
+ if arg_offsets and not only_a_generator:
key = Offset(node.lineno, node.col_offset)
self.calls[key] = Call(node, has_starargs, arg_offsets)
@@ -144,16 +134,12 @@ class FindNodes(ast.NodeVisitor):
getattr(node.args, 'kwonlyargs', None)
)
- orig = node.lineno
- is_multiline = False
offsets = set()
for argnode in node.args.args:
offset = _to_offset(argnode)
- if offset.line > orig:
- is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if offsets and not has_starargs:
key = Offset(node.lineno, node.col_offset)
self.funcs[key] = Func(node, offsets)
@@ -181,7 +167,7 @@ def _find_simple(first_brace, tokens):
last_brace = i
- # This was not actually a multi-line call, despite the ast telling us that
+ # Check if we're actually multi-line
if tokens[first_brace].line == tokens[last_brace].line:
return
| asottile/add-trailing-comma | e6cfc6a9976fc305b0054b30995b5407fea833a5 | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 01ee421..450e3a0 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -257,6 +257,7 @@ def test_noop_tuple_literal_without_braces():
@pytest.mark.parametrize(
'src',
(
+ 'def f(): pass',
'def f(arg1, arg2): pass',
'def f(\n'
' arg1,\n'
@@ -354,6 +355,22 @@ def test_noop_unhugs(src):
' c,\n'
')',
),
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
# if there's already a trailing comma, don't add a new one
(
'f(\n'
@@ -493,6 +510,16 @@ def test_noop_unhugs(src):
' ),\n'
')',
),
+ # Regression test for #16
+ (
+ 'x("foo"\n'
+ ' "bar")',
+
+ 'x(\n'
+ ' "foo"\n'
+ ' "bar",\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
@@ -503,23 +530,6 @@ def test_fix_unhugs(src, expected):
@pytest.mark.parametrize(
('src', 'expected'),
(
- # python 2 doesn't give offset information for starargs
- (
- 'def f(\n'
- ' *args): pass',
-
- 'def f(\n'
- ' *args\n'
- '): pass',
- ),
- (
- 'def f(\n'
- ' **kwargs): pass',
-
- 'def f(\n'
- ' **kwargs\n'
- '): pass',
- ),
# python 2 doesn't kwonlyargs
(
'def f(\n'
| Two iterations are required to resolve func(multi-line string literal)
### input
```python
f('long'
'literal')
```
### output 1
```python
f(
'long'
'literal'
)
```
### output 2
```python
f(
'long'
'literal',
)
```
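A hypothetical driver (not part of add-trailing-comma) could paper over the non-convergence by re-running the rewrite until it stops changing anything:

```python
def fix_until_stable(src, fix):
    # Hypothetical workaround: apply `fix` repeatedly until the source
    # reaches a fixed point.
    prev = None
    while src != prev:
        prev, src = src, fix(src)
    return src
```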
This _should_ resolve in a single pass | 0.0 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | [
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-16 21:34:30+00:00 | mit | 1,107 |
|
asottile__add-trailing-comma-23 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 784e00b..da4d733 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -59,7 +59,8 @@ def _is_star_star_kwarg(node):
class FindNodes(ast.NodeVisitor):
def __init__(self):
- self.calls = {}
+ # multiple calls can report their starting position as the same
+ self.calls = collections.defaultdict(list)
self.funcs = {}
self.literals = {}
@@ -109,7 +110,7 @@ class FindNodes(ast.NodeVisitor):
if arg_offsets and not only_a_generator:
key = Offset(node.lineno, node.col_offset)
- self.calls[key] = Call(node, has_starargs, arg_offsets)
+ self.calls[key].append(Call(node, has_starargs, arg_offsets))
self.generic_visit(node)
@@ -312,33 +313,30 @@ def _fix_src(contents_text, py35_plus):
tokens = src_to_tokens(contents_text)
for i, token in _changing_list(tokens):
key = Offset(token.line, token.utf8_byte_offset)
- add_comma = True
- fix_data = None
+ fixes = []
if key in visitor.calls:
- call = visitor.calls[key]
- # Only fix stararg calls if asked to
- add_comma = not call.star_args or py35_plus
- fix_data = _find_call(call, i, tokens)
+ for call in visitor.calls[key]:
+ # Only fix stararg calls if asked to
+ add_comma = not call.star_args or py35_plus
+ fixes.append((add_comma, _find_call(call, i, tokens)))
elif key in visitor.funcs:
- func = visitor.funcs[key]
# functions can be treated as calls
- fix_data = _find_call(func, i, tokens)
+ fixes.append((True, _find_call(visitor.funcs[key], i, tokens)))
# Handle parenthesized things
elif token.src == '(':
- fix_data = _find_simple(i, tokens)
- add_comma = False
-
- if fix_data is not None:
- _fix_brace(fix_data, add_comma, tokens)
+ fixes.append((False, _find_simple(i, tokens)))
# need to additionally handle literals afterwards as tuples report
# their starting index as the first element, which may be one of the
# above things.
if key in visitor.literals:
fix_data = _find_literal(visitor.literals[key], i, tokens)
+ fixes.append((True, fix_data))
+
+ for add_comma, fix_data in fixes:
if fix_data is not None:
- _fix_brace(fix_data, True, tokens)
+ _fix_brace(fix_data, add_comma, tokens)
return tokens_to_src(tokens)
| asottile/add-trailing-comma | 47aa870cde65d699237d345df17bfb1ca03bd3f7 | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 12b7326..5229737 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -114,6 +114,16 @@ def test_py35_plus_rewrite():
' 1,\n'
')',
),
+ # Regression test for #22
+ (
+ 'x({}).y(\n'
+ ' x\n'
+ ')',
+
+ 'x({}).y(\n'
+ ' x,\n'
+ ')',
+ ),
),
)
def test_fixes_calls(src, expected):
| Regression f({}).y(...) is not adding commas
Seems I broke this in 0.4.3 because now the first function is being considered:
```python
f({}).y(
x
)
```
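A hypothetical `ast` probe makes the collision visible — both `Call` nodes report the offset of the leading `x`, which is why the patch switches the offset-keyed dict to a `defaultdict(list)`:

```python
import ast

tree = ast.parse('x({}).y(\n    x\n)')
for node in ast.walk(tree):
    if isinstance(node, ast.Call):
        # both calls start at line 1, column 0
        print(type(node.func).__name__, (node.lineno, node.col_offset))
# Attribute (1, 0)  <- the .y(...) call
# Name (1, 0)       <- the x({}) call
```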
A comma should be added on the `x` line. | 0.0 | 47aa870cde65d699237d345df17bfb1ca03bd3f7 | [
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-17 17:47:46+00:00 | mit | 1,108 |
|
asottile__add-trailing-comma-28 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 926f9de..2e71af5 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -325,6 +325,9 @@ def _fix_src(contents_text, py35_plus, py36_plus):
tokens = src_to_tokens(contents_text)
for i, token in _changing_list(tokens):
+ # DEDENT is a zero length token
+ if not token.src:
+ continue
key = Offset(token.line, token.utf8_byte_offset)
fixes = []
| asottile/add-trailing-comma | 3343fe9ba1b396342d27a73fafa88807b47fc254 | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 82e51eb..a370743 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -149,9 +149,13 @@ def test_fixes_calls(src, expected):
'[1, 2, 3, 4]',
'{1, 2, 3, 4}',
'{1: 2, 3: 4}',
+ # Regression test for #26
+ 'if True:\n'
+ ' pass\n'
+ '[x] = {y}',
),
)
-def test_noop_one_line_literals(src):
+def test_noop_literals(src):
assert _fix_src(src, py35_plus=False, py36_plus=False) == src
| AssertionError on valid syntax
```python
with a:
pass
[b] = {1}
```
produces this error with add-trailing-comma version 0.5.1:
```
Traceback (most recent call last):
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/bin/add-trailing-comma", line 9, in <module>
load_entry_point('add-trailing-comma==0.5.1', 'console_scripts', 'add-trailing-comma')()
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 378, in main
ret |= fix_file(filename, args)
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 358, in fix_file
contents_text = _fix_src(contents_text, args.py35_plus)
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 333, in _fix_src
fixes.append((True, _find_simple(i, tokens)))
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 154, in _find_simple
raise AssertionError('Past end?')
AssertionError: Past end
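```

The `if not token.src: continue` guard in the patch is easiest to see with a small sketch: the `DEDENT` emitted after the `with` block is zero-length, so it reports the same offset as the `[` that follows it and used to be picked up by the brace matching:

```python
# Sketch: zero-length DEDENT tokens alias the offset of the next real token.
from tokenize_rt import src_to_tokens

SRC = 'with a:\n    pass\n[b] = {1}\n'

for token in src_to_tokens(SRC):
    if token.name == 'DEDENT':
        print(token)  # src == '', offset == start of the `[b] = {1}` line
```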
| 0.0 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | [
"tests/add_trailing_comma_test.py::test_noop_literals[if"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_noop_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs",
"tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus",
"tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2017-07-18 18:45:04+00:00 | mit | 1,109 |
|
asottile__add-trailing-comma-34 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 006dfe8..846894c 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -341,11 +341,11 @@ def _fix_src(contents_text, py35_plus, py36_plus):
add_comma = not func.star_args or py36_plus
# functions can be treated as calls
fixes.append((add_comma, _find_call(func, i, tokens)))
- # Handle parenthesized things
- elif token.src == '(':
- fixes.append((False, _find_simple(i, tokens)))
elif key in visitor.literals:
fixes.append((True, _find_simple(i, tokens)))
+ # Handle parenthesized things, unhug of tuples, and comprehensions
+ elif token.src in START_BRACES:
+ fixes.append((False, _find_simple(i, tokens)))
for add_comma, fix_data in fixes:
if fix_data is not None:
| asottile/add-trailing-comma | bd9cb3fdaecb61b016162d2071de2cf0ef631eaa | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index a663f99..ff4a4a8 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -577,6 +577,23 @@ def test_noop_unhugs(src):
' ], None,\n'
')',
),
+ # Regression test for #32
+ (
+ '[a()\n'
+ ' for b in c\n'
+ ' if (\n'
+ ' d\n'
+ ' )\n'
+ ']',
+
+ '[\n'
+ ' a()\n'
+ ' for b in c\n'
+ ' if (\n'
+ ' d\n'
+ ' )\n'
+ ']',
+ ),
),
)
def test_fix_unhugs(src, expected):
| autopep8 and add-trailing-comma fight over indentation
Here's an (admittedly poorly-formatted) reproduction. FWIW I prefer add-trailing-comma's approach, but curious to hear your thoughts. This is obviously weird indentation but ideally they wouldn't fight. Do we need to make autopep8 smarter?
## what add-trailing-comma wants
```python
[a()
for b in c
if (
d
)
]
```
## what autopep8 wants
```python
[a()
for b in c
if (
d
)
]
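```

For the record, the regression test added in the patch pins down the layout the tool settles on — the comprehension is fully unhugged rather than left in either half-hugged form (indentation as in the test's expected output):

```python
[
    a()
    for b in c
    if (
        d
    )
]
```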
| 0.0 | bd9cb3fdaecb61b016162d2071de2cf0ef631eaa | [
"tests/add_trailing_comma_test.py::test_fix_unhugs[[a()\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_noop_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_literals[if",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs",
"tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus",
"tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2017-07-26 10:20:33+00:00 | mit | 1,110 |
|
asottile__add-trailing-comma-71 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 9eaa33f..34e1012 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -271,7 +271,7 @@ def _fix_brace(fix_data, add_comma, tokens):
# Don't unhug single element things with a multi-line component
# inside.
not fix_data.multi_arg and
- hug_open and
+ tokens[first_brace + 1].src in START_BRACES and
tokens[last_brace - 1].src in END_BRACES or
# Don't unhug when containing a single token (such as a triple
# quoted string).
| asottile/add-trailing-comma | 36792566ac51d63a744c23a26d4d27b1ae7bc8a6 | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index fa4593d..9fde7b4 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -594,6 +594,17 @@ def test_noop_unhugs(src):
' )\n'
']',
),
+ pytest.param(
+ 'x = [x\n'
+ ' for x in y()]\n',
+
+ 'x = [\n'
+ ' x\n'
+ ' for x in y()\n'
+ ']\n',
+
+ id='#42: listcomp unhug ends in brace',
+ ),
),
)
def test_fix_unhugs(src, expected):
| False negative with statement ending with parens
```python
if (foo and
bar()):
pass
```
I expect this to be rewritten to:
```python
if (
foo and
bar()
):
pass
```
But I imagine the same code that prevents this from being rewritten is firing:
```python
foo('bar {}'.format(
'baz',
))
```
which I don't think should be rewritten.
Need to come up with a more clever heuristic here. | 0.0 | 36792566ac51d63a744c23a26d4d27b1ae7bc8a6 | [
"tests/add_trailing_comma_test.py::test_fix_unhugs[#42:"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[(\\n",
"tests/add_trailing_comma_test.py::test_noop_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_literals[if",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[[a()\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_fix_from_import_noop[from",
"tests/add_trailing_comma_test.py::test_fix_from_import[from",
"tests/add_trailing_comma_test.py::test_fix_from_import[if",
"tests/add_trailing_comma_test.py::test_fix_classes_noop[class",
"tests/add_trailing_comma_test.py::test_fix_classes[class",
"tests/add_trailing_comma_test.py::test_fix_classes_py3_only_syntax[bases",
"tests/add_trailing_comma_test.py::test_fix_classes_py3_only_syntax[kws",
"tests/add_trailing_comma_test.py::test_fix_classes_py3_only_syntax[class",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_preserves_line_endings",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs",
"tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus",
"tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas"
]
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2019-03-22 03:04:14+00:00 | mit | 1,111 |
|
asottile__add-trailing-comma-8 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 49e98c7..bd16709 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -15,9 +15,10 @@ from tokenize_rt import UNIMPORTANT_WS
Offset = collections.namedtuple('Offset', ('line', 'utf8_byte_offset'))
Call = collections.namedtuple('Call', ('node', 'star_args', 'arg_offsets'))
-Func = collections.namedtuple('Func', ('node', 'arg_offsets'))
+Func = collections.namedtuple('Func', ('node', 'star_args', 'arg_offsets'))
Literal = collections.namedtuple('Literal', ('node', 'braces', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
+Fix = collections.namedtuple('Fix', ('braces', 'initial_indent'))
NEWLINES = frozenset(('NEWLINE', 'NL'))
NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
@@ -141,28 +142,39 @@ class FindNodes(ast.NodeVisitor):
self.generic_visit(node)
def visit_FunctionDef(self, node):
- has_starargs = (
- node.args.vararg or node.args.kwarg or
- # python 3 only
- getattr(node.args, 'kwonlyargs', None)
- )
+ has_starargs = False
+ args = list(node.args.args)
+
+ if node.args.vararg:
+ if isinstance(node.args.vararg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.vararg)
+ has_starargs = True
+ if node.args.kwarg:
+ if isinstance(node.args.kwarg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.kwarg)
+ has_starargs = True
+ py3_kwonlyargs = getattr(node.args, 'kwonlyargs', None)
+ if py3_kwonlyargs: # pragma: no cover (py3)
+ args.extend(py3_kwonlyargs)
+ has_starargs = True
+
orig = node.lineno
is_multiline = False
offsets = set()
- for argnode in node.args.args:
+ for argnode in args:
offset = _to_offset(argnode)
if offset.line > orig:
is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if is_multiline:
key = Offset(node.lineno, node.col_offset)
- self.funcs[key] = Func(node, offsets)
+ self.funcs[key] = Func(node, has_starargs, offsets)
self.generic_visit(node)
-def _fix_inner(brace_start, brace_end, first_brace, tokens):
+def _find_simple(brace_start, brace_end, first_brace, tokens):
brace_stack = [first_brace]
for i in range(first_brace + 1, len(tokens)):
@@ -183,12 +195,6 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
if tokens[first_brace].line == tokens[last_brace].line:
return
- # Figure out if either of the braces are "hugging"
- hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
- hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
- if hug_open and tokens[last_brace - 1].src in END_BRACES:
- hug_open = hug_close = False
-
# determine the initial indentation
i = first_brace
while i >= 0 and tokens[i].name not in NEWLINES:
@@ -199,51 +205,10 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
else:
initial_indent = 0
- # fix open hugging
- if hug_open:
- new_indent = initial_indent + 4
-
- tokens[first_brace + 1:first_brace + 1] = [
- Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
- ]
- last_brace += 2
-
- # Adust indentation for the rest of the things
- min_indent = None
- indents = []
- for i in range(first_brace + 3, last_brace):
- if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
- if min_indent is None:
- min_indent = len(tokens[i].src)
- elif len(tokens[i].src) < min_indent:
- min_indent = len(tokens[i].src)
-
- indents.append(i)
+ return Fix(braces=(first_brace, last_brace), initial_indent=initial_indent)
- for i in indents:
- oldlen = len(tokens[i].src)
- newlen = oldlen - min_indent + new_indent
- tokens[i] = tokens[i]._replace(src=' ' * newlen)
- # fix close hugging
- if hug_close:
- tokens[last_brace:last_brace] = [
- Token('NL', '\n'),
- Token(UNIMPORTANT_WS, ' ' * initial_indent),
- ]
- last_brace += 2
-
- # From there, we can walk backwards and decide whether a comma is needed
- i = last_brace - 1
- while tokens[i].name in NON_CODING_TOKENS:
- i -= 1
-
- # If we're not a hugging paren, we can insert a comma
- if tokens[i].src != ',' and i + 1 != last_brace:
- tokens.insert(i + 1, Token('OP', ','))
-
-
-def _fix_call(call, i, tokens):
+def _find_call(call, i, tokens):
# When we get a `call` object, the ast refers to it as this:
#
# func_name(arg, arg, arg)
@@ -273,10 +238,10 @@ def _fix_call(call, i, tokens):
else:
raise AssertionError('Past end?')
- _fix_inner(brace_start, brace_end, first_brace, tokens)
+ return _find_simple(brace_start, brace_end, first_brace, tokens)
-def _fix_literal(literal, i, tokens):
+def _find_literal(literal, i, tokens):
brace_start, brace_end = literal.braces
# tuples are evil, we need to backtrack to find the opening paren
@@ -289,7 +254,60 @@ def _fix_literal(literal, i, tokens):
if tokens[i].src != brace_start:
return
- _fix_inner(brace_start, brace_end, i, tokens)
+ return _find_simple(brace_start, brace_end, i, tokens)
+
+
+def _fix_comma_and_unhug(fix_data, add_comma, tokens):
+ first_brace, last_brace = fix_data.braces
+
+ # Figure out if either of the braces are "hugging"
+ hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
+ hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
+ if hug_open and tokens[last_brace - 1].src in END_BRACES:
+ hug_open = hug_close = False
+
+ # fix open hugging
+ if hug_open:
+ new_indent = fix_data.initial_indent + 4
+
+ tokens[first_brace + 1:first_brace + 1] = [
+ Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
+ ]
+ last_brace += 2
+
+ # Adust indentation for the rest of the things
+ min_indent = None
+ indents = []
+ for i in range(first_brace + 3, last_brace):
+ if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
+ if min_indent is None:
+ min_indent = len(tokens[i].src)
+ elif len(tokens[i].src) < min_indent:
+ min_indent = len(tokens[i].src)
+
+ indents.append(i)
+
+ for i in indents:
+ oldlen = len(tokens[i].src)
+ newlen = oldlen - min_indent + new_indent
+ tokens[i] = tokens[i]._replace(src=' ' * newlen)
+
+ # fix close hugging
+ if hug_close:
+ tokens[last_brace:last_brace] = [
+ Token('NL', '\n'),
+ Token(UNIMPORTANT_WS, ' ' * fix_data.initial_indent),
+ ]
+ last_brace += 2
+
+ # From there, we can walk backwards and decide whether a comma is needed
+ i = last_brace - 1
+ while tokens[i].name in NON_CODING_TOKENS:
+ i -= 1
+
+ # If we're not a hugging paren, we can insert a comma
+ if add_comma and tokens[i].src != ',' and i + 1 != last_brace:
+ tokens.insert(i + 1, Token('OP', ','))
def _fix_src(contents_text, py35_plus):
@@ -305,16 +323,25 @@ def _fix_src(contents_text, py35_plus):
tokens = src_to_tokens(contents_text)
for i, token in reversed(tuple(enumerate(tokens))):
key = Offset(token.line, token.utf8_byte_offset)
+ add_comma = True
+ fix_data = None
+
if key in visitor.calls:
call = visitor.calls[key]
# Only fix stararg calls if asked to
- if not call.star_args or py35_plus:
- _fix_call(call, i, tokens)
- elif key in visitor.literals:
- _fix_literal(visitor.literals[key], i, tokens)
+ add_comma = not call.star_args or py35_plus
+ fix_data = _find_call(call, i, tokens)
elif key in visitor.funcs:
+ func = visitor.funcs[key]
+ # any amount of starargs excludes adding a comma for defs
+ add_comma = not func.star_args
# functions can be treated as calls
- _fix_call(visitor.funcs[key], i, tokens)
+ fix_data = _find_call(func, i, tokens)
+ elif key in visitor.literals:
+ fix_data = _find_literal(visitor.literals[key], i, tokens)
+
+ if fix_data is not None:
+ _fix_comma_and_unhug(fix_data, add_comma, tokens)
return tokens_to_src(tokens)
| asottile/add-trailing-comma | 8d87f678b13ac1497b688173e94d21d8371746dc | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 810b741..c016b12 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -11,6 +11,7 @@ from add_trailing_comma import _fix_src
from add_trailing_comma import main
+xfailif_py2 = pytest.mark.xfail(sys.version_info < (3,), reason='py3+')
xfailif_lt_py35 = pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+')
@@ -264,7 +265,7 @@ def test_noop_tuple_literal_without_braces():
# *args forbid trailing commas
'def f(\n'
' *args\n'
- '): pass'
+ '): pass',
# **kwargs forbid trailing commas
'def f(\n'
' **kwargs\n'
@@ -415,12 +416,56 @@ def test_noop_unhugs(src):
' 1,\n'
')',
),
+ (
+ 'f(\n'
+ ' *args)',
+
+ 'f(\n'
+ ' *args\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
assert _fix_src(src, py35_plus=False) == expected
+@xfailif_py2
[email protected](
+ ('src', 'expected'),
+ (
+ # python 2 doesn't give offset information for starargs
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
+ # python 2 doesn't kwonlyargs
+ (
+ 'def f(\n'
+ ' *, kw=1, kw2=2): pass',
+
+ 'def f(\n'
+ ' *, kw=1, kw2=2\n'
+ '): pass',
+ ),
+ ),
+)
+def test_fix_unhugs_py3_only(src, expected):
+ assert _fix_src(src, py35_plus=False) == expected
+
+
def test_main_trivial():
assert main(()) == 0
| microbug: `def f(*args):` and `f(*args)` are not unhugged
### minimal example
```python
def f(
*args): pass
f(
*args)
```
### expected
```python
def f(
*args
): pass
f(
*args
)
```
### actual
no change
### explain
these two cases are pruned during the trailing-comma determination, but unhugging should still apply | 0.0 | 8d87f678b13ac1497b688173e94d21d8371746dc | [
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2017-07-14 02:56:11+00:00 | mit | 1,112 |
|
asottile__all-repos-138 | diff --git a/all_repos/github_api.py b/all_repos/github_api.py
index b55f91c..2ac931d 100644
--- a/all_repos/github_api.py
+++ b/all_repos/github_api.py
@@ -44,12 +44,19 @@ def get_all(url: str, **kwargs: Any) -> List[Dict[str, Any]]:
return ret
+def _strip_trailing_dot_git(ssh_url: str) -> str:
+ if ssh_url.endswith('.git'):
+ return ssh_url[:-1 * len('.git')]
+ else:
+ return ssh_url
+
+
def filter_repos(
repos: List[Dict[str, Any]], *,
forks: bool, private: bool, collaborator: bool, archived: bool,
) -> Dict[str, str]:
return {
- repo['full_name']: '[email protected]:{}'.format(repo['full_name'])
+ repo['full_name']: _strip_trailing_dot_git(repo['ssh_url'])
for repo in repos
if (
(forks or not repo['fork']) and
| asottile/all-repos | f051a2f764277facc446cd36ac0248103efa0e14 | diff --git a/tests/github_api_test.py b/tests/github_api_test.py
index 4eff101..a87d06e 100644
--- a/tests/github_api_test.py
+++ b/tests/github_api_test.py
@@ -1,5 +1,6 @@
import pytest
+from all_repos.github_api import _strip_trailing_dot_git
from all_repos.github_api import better_repr
from all_repos.github_api import get_all
from testing.mock_http import FakeResponse
@@ -52,3 +53,25 @@ def test_get_all(mock_urlopen):
ret = get_all('https://example.com/api')
assert ret == ['page1_1', 'page1_2', 'page2_1', 'page2_2', 'page3_1']
+
+
[email protected](
+ ('val', 'expected'),
+ (
+ ('', ''),
+ (
+ '[email protected]:sass/libsass-python',
+ '[email protected]:sass/libsass-python',
+ ),
+ (
+ '[email protected]:sass/libsass-python.git',
+ '[email protected]:sass/libsass-python',
+ ),
+ (
+ '[email protected]:.git/example',
+ '[email protected]:.git/example',
+ ),
+ ),
+)
+def test_strip_trailing_dot_git(val, expected):
+ assert _strip_trailing_dot_git(val) == expected
| Cloning does not work for GitHub Enterprise installations with custom domain
Looks like `github.com` is hardcoded in the repo URL:
https://github.com/asottile/all-repos/blob/651ab491d1c3a799f677c107c2edde561364c013/all_repos/github_api.py#L52
I could get the command to work by changing this to my custom domain name.
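For reference, the patch above sidesteps the hardcoding by building the clone URL from the `ssh_url` the API already reports, which carries the enterprise host. A rough check with a hypothetical repo dict (only the fields the patch touches are shown):

```python
from all_repos.github_api import _strip_trailing_dot_git

repo = {
    'full_name': 'org/project',
    # hypothetical GitHub Enterprise host -- the real value comes from the API
    'ssh_url': '[email protected]:org/project.git',
}
print(_strip_trailing_dot_git(repo['ssh_url']))
# [email protected]:org/project
```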
Am I doing something wrong? We should add an option to allow setting this? | 0.0 | f051a2f764277facc446cd36ac0248103efa0e14 | [
"tests/github_api_test.py::test_better_repr[val0-expected0]",
"tests/github_api_test.py::test_better_repr[val1-expected1]",
"tests/github_api_test.py::test_better_repr[val2-expected2]",
"tests/github_api_test.py::test_better_repr[val3-expected3]",
"tests/github_api_test.py::test_better_repr[val4-expected4]",
"tests/github_api_test.py::test_get_all",
"tests/github_api_test.py::test_strip_trailing_dot_git[-]",
"tests/github_api_test.py::test_strip_trailing_dot_git[[email protected]:sass/[email protected]:sass/libsass-python]",
"tests/github_api_test.py::test_strip_trailing_dot_git[[email protected]:sass/[email protected]:sass/libsass-python]",
"tests/github_api_test.py::test_strip_trailing_dot_git[[email protected]:.git/[email protected]:.git/example]"
]
| []
| {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | 2020-10-02 08:28:40+00:00 | mit | 1,113 |
|
asottile__all-repos-25 | diff --git a/all_repos/find_files.py b/all_repos/find_files.py
index c6826b8..e230264 100644
--- a/all_repos/find_files.py
+++ b/all_repos/find_files.py
@@ -7,6 +7,7 @@ import sys
from all_repos import cli
from all_repos import color
from all_repos.config import load_config
+from all_repos.util import zsplit
def ls_files(config, repo):
@@ -15,7 +16,7 @@ def ls_files(config, repo):
('git', '-C', path, 'ls-files', '-z'),
stdout=subprocess.PIPE, check=True,
)
- return path, ret.stdout.rstrip(b'\0').split(b'\0')
+ return path, zsplit(ret.stdout)
def find_files(config, regex):
diff --git a/all_repos/sed.py b/all_repos/sed.py
new file mode 100644
index 0000000..b6b685d
--- /dev/null
+++ b/all_repos/sed.py
@@ -0,0 +1,69 @@
+import argparse
+import functools
+import os.path
+import shlex
+import subprocess
+
+from all_repos import autofix_lib
+from all_repos import cli
+from all_repos.util import zsplit
+
+
+def find_repos(config, *, ls_files_cmd):
+ for repo in config.get_cloned_repos():
+ repo_dir = os.path.join(config.output_dir, repo)
+ if subprocess.run(
+ ('git', '-C', repo_dir, *ls_files_cmd[1:]),
+ check=True, stdout=subprocess.PIPE,
+ ).stdout:
+ yield repo_dir
+
+
+def apply_fix(*, ls_files_cmd, sed_cmd):
+ filenames = zsplit(subprocess.check_output(ls_files_cmd))
+ filenames = [f.decode() for f in filenames]
+ autofix_lib.run(*sed_cmd, *filenames)
+
+
+def _quote_cmd(cmd):
+ return ' '.join(shlex.quote(arg) for arg in cmd)
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser()
+ cli.add_fixer_args(parser)
+ parser.add_argument(
+ '-r', '--regexp-extended',
+ action='store_true',
+ help='use extended regular expressions in the script.',
+ )
+ parser.add_argument('--branch-name', default='all-repos-sed')
+ parser.add_argument('--commit-msg')
+ parser.add_argument('pattern')
+ parser.add_argument('filenames_glob', help='(passed to ls-files)')
+ args = parser.parse_args(argv)
+
+ dash_r = ('-r',) if args.regexp_extended else ()
+ sed_cmd = ('sed', '-i', *dash_r, args.pattern)
+ ls_files_cmd = ('git', 'ls-files', '-z', '--', args.filenames_glob)
+
+ msg = f'{_quote_cmd(ls_files_cmd)} | xargs -0 {_quote_cmd(sed_cmd)}'
+ msg = args.commit_msg or msg
+
+ repos, config, commit, autofix_settings = autofix_lib.from_cli(
+ args,
+ find_repos=functools.partial(find_repos, ls_files_cmd=ls_files_cmd),
+ msg=msg, branch_name=args.branch_name,
+ )
+
+ autofix_lib.fix(
+ repos,
+ apply_fix=functools.partial(
+ apply_fix, ls_files_cmd=ls_files_cmd, sed_cmd=sed_cmd,
+ ),
+ config=config, commit=commit, autofix_settings=autofix_settings,
+ )
+
+
+if __name__ == '__main__':
+ exit(main())
diff --git a/all_repos/util.py b/all_repos/util.py
new file mode 100644
index 0000000..a689acb
--- /dev/null
+++ b/all_repos/util.py
@@ -0,0 +1,5 @@
+def zsplit(bs):
+ if bs:
+ return bs.rstrip(b'\0').split(b'\0')
+ else:
+ return []
diff --git a/setup.py b/setup.py
index 5b79eb7..51c20ae 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ setup(
'all-repos-clone=all_repos.clone:main',
'all-repos-find-files=all_repos.find_files:main',
'all-repos-grep=all_repos.grep:main',
+ 'all-repos-sed=all_repos.sed:main',
],
},
)
| asottile/all-repos | 0c1622739a951e5904debb622386e0840c751164 | diff --git a/tests/sed_test.py b/tests/sed_test.py
new file mode 100644
index 0000000..bee579c
--- /dev/null
+++ b/tests/sed_test.py
@@ -0,0 +1,25 @@
+from all_repos import clone
+from all_repos.sed import main
+from testing.git import write_file_commit
+
+
+def test_main(file_config_files):
+ clone.main(('--config-filename', str(file_config_files.cfg)))
+ assert not main((
+ '--config-filename', str(file_config_files.cfg),
+ 's/HAI/BAI/g', '*',
+ ))
+ assert file_config_files.dir1.join('f').read() == 'OBAI\n'
+ assert file_config_files.dir2.join('f').read() == 'OHELLO\n'
+
+
+def test_main_custom_file_pattern(file_config_files):
+ write_file_commit(file_config_files.dir1, 'g', 'OHAI\n')
+ clone.main(('--config-filename', str(file_config_files.cfg)))
+ assert not main((
+ '--config-filename', str(file_config_files.cfg),
+ 's/AI/IE/g', 'g',
+ ))
+ assert file_config_files.dir1.join('f').read() == 'OHAI\n'
+ assert file_config_files.dir1.join('g').read() == 'OHIE\n'
+ assert file_config_files.dir2.join('f').read() == 'OHELLO\n'
diff --git a/tests/util_test.py b/tests/util_test.py
new file mode 100644
index 0000000..474efb8
--- /dev/null
+++ b/tests/util_test.py
@@ -0,0 +1,15 @@
+import pytest
+
+from all_repos.util import zsplit
+
+
[email protected](
+ ('bs', 'expected'),
+ (
+ (b'', []),
+ (b'\0', [b'']),
+ (b'a\0b\0', [b'a', b'b']),
+ ),
+)
+def test_zsplit(bs, expected):
+ assert zsplit(bs) == expected
| Make an `all-repos-sed`
Sometimes I don't want to write a full-blown fixer and really just want some nice `sed -i`.
I think the interface would be something like:
```
$ all-repos-sed 's/foo/bar/g' -- baz.f
```
And this tool would make a fixer which essentially does the following (for each repository):
```
git ls-files -- baz.f | xargs sed -i 's/foo/bar/g'
``` | 0.0 | 0c1622739a951e5904debb622386e0840c751164 | [
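For what it's worth, the interface that landed takes the sed expression and a filename glob positionally (as exercised by the tests above), so usage looks like this (the config filename here is illustrative):

```
$ all-repos-sed --config-filename all-repos.json 's/HAI/BAI/g' '*'
```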
"tests/util_test.py::test_zsplit[-expected0]",
"tests/util_test.py::test_zsplit[\\x00-expected1]",
"tests/util_test.py::test_zsplit[a\\x00b\\x00-expected2]"
]
| []
| {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2017-10-21 15:48:17+00:00 | mit | 1,114 |
|
asottile__all-repos-29 | diff --git a/README.md b/README.md
index d81aa01..d0fd9e3 100644
--- a/README.md
+++ b/README.md
@@ -184,6 +184,29 @@ Clones all repositories available to a user on github.
- `forks` (default `false`): whether to include repositories which are forks.
- `private` (default `false`): whether to include private repositories.
+### `all_repos.source.gitolite`
+
+Clones all repositories available to a user on a
+[gitolite](http://gitolite.com/gitolite/index.html) host.
+
+#### Required `source_settings`
+
+- `username`: the user to SSH to the server as (usually `git`)
+- `hostname`: the hostname of your gitolite server (e.g. `git.mycompany.com`)
+
+The gitolite API is served over SSH. It is assumed that when `all-repos-clone`
+is called, it's possible to make SSH connections with the username and hostname
+configured here in order to query that API.
+
+#### Optional `source_settings`
+
+- `mirror_path` (default `None`): an optional mirror to clone repositories from.
+ This is a Python format string, and can use the variable `repo_name`.
+
+ This can be anything git understands, such as another remote server (e.g.
+ `gitmirror.mycompany.com:{repo_name}`) or a local path (e.g.
+ `/gitolite/git/{repo_name}.git`).
+
#### Directory location
```
diff --git a/all_repos/source/gitolite.py b/all_repos/source/gitolite.py
new file mode 100644
index 0000000..4cb9cc2
--- /dev/null
+++ b/all_repos/source/gitolite.py
@@ -0,0 +1,39 @@
+import collections
+import json
+import subprocess
+from typing import Dict
+from typing import List
+
+
+class Settings(collections.namedtuple(
+ 'Settings', ('username', 'hostname', 'mirror_path'),
+)):
+
+ __slots__ = ()
+
+ def clone_url(self, repo_name):
+ return (
+ self.mirror_path or
+ f'{self.username}@{self.hostname}:{{repo_name}}'
+ ).format(repo_name=repo_name)
+
+
+Settings.__new__.__defaults__ = (None,)
+
+
+def _repo_names_from_source(settings: Settings) -> List[str]:
+ info = subprocess.check_output(
+ ('ssh', f'{settings.username}@{settings.hostname}', 'info', '-json'),
+ )
+ info = json.loads(info.decode('UTF-8'))
+ return set(info['repos'])
+
+
+def list_repos(settings: Settings) -> Dict[str, str]:
+ return {
+ # Repo names have ".git" appended to avoid naming conflicts between
+ # repos and directories in the gitolite hierarchy (a path could
+ # otherwise be both).
+ f'{repo_name}.git': settings.clone_url(repo_name)
+ for repo_name in _repo_names_from_source(settings)
+ }
diff --git a/tox.ini b/tox.ini
index 4b6f9a9..5487f6c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@ commands =
pre-commit run --all-files
[testenv:venv]
-envdir = venv-{[tox]project}
+envdir = venv
commands =
[pep8]
| asottile/all-repos | 6835d50a1d65e98a44a21386ce6ec37703ce8f93 | diff --git a/tests/source/gitolite_test.py b/tests/source/gitolite_test.py
new file mode 100644
index 0000000..a76d4a9
--- /dev/null
+++ b/tests/source/gitolite_test.py
@@ -0,0 +1,78 @@
+import json
+import subprocess
+from unittest import mock
+
+import pytest
+
+from all_repos.source import gitolite
+
+
[email protected]
+def settings():
+ return gitolite.Settings(
+ username='git',
+ hostname='git.mycompany.com',
+ )
+
+
+def test_clone_url_default(settings):
+ assert settings.clone_url('some_package') == (
+ '[email protected]:some_package'
+ )
+ assert settings.clone_url('some/nested/package') == (
+ '[email protected]:some/nested/package'
+ )
+
+
+def test_clone_url_custom_mirror_path(settings):
+ settings = settings._replace(
+ mirror_path='/gitolite/git/{repo_name}.git',
+ )
+ assert settings.clone_url('some_package') == (
+ '/gitolite/git/some_package.git'
+ )
+ assert settings.clone_url('some/nested/package') == (
+ '/gitolite/git/some/nested/package.git'
+ )
+
+
[email protected]
+def fake_info_response():
+ response = json.dumps({
+ 'repos': {
+ 'some_rw_repo': {
+ 'perms': {'R': 1, 'W': 1},
+ },
+ 'some_ro_repo': {
+ 'perms': {'R': 1},
+ },
+ },
+ 'gitolite_version': '1.2.3',
+ 'USER': 'git@somehost',
+ 'GL_USER': 'someuser',
+ 'git_version': '1.2.3',
+ }).encode('UTF-8')
+
+ side_effect = {
+ ('ssh', '[email protected]', 'info', '-json'): response,
+ }.__getitem__
+
+ with mock.patch.object(
+ subprocess, 'check_output', side_effect=side_effect,
+ ):
+ yield
+
+
[email protected]('fake_info_response')
+def test_repo_names_from_source(settings):
+ assert gitolite._repo_names_from_source(settings) == {
+ 'some_rw_repo', 'some_ro_repo',
+ }
+
+
[email protected]('fake_info_response')
+def test_list_repos(settings):
+ assert gitolite.list_repos(settings) == {
+ 'some_rw_repo.git': '[email protected]:some_rw_repo',
+ 'some_ro_repo.git': '[email protected]:some_ro_repo',
+ }
| gitolite source
Hi friend :)
I'd like to add a gitolite source and wanted to run the approach by you before sending a PR. A couple questions:
1. For getting the list of repositories, it seems the only way the gitolite API itself provides is [the `info` command](http://gitolite.com/gitolite/user/#the-info-command). It provides a handy `-json` flag, e.g.:
```
$ ssh [email protected] info -json
{
"repos" : {
"a_repo" : {
"perms" : {
"W" : 1,
"R" : 1
}
},
"some_repo-DEPRECATED" : {
"perms" : {
# "W" key is omitted entirely
"R" : 1
}
}
},
"gitolite_version" : "1.2.3",
"USER" : "git@somehost",
"GL_USER" : "ckuehl",
"git_version" : "1.2.3"
}
```
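For illustration, consuming that response from Python is tiny; this mirrors the subprocess approach taken in the patch above (the hostname is a placeholder):

```python
import json
import subprocess

# query the gitolite API over SSH and collect the repo names
out = subprocess.check_output(('ssh', '[email protected]', 'info', '-json'))
repo_names = set(json.loads(out.decode('UTF-8'))['repos'])
# with the example response above: {'a_repo', 'some_repo-DEPRECATED'}
```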
Potentially we could provide other ways to get this list (e.g. something like y/gits), but I'm thinking if I wanted to do that, I'd just format that into a JSON file and use the JSON file source.
So, does over SSH sound okay? And if so, do you prefer subprocessing out to ssh or using paramiko? (I think I prefer the subprocess approach just to avoid adding a new dep?)
2. Actually I guess I only had one question. This looks pretty straightforward to implement :P | 0.0 | 6835d50a1d65e98a44a21386ce6ec37703ce8f93 | [
"tests/source/gitolite_test.py::test_clone_url_default",
"tests/source/gitolite_test.py::test_clone_url_custom_mirror_path",
"tests/source/gitolite_test.py::test_repo_names_from_source",
"tests/source/gitolite_test.py::test_list_repos"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | 2017-10-21 21:50:45+00:00 | mit | 1,115 |
|
asottile__all-repos-322 | diff --git a/all_repos/push/azure_repos_pull_request.py b/all_repos/push/azure_repos_pull_request.py
index 4d1e075..58d71e8 100644
--- a/all_repos/push/azure_repos_pull_request.py
+++ b/all_repos/push/azure_repos_pull_request.py
@@ -19,6 +19,7 @@ class Settings(NamedTuple):
base_url: str = 'https://dev.azure.com'
api_key: str | None = None
api_key_env: str | None = None
+ draft: bool = False
def __repr__(self) -> str:
return hide_api_key_repr(self)
@@ -53,6 +54,7 @@ def make_pull_request(
'description': body.decode().strip(),
'sourceRefName': f'refs/heads/{head}',
'targetRefName': f'refs/heads/{autofix_lib.target_branch()}',
+ 'isDraft': settings.draft,
}).encode()
pull_request_url = (
| asottile/all-repos | 96c25dcfb9be6f2fc21625df1c723282e1cc80a3 | diff --git a/tests/push/azure_repos_pull_request_test.py b/tests/push/azure_repos_pull_request_test.py
index c36c400..b993a97 100644
--- a/tests/push/azure_repos_pull_request_test.py
+++ b/tests/push/azure_repos_pull_request_test.py
@@ -82,5 +82,6 @@ def test_settings_repr():
" base_url='https://dev.azure.com',\n"
' api_key=...,\n'
' api_key_env=None,\n'
+ ' draft=False,\n'
')'
)
| Support creating draft PRs for AzDO
Hey, I successfully configured the tool to run with Azure DevOps, but I noticed that the config included in the repo always results in standard PRs being created. In my organization, a standard PR automatically triggers CI, so when a lot of PRs are created it builds up long queues for the CI agents.
For me, the simplest solution was to create draft PRs instead. AzDO supports an `isDraft` parameter in the API call ([docs](https://learn.microsoft.com/en-us/rest/api/azure/devops/git/pull-requests/create?view=azure-devops-rest-6.0)), so I did a minimal change in #322, and it seems to be working fine after adding `"draft": true` to `"push_settings"` in the all-repos JSON config.
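For reference, the relevant fragment of the all-repos JSON config would then be (other push settings elided; they keep their defaults):

```json
"push": "all_repos.push.azure_repos_pull_request",
"push_settings": {
    "draft": true
}
```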
I thought that it might be useful for somebody out there, hence this issue and PR. Shall we merge it? | 0.0 | 96c25dcfb9be6f2fc21625df1c723282e1cc80a3 | [
"tests/push/azure_repos_pull_request_test.py::test_settings_repr"
]
| []
| {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | 2024-01-24 16:34:16+00:00 | mit | 1,116 |
|
asottile__all-repos-72 | diff --git a/README.md b/README.md
index 821c955..35f0218 100644
--- a/README.md
+++ b/README.md
@@ -362,6 +362,14 @@ branch.
to the upstream repository.
+### `all_repos.push.readonly`
+
+Does nothing.
+
+#### `push_settings`
+
+There are no configurable settings for `readonly`.
+
## Writing your own push module
First create a module. This module must have the following api:
diff --git a/all_repos/push/readonly.py b/all_repos/push/readonly.py
new file mode 100644
index 0000000..d74af6b
--- /dev/null
+++ b/all_repos/push/readonly.py
@@ -0,0 +1,7 @@
+import collections
+
+Settings = collections.namedtuple('Settings', ())
+
+
+def push(settings: Settings, branch_name: str) -> None:
+ return None
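(With this module in place, a read-only configuration needs only the following fragment; note the module landed as `readonly` rather than the `noop` name floated in the issue below.)

```json
"push": "all_repos.push.readonly",
"push_settings": {}
```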
| asottile/all-repos | 50174533dc9b9b70ca80ac38f8675e2e3ba400ee | diff --git a/tests/push/readonly_test.py b/tests/push/readonly_test.py
new file mode 100644
index 0000000..eaba410
--- /dev/null
+++ b/tests/push/readonly_test.py
@@ -0,0 +1,6 @@
+from all_repos.push import readonly
+
+
+def test_does_not_fire_missiles():
+ # does nothing, assert it doesn't crash
+ readonly.push(readonly.Settings(), 'branch')
| Allow a "read only" mode where `push_settings` are optional
Basically, make the configuration lazily load the various parts for pushing/pulling.
This would enable one to make a read-only all-repos.
I guess this could just be implemented as a builtin `noop` push and then:
```json
"push": "all_repos.push.noop",
"push_settings": {}
``` | 0.0 | 50174533dc9b9b70ca80ac38f8675e2e3ba400ee | [
"tests/push/readonly_test.py::test_does_not_fire_missiles"
]
| []
| {
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | 2019-01-15 22:30:32+00:00 | mit | 1,117 |
|
asottile__all-repos-91 | diff --git a/all_repos/github_api.py b/all_repos/github_api.py
index c35a60c..066b1f2 100644
--- a/all_repos/github_api.py
+++ b/all_repos/github_api.py
@@ -49,7 +49,7 @@ def get_all(url: str, **kwargs: Any) -> List[Dict[str, Any]]:
def filter_repos(
repos: List[Dict[str, Any]], *,
- forks: bool, private: bool, collaborator: bool,
+ forks: bool, private: bool, collaborator: bool, archived: bool,
) -> Dict[str, str]:
return {
repo['full_name']: '[email protected]:{}'.format(repo['full_name'])
@@ -57,7 +57,8 @@ def filter_repos(
if (
(forks or not repo['fork']) and
(private or not repo['private']) and
- (collaborator or repo['permissions']['admin'])
+ (collaborator or repo['permissions']['admin']) and
+ (archived or not repo['archived'])
)
}
diff --git a/all_repos/source/github.py b/all_repos/source/github.py
index 58c5828..d8988fd 100644
--- a/all_repos/source/github.py
+++ b/all_repos/source/github.py
@@ -10,6 +10,7 @@ class Settings(NamedTuple):
collaborator: bool = False
forks: bool = False
private: bool = False
+ archived: bool = False
def list_repos(settings: Settings) -> Dict[str, str]:
@@ -22,4 +23,5 @@ def list_repos(settings: Settings) -> Dict[str, str]:
forks=settings.forks,
private=settings.private,
collaborator=settings.collaborator,
+ archived=settings.archived,
)
diff --git a/all_repos/source/github_org.py b/all_repos/source/github_org.py
index 4c9b32c..d1c8d5c 100644
--- a/all_repos/source/github_org.py
+++ b/all_repos/source/github_org.py
@@ -10,6 +10,7 @@ class Settings(NamedTuple):
collaborator: bool = True
forks: bool = False
private: bool = False
+ archived: bool = False
def list_repos(settings: Settings) -> Dict[str, str]:
@@ -22,4 +23,5 @@ def list_repos(settings: Settings) -> Dict[str, str]:
forks=settings.forks,
private=settings.private,
collaborator=settings.collaborator,
+ archived=settings.archived,
)
| asottile/all-repos | 27aba1dda4e69ae598f8e676006c86c61eab5559 | diff --git a/testing/resources/github/eecs381-p4.json b/testing/resources/github/eecs381-p4.json
index ff74ee2..51be5ff 100644
--- a/testing/resources/github/eecs381-p4.json
+++ b/testing/resources/github/eecs381-p4.json
@@ -1,1 +1,1 @@
-{"issues_url":"https://api.github.com/repos/asottile/eecs381-p4/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/eecs381-p4/deployments","stargazers_count":0,"forks_url":"https://api.github.com/repos/asottile/eecs381-p4/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/eecs381-p4/subscription","notifications_url":"https://api.github.com/repos/asottile/eecs381-p4/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/eecs381-p4/collaborators{/collaborator}","updated_at":"2016-06-13T00:47:54Z","private":true,"pulls_url":"https://api.github.com/repos/asottile/eecs381-p4/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/eecs381-p4/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/eecs381-p4/labels{/name}","has_wiki":true,"full_name":"asottile/eecs381-p4","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/eecs381-p4/statuses/{sha}","id":60996329,"keys_url":"https://api.github.com/repos/asottile/eecs381-p4/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/eecs381-p4/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/eecs381-p4/tags","downloads_url":"https://api.github.com/repos/asottile/eecs381-p4/downloads","assignees_url":"https://api.github.com/repos/asottile/eecs381-p4/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/eecs381-p4/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/eecs381-p4/git/refs{/sha}","open_issues_count":0,"has_projects":true,"clone_url":"https://github.com/asottile/eecs381-p4.git","watchers_count":0,"git_tags_url":"https://api.github.com/repos/asottile/eecs381-p4/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/eecs381-p4/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/eecs381-p4/languages","size":379,"homepage":null,"fork":false,"commits_url":"https://api.github.com/repos/asottile/eecs381-p4/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/eecs381-p4/releases{/id}","description":null,"archive_url":"https://api.github.com/repos/asottile/eecs381-p4/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/eecs381-p4/comments{/number}","events_url":"https://api.github.com/repos/asottile/eecs381-p4/events","contributors_url":"https://api.github.com/repos/asottile/eecs381-p4/contributors","html_url":"https://github.com/asottile/eecs381-p4","forks":0,"compare_url":"https://api.github.com/repos/asottile/eecs381-p4/compare/{base}...{head}","open_issues":0,"git_u
rl":"git://github.com/asottile/eecs381-p4.git","svn_url":"https://github.com/asottile/eecs381-p4","merges_url":"https://api.github.com/repos/asottile/eecs381-p4/merges","has_issues":true,"ssh_url":"[email protected]:asottile/eecs381-p4.git","blobs_url":"https://api.github.com/repos/asottile/eecs381-p4/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/eecs381-p4/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/eecs381-p4/hooks","has_downloads":true,"watchers":0,"name":"eecs381-p4","language":"C++","url":"https://api.github.com/repos/asottile/eecs381-p4","created_at":"2016-06-13T00:36:50Z","pushed_at":"2016-06-13T00:47:52Z","forks_count":0,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/eecs381-p4/teams","trees_url":"https://api.github.com/repos/asottile/eecs381-p4/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/eecs381-p4/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/eecs381-p4/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/eecs381-p4/stargazers"}
+{"archived": false, "issues_url":"https://api.github.com/repos/asottile/eecs381-p4/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/eecs381-p4/deployments","stargazers_count":0,"forks_url":"https://api.github.com/repos/asottile/eecs381-p4/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/eecs381-p4/subscription","notifications_url":"https://api.github.com/repos/asottile/eecs381-p4/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/eecs381-p4/collaborators{/collaborator}","updated_at":"2016-06-13T00:47:54Z","private":true,"pulls_url":"https://api.github.com/repos/asottile/eecs381-p4/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/eecs381-p4/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/eecs381-p4/labels{/name}","has_wiki":true,"full_name":"asottile/eecs381-p4","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/eecs381-p4/statuses/{sha}","id":60996329,"keys_url":"https://api.github.com/repos/asottile/eecs381-p4/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/eecs381-p4/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/eecs381-p4/tags","downloads_url":"https://api.github.com/repos/asottile/eecs381-p4/downloads","assignees_url":"https://api.github.com/repos/asottile/eecs381-p4/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/eecs381-p4/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/eecs381-p4/git/refs{/sha}","open_issues_count":0,"has_projects":true,"clone_url":"https://github.com/asottile/eecs381-p4.git","watchers_count":0,"git_tags_url":"https://api.github.com/repos/asottile/eecs381-p4/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/eecs381-p4/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/eecs381-p4/languages","size":379,"homepage":null,"fork":false,"commits_url":"https://api.github.com/repos/asottile/eecs381-p4/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/eecs381-p4/releases{/id}","description":null,"archive_url":"https://api.github.com/repos/asottile/eecs381-p4/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/eecs381-p4/comments{/number}","events_url":"https://api.github.com/repos/asottile/eecs381-p4/events","contributors_url":"https://api.github.com/repos/asottile/eecs381-p4/contributors","html_url":"https://github.com/asottile/eecs381-p4","forks":0,"compare_url":"https://api.github.com/repos/asottile/eecs381-p4/compare/{base}...{head}","op
en_issues":0,"git_url":"git://github.com/asottile/eecs381-p4.git","svn_url":"https://github.com/asottile/eecs381-p4","merges_url":"https://api.github.com/repos/asottile/eecs381-p4/merges","has_issues":true,"ssh_url":"[email protected]:asottile/eecs381-p4.git","blobs_url":"https://api.github.com/repos/asottile/eecs381-p4/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/eecs381-p4/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/eecs381-p4/hooks","has_downloads":true,"watchers":0,"name":"eecs381-p4","language":"C++","url":"https://api.github.com/repos/asottile/eecs381-p4","created_at":"2016-06-13T00:36:50Z","pushed_at":"2016-06-13T00:47:52Z","forks_count":0,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/eecs381-p4/teams","trees_url":"https://api.github.com/repos/asottile/eecs381-p4/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/eecs381-p4/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/eecs381-p4/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/eecs381-p4/stargazers"}
diff --git a/testing/resources/github/git-code-debt.json b/testing/resources/github/git-code-debt.json
index d9bd125..98ca315 100644
--- a/testing/resources/github/git-code-debt.json
+++ b/testing/resources/github/git-code-debt.json
@@ -1,1 +1,1 @@
-{"issues_url":"https://api.github.com/repos/asottile/git-code-debt/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/git-code-debt/deployments","stargazers_count":291,"forks_url":"https://api.github.com/repos/asottile/git-code-debt/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/git-code-debt/subscription","notifications_url":"https://api.github.com/repos/asottile/git-code-debt/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/git-code-debt/collaborators{/collaborator}","updated_at":"2017-08-10T03:25:15Z","private":false,"pulls_url":"https://api.github.com/repos/asottile/git-code-debt/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/git-code-debt/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/git-code-debt/labels{/name}","has_wiki":true,"full_name":"asottile/git-code-debt","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/git-code-debt/statuses/{sha}","id":14399837,"keys_url":"https://api.github.com/repos/asottile/git-code-debt/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/git-code-debt/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/git-code-debt/tags","downloads_url":"https://api.github.com/repos/asottile/git-code-debt/downloads","assignees_url":"https://api.github.com/repos/asottile/git-code-debt/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/git-code-debt/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/git-code-debt/git/refs{/sha}","open_issues_count":3,"has_projects":true,"clone_url":"https://github.com/asottile/git-code-debt.git","watchers_count":291,"git_tags_url":"https://api.github.com/repos/asottile/git-code-debt/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/git-code-debt/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/git-code-debt/languages","size":465,"homepage":null,"fork":false,"commits_url":"https://api.github.com/repos/asottile/git-code-debt/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/git-code-debt/releases{/id}","description":"A dashboard for monitoring code debt in a git 
repository.","archive_url":"https://api.github.com/repos/asottile/git-code-debt/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/git-code-debt/comments{/number}","events_url":"https://api.github.com/repos/asottile/git-code-debt/events","contributors_url":"https://api.github.com/repos/asottile/git-code-debt/contributors","html_url":"https://github.com/asottile/git-code-debt","forks":17,"compare_url":"https://api.github.com/repos/asottile/git-code-debt/compare/{base}...{head}","open_issues":3,"git_url":"git://github.com/asottile/git-code-debt.git","svn_url":"https://github.com/asottile/git-code-debt","merges_url":"https://api.github.com/repos/asottile/git-code-debt/merges","has_issues":true,"ssh_url":"[email protected]:asottile/git-code-debt.git","blobs_url":"https://api.github.com/repos/asottile/git-code-debt/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/git-code-debt/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/git-code-debt/hooks","has_downloads":true,"watchers":291,"name":"git-code-debt","language":"Python","url":"https://api.github.com/repos/asottile/git-code-debt","created_at":"2013-11-14T16:05:41Z","pushed_at":"2017-08-15T02:16:02Z","forks_count":17,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/git-code-debt/teams","trees_url":"https://api.github.com/repos/asottile/git-code-debt/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/git-code-debt/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/git-code-debt/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/git-code-debt/stargazers"}
+{"archived": false, "issues_url":"https://api.github.com/repos/asottile/git-code-debt/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/git-code-debt/deployments","stargazers_count":291,"forks_url":"https://api.github.com/repos/asottile/git-code-debt/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/git-code-debt/subscription","notifications_url":"https://api.github.com/repos/asottile/git-code-debt/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/git-code-debt/collaborators{/collaborator}","updated_at":"2017-08-10T03:25:15Z","private":false,"pulls_url":"https://api.github.com/repos/asottile/git-code-debt/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/git-code-debt/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/git-code-debt/labels{/name}","has_wiki":true,"full_name":"asottile/git-code-debt","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/git-code-debt/statuses/{sha}","id":14399837,"keys_url":"https://api.github.com/repos/asottile/git-code-debt/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/git-code-debt/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/git-code-debt/tags","downloads_url":"https://api.github.com/repos/asottile/git-code-debt/downloads","assignees_url":"https://api.github.com/repos/asottile/git-code-debt/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/git-code-debt/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/git-code-debt/git/refs{/sha}","open_issues_count":3,"has_projects":true,"clone_url":"https://github.com/asottile/git-code-debt.git","watchers_count":291,"git_tags_url":"https://api.github.com/repos/asottile/git-code-debt/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/git-code-debt/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/git-code-debt/languages","size":465,"homepage":null,"fork":false,"commits_url":"https://api.github.com/repos/asottile/git-code-debt/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/git-code-debt/releases{/id}","description":"A dashboard for monitoring code debt in a git 
repository.","archive_url":"https://api.github.com/repos/asottile/git-code-debt/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/git-code-debt/comments{/number}","events_url":"https://api.github.com/repos/asottile/git-code-debt/events","contributors_url":"https://api.github.com/repos/asottile/git-code-debt/contributors","html_url":"https://github.com/asottile/git-code-debt","forks":17,"compare_url":"https://api.github.com/repos/asottile/git-code-debt/compare/{base}...{head}","open_issues":3,"git_url":"git://github.com/asottile/git-code-debt.git","svn_url":"https://github.com/asottile/git-code-debt","merges_url":"https://api.github.com/repos/asottile/git-code-debt/merges","has_issues":true,"ssh_url":"[email protected]:asottile/git-code-debt.git","blobs_url":"https://api.github.com/repos/asottile/git-code-debt/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/git-code-debt/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/git-code-debt/hooks","has_downloads":true,"watchers":291,"name":"git-code-debt","language":"Python","url":"https://api.github.com/repos/asottile/git-code-debt","created_at":"2013-11-14T16:05:41Z","pushed_at":"2017-08-15T02:16:02Z","forks_count":17,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/git-code-debt/teams","trees_url":"https://api.github.com/repos/asottile/git-code-debt/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/git-code-debt/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/git-code-debt/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/git-code-debt/stargazers"}
diff --git a/testing/resources/github/libsass-python.json b/testing/resources/github/libsass-python.json
index b7a8a28..36183c6 100644
--- a/testing/resources/github/libsass-python.json
+++ b/testing/resources/github/libsass-python.json
@@ -1,1 +1,1 @@
-{"issues_url":"https://api.github.com/repos/sass/libsass-python/issues{/number}","deployments_url":"https://api.github.com/repos/sass/libsass-python/deployments","stargazers_count":330,"forks_url":"https://api.github.com/repos/sass/libsass-python/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/sass/libsass-python/subscription","notifications_url":"https://api.github.com/repos/sass/libsass-python/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/sass/libsass-python/collaborators{/collaborator}","updated_at":"2017-08-29T03:53:21Z","private":false,"pulls_url":"https://api.github.com/repos/sass/libsass-python/pulls{/number}","issue_comment_url":"https://api.github.com/repos/sass/libsass-python/issues/comments{/number}","labels_url":"https://api.github.com/repos/sass/libsass-python/labels{/name}","has_wiki":false,"full_name":"sass/libsass-python","owner":{"following_url":"https://api.github.com/users/sass/following{/other_user}","events_url":"https://api.github.com/users/sass/events{/privacy}","organizations_url":"https://api.github.com/users/sass/orgs","url":"https://api.github.com/users/sass","gists_url":"https://api.github.com/users/sass/gists{/gist_id}","html_url":"https://github.com/sass","subscriptions_url":"https://api.github.com/users/sass/subscriptions","avatar_url":"https://avatars1.githubusercontent.com/u/12431?v=4","repos_url":"https://api.github.com/users/sass/repos","received_events_url":"https://api.github.com/users/sass/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/sass/starred{/owner}{/repo}","site_admin":false,"login":"sass","type":"User","id":12431,"followers_url":"https://api.github.com/users/sass/followers"},"statuses_url":"https://api.github.com/repos/sass/libsass-python/statuses/{sha}","id":5433677,"keys_url":"https://api.github.com/repos/sass/libsass-python/keys{/key_id}","issue_events_url":"https://api.github.com/repos/sass/libsass-python/issues/events{/number}","tags_url":"https://api.github.com/repos/sass/libsass-python/tags","downloads_url":"https://api.github.com/repos/sass/libsass-python/downloads","assignees_url":"https://api.github.com/repos/sass/libsass-python/assignees{/user}","contents_url":"https://api.github.com/repos/sass/libsass-python/contents/{+path}","has_pages":true,"git_refs_url":"https://api.github.com/repos/sass/libsass-python/git/refs{/sha}","open_issues_count":10,"has_projects":false,"clone_url":"https://github.com/sass/libsass-python.git","watchers_count":330,"git_tags_url":"https://api.github.com/repos/sass/libsass-python/git/tags{/sha}","milestones_url":"https://api.github.com/repos/sass/libsass-python/milestones{/number}","languages_url":"https://api.github.com/repos/sass/libsass-python/languages","size":7081,"homepage":"https://hongminhee.org/libsass-python/","fork":false,"commits_url":"https://api.github.com/repos/sass/libsass-python/commits{/sha}","releases_url":"https://api.github.com/repos/sass/libsass-python/releases{/id}","description":"A straightforward binding of libsass for Python. 
Compile Sass/SCSS in Python with no Ruby stack at all!","archive_url":"https://api.github.com/repos/sass/libsass-python/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/sass/libsass-python/comments{/number}","events_url":"https://api.github.com/repos/sass/libsass-python/events","contributors_url":"https://api.github.com/repos/sass/libsass-python/contributors","html_url":"https://github.com/sass/libsass-python","forks":30,"compare_url":"https://api.github.com/repos/sass/libsass-python/compare/{base}...{head}","open_issues":10,"git_url":"git://github.com/sass/libsass-python.git","svn_url":"https://github.com/sass/libsass-python","merges_url":"https://api.github.com/repos/sass/libsass-python/merges","has_issues":true,"ssh_url":"[email protected]:sass/libsass-python.git","blobs_url":"https://api.github.com/repos/sass/libsass-python/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/sass/libsass-python/git/commits{/sha}","hooks_url":"https://api.github.com/repos/sass/libsass-python/hooks","has_downloads":true,"watchers":330,"name":"libsass-python","language":"Python","url":"https://api.github.com/repos/sass/libsass-python","created_at":"2012-08-16T01:31:33Z","pushed_at":"2017-08-08T14:53:30Z","forks_count":30,"default_branch":"master","teams_url":"https://api.github.com/repos/sass/libsass-python/teams","trees_url":"https://api.github.com/repos/sass/libsass-python/git/trees{/sha}","branches_url":"https://api.github.com/repos/sass/libsass-python/branches{/branch}","subscribers_url":"https://api.github.com/repos/sass/libsass-python/subscribers","permissions":{"admin":false,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/sass/libsass-python/stargazers"}
+{"archived": false, "issues_url":"https://api.github.com/repos/sass/libsass-python/issues{/number}","deployments_url":"https://api.github.com/repos/sass/libsass-python/deployments","stargazers_count":330,"forks_url":"https://api.github.com/repos/sass/libsass-python/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/sass/libsass-python/subscription","notifications_url":"https://api.github.com/repos/sass/libsass-python/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/sass/libsass-python/collaborators{/collaborator}","updated_at":"2017-08-29T03:53:21Z","private":false,"pulls_url":"https://api.github.com/repos/sass/libsass-python/pulls{/number}","issue_comment_url":"https://api.github.com/repos/sass/libsass-python/issues/comments{/number}","labels_url":"https://api.github.com/repos/sass/libsass-python/labels{/name}","has_wiki":false,"full_name":"sass/libsass-python","owner":{"following_url":"https://api.github.com/users/sass/following{/other_user}","events_url":"https://api.github.com/users/sass/events{/privacy}","organizations_url":"https://api.github.com/users/sass/orgs","url":"https://api.github.com/users/sass","gists_url":"https://api.github.com/users/sass/gists{/gist_id}","html_url":"https://github.com/sass","subscriptions_url":"https://api.github.com/users/sass/subscriptions","avatar_url":"https://avatars1.githubusercontent.com/u/12431?v=4","repos_url":"https://api.github.com/users/sass/repos","received_events_url":"https://api.github.com/users/sass/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/sass/starred{/owner}{/repo}","site_admin":false,"login":"sass","type":"User","id":12431,"followers_url":"https://api.github.com/users/sass/followers"},"statuses_url":"https://api.github.com/repos/sass/libsass-python/statuses/{sha}","id":5433677,"keys_url":"https://api.github.com/repos/sass/libsass-python/keys{/key_id}","issue_events_url":"https://api.github.com/repos/sass/libsass-python/issues/events{/number}","tags_url":"https://api.github.com/repos/sass/libsass-python/tags","downloads_url":"https://api.github.com/repos/sass/libsass-python/downloads","assignees_url":"https://api.github.com/repos/sass/libsass-python/assignees{/user}","contents_url":"https://api.github.com/repos/sass/libsass-python/contents/{+path}","has_pages":true,"git_refs_url":"https://api.github.com/repos/sass/libsass-python/git/refs{/sha}","open_issues_count":10,"has_projects":false,"clone_url":"https://github.com/sass/libsass-python.git","watchers_count":330,"git_tags_url":"https://api.github.com/repos/sass/libsass-python/git/tags{/sha}","milestones_url":"https://api.github.com/repos/sass/libsass-python/milestones{/number}","languages_url":"https://api.github.com/repos/sass/libsass-python/languages","size":7081,"homepage":"https://hongminhee.org/libsass-python/","fork":false,"commits_url":"https://api.github.com/repos/sass/libsass-python/commits{/sha}","releases_url":"https://api.github.com/repos/sass/libsass-python/releases{/id}","description":"A straightforward binding of libsass for Python. 
Compile Sass/SCSS in Python with no Ruby stack at all!","archive_url":"https://api.github.com/repos/sass/libsass-python/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/sass/libsass-python/comments{/number}","events_url":"https://api.github.com/repos/sass/libsass-python/events","contributors_url":"https://api.github.com/repos/sass/libsass-python/contributors","html_url":"https://github.com/sass/libsass-python","forks":30,"compare_url":"https://api.github.com/repos/sass/libsass-python/compare/{base}...{head}","open_issues":10,"git_url":"git://github.com/sass/libsass-python.git","svn_url":"https://github.com/sass/libsass-python","merges_url":"https://api.github.com/repos/sass/libsass-python/merges","has_issues":true,"ssh_url":"[email protected]:sass/libsass-python.git","blobs_url":"https://api.github.com/repos/sass/libsass-python/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/sass/libsass-python/git/commits{/sha}","hooks_url":"https://api.github.com/repos/sass/libsass-python/hooks","has_downloads":true,"watchers":330,"name":"libsass-python","language":"Python","url":"https://api.github.com/repos/sass/libsass-python","created_at":"2012-08-16T01:31:33Z","pushed_at":"2017-08-08T14:53:30Z","forks_count":30,"default_branch":"master","teams_url":"https://api.github.com/repos/sass/libsass-python/teams","trees_url":"https://api.github.com/repos/sass/libsass-python/git/trees{/sha}","branches_url":"https://api.github.com/repos/sass/libsass-python/branches{/branch}","subscribers_url":"https://api.github.com/repos/sass/libsass-python/subscribers","permissions":{"admin":false,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/sass/libsass-python/stargazers"}
diff --git a/testing/resources/github/poi-map.json b/testing/resources/github/poi-map.json
new file mode 100644
index 0000000..7e4face
--- /dev/null
+++ b/testing/resources/github/poi-map.json
@@ -0,0 +1,1 @@
+{"id": 65545548, "node_id": "MDEwOlJlcG9zaXRvcnk2NTU0NTU0OA==", "name": "poi-map", "full_name": "asottile-archive/poi-map", "private": false, "owner": {"login": "asottile-archive", "id": 40324693, "node_id": "MDEyOk9yZ2FuaXphdGlvbjQwMzI0Njkz", "avatar_url": "https://avatars2.githubusercontent.com/u/40324693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/asottile-archive", "html_url": "https://github.com/asottile-archive", "followers_url": "https://api.github.com/users/asottile-archive/followers", "following_url": "https://api.github.com/users/asottile-archive/following{/other_user}", "gists_url": "https://api.github.com/users/asottile-archive/gists{/gist_id}", "starred_url": "https://api.github.com/users/asottile-archive/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asottile-archive/subscriptions", "organizations_url": "https://api.github.com/users/asottile-archive/orgs", "repos_url": "https://api.github.com/users/asottile-archive/repos", "events_url": "https://api.github.com/users/asottile-archive/events{/privacy}", "received_events_url": "https://api.github.com/users/asottile-archive/received_events", "type": "Organization", "site_admin": false}, "html_url": "https://github.com/asottile-archive/poi-map", "description": "A static-ish map to put numbered locations on a google map", "fork": false, "url": "https://api.github.com/repos/asottile-archive/poi-map", "forks_url": "https://api.github.com/repos/asottile-archive/poi-map/forks", "keys_url": "https://api.github.com/repos/asottile-archive/poi-map/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/asottile-archive/poi-map/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/asottile-archive/poi-map/teams", "hooks_url": "https://api.github.com/repos/asottile-archive/poi-map/hooks", "issue_events_url": "https://api.github.com/repos/asottile-archive/poi-map/issues/events{/number}", "events_url": "https://api.github.com/repos/asottile-archive/poi-map/events", "assignees_url": "https://api.github.com/repos/asottile-archive/poi-map/assignees{/user}", "branches_url": "https://api.github.com/repos/asottile-archive/poi-map/branches{/branch}", "tags_url": "https://api.github.com/repos/asottile-archive/poi-map/tags", "blobs_url": "https://api.github.com/repos/asottile-archive/poi-map/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/asottile-archive/poi-map/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/asottile-archive/poi-map/git/refs{/sha}", "trees_url": "https://api.github.com/repos/asottile-archive/poi-map/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/asottile-archive/poi-map/statuses/{sha}", "languages_url": "https://api.github.com/repos/asottile-archive/poi-map/languages", "stargazers_url": "https://api.github.com/repos/asottile-archive/poi-map/stargazers", "contributors_url": "https://api.github.com/repos/asottile-archive/poi-map/contributors", "subscribers_url": "https://api.github.com/repos/asottile-archive/poi-map/subscribers", "subscription_url": "https://api.github.com/repos/asottile-archive/poi-map/subscription", "commits_url": "https://api.github.com/repos/asottile-archive/poi-map/commits{/sha}", "git_commits_url": "https://api.github.com/repos/asottile-archive/poi-map/git/commits{/sha}", "comments_url": "https://api.github.com/repos/asottile-archive/poi-map/comments{/number}", "issue_comment_url": "https://api.github.com/repos/asottile-archive/poi-map/issues/comments{/number}", "contents_url": 
"https://api.github.com/repos/asottile-archive/poi-map/contents/{+path}", "compare_url": "https://api.github.com/repos/asottile-archive/poi-map/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/asottile-archive/poi-map/merges", "archive_url": "https://api.github.com/repos/asottile-archive/poi-map/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/asottile-archive/poi-map/downloads", "issues_url": "https://api.github.com/repos/asottile-archive/poi-map/issues{/number}", "pulls_url": "https://api.github.com/repos/asottile-archive/poi-map/pulls{/number}", "milestones_url": "https://api.github.com/repos/asottile-archive/poi-map/milestones{/number}", "notifications_url": "https://api.github.com/repos/asottile-archive/poi-map/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/asottile-archive/poi-map/labels{/name}", "releases_url": "https://api.github.com/repos/asottile-archive/poi-map/releases{/id}", "deployments_url": "https://api.github.com/repos/asottile-archive/poi-map/deployments", "created_at": "2016-08-12T10:35:22Z", "updated_at": "2018-11-27T19:40:15Z", "pushed_at": "2018-11-27T19:39:47Z", "git_url": "git://github.com/asottile-archive/poi-map.git", "ssh_url": "[email protected]:asottile-archive/poi-map.git", "clone_url": "https://github.com/asottile-archive/poi-map.git", "svn_url": "https://github.com/asottile-archive/poi-map", "homepage": null, "size": 15, "stargazers_count": 1, "watchers_count": 1, "language": "Python", "has_issues": true, "has_projects": true, "has_downloads": true, "has_wiki": true, "has_pages": false, "forks_count": 0, "mirror_url": null, "archived": true, "open_issues_count": 0, "license": null, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master", "permissions": {"admin": true, "push": true, "pull": true}}
diff --git a/testing/resources/github/tox.json b/testing/resources/github/tox.json
index b11589f..46aaff1 100644
--- a/testing/resources/github/tox.json
+++ b/testing/resources/github/tox.json
@@ -1,1 +1,1 @@
-{"issues_url":"https://api.github.com/repos/asottile/tox/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/tox/deployments","stargazers_count":0,"forks_url":"https://api.github.com/repos/asottile/tox/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/tox/subscription","notifications_url":"https://api.github.com/repos/asottile/tox/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/tox/collaborators{/collaborator}","updated_at":"2017-02-04T22:48:10Z","private":false,"pulls_url":"https://api.github.com/repos/asottile/tox/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/tox/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/tox/labels{/name}","has_wiki":false,"full_name":"asottile/tox","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/tox/statuses/{sha}","id":80953784,"keys_url":"https://api.github.com/repos/asottile/tox/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/tox/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/tox/tags","downloads_url":"https://api.github.com/repos/asottile/tox/downloads","assignees_url":"https://api.github.com/repos/asottile/tox/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/tox/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/tox/git/refs{/sha}","open_issues_count":0,"has_projects":true,"clone_url":"https://github.com/asottile/tox.git","watchers_count":0,"git_tags_url":"https://api.github.com/repos/asottile/tox/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/tox/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/tox/languages","size":8594,"homepage":"https://tox.readthedocs.org","fork":true,"commits_url":"https://api.github.com/repos/asottile/tox/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/tox/releases{/id}","description":"virtualenv management and test command line 
tool","archive_url":"https://api.github.com/repos/asottile/tox/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/tox/comments{/number}","events_url":"https://api.github.com/repos/asottile/tox/events","contributors_url":"https://api.github.com/repos/asottile/tox/contributors","html_url":"https://github.com/asottile/tox","forks":0,"compare_url":"https://api.github.com/repos/asottile/tox/compare/{base}...{head}","open_issues":0,"git_url":"git://github.com/asottile/tox.git","svn_url":"https://github.com/asottile/tox","merges_url":"https://api.github.com/repos/asottile/tox/merges","has_issues":false,"ssh_url":"[email protected]:asottile/tox.git","blobs_url":"https://api.github.com/repos/asottile/tox/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/tox/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/tox/hooks","has_downloads":true,"watchers":0,"name":"tox","language":"Python","url":"https://api.github.com/repos/asottile/tox","created_at":"2017-02-04T22:48:07Z","pushed_at":"2017-08-05T13:38:26Z","forks_count":0,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/tox/teams","trees_url":"https://api.github.com/repos/asottile/tox/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/tox/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/tox/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/tox/stargazers"}
+{"archived": false, "issues_url":"https://api.github.com/repos/asottile/tox/issues{/number}","deployments_url":"https://api.github.com/repos/asottile/tox/deployments","stargazers_count":0,"forks_url":"https://api.github.com/repos/asottile/tox/forks","mirror_url":null,"subscription_url":"https://api.github.com/repos/asottile/tox/subscription","notifications_url":"https://api.github.com/repos/asottile/tox/notifications{?since,all,participating}","collaborators_url":"https://api.github.com/repos/asottile/tox/collaborators{/collaborator}","updated_at":"2017-02-04T22:48:10Z","private":false,"pulls_url":"https://api.github.com/repos/asottile/tox/pulls{/number}","issue_comment_url":"https://api.github.com/repos/asottile/tox/issues/comments{/number}","labels_url":"https://api.github.com/repos/asottile/tox/labels{/name}","has_wiki":false,"full_name":"asottile/tox","owner":{"following_url":"https://api.github.com/users/asottile/following{/other_user}","events_url":"https://api.github.com/users/asottile/events{/privacy}","organizations_url":"https://api.github.com/users/asottile/orgs","url":"https://api.github.com/users/asottile","gists_url":"https://api.github.com/users/asottile/gists{/gist_id}","html_url":"https://github.com/asottile","subscriptions_url":"https://api.github.com/users/asottile/subscriptions","avatar_url":"https://avatars3.githubusercontent.com/u/1810591?v=4","repos_url":"https://api.github.com/users/asottile/repos","received_events_url":"https://api.github.com/users/asottile/received_events","gravatar_id":"","starred_url":"https://api.github.com/users/asottile/starred{/owner}{/repo}","site_admin":false,"login":"asottile","type":"User","id":1810591,"followers_url":"https://api.github.com/users/asottile/followers"},"statuses_url":"https://api.github.com/repos/asottile/tox/statuses/{sha}","id":80953784,"keys_url":"https://api.github.com/repos/asottile/tox/keys{/key_id}","issue_events_url":"https://api.github.com/repos/asottile/tox/issues/events{/number}","tags_url":"https://api.github.com/repos/asottile/tox/tags","downloads_url":"https://api.github.com/repos/asottile/tox/downloads","assignees_url":"https://api.github.com/repos/asottile/tox/assignees{/user}","contents_url":"https://api.github.com/repos/asottile/tox/contents/{+path}","has_pages":false,"git_refs_url":"https://api.github.com/repos/asottile/tox/git/refs{/sha}","open_issues_count":0,"has_projects":true,"clone_url":"https://github.com/asottile/tox.git","watchers_count":0,"git_tags_url":"https://api.github.com/repos/asottile/tox/git/tags{/sha}","milestones_url":"https://api.github.com/repos/asottile/tox/milestones{/number}","languages_url":"https://api.github.com/repos/asottile/tox/languages","size":8594,"homepage":"https://tox.readthedocs.org","fork":true,"commits_url":"https://api.github.com/repos/asottile/tox/commits{/sha}","releases_url":"https://api.github.com/repos/asottile/tox/releases{/id}","description":"virtualenv management and test command line 
tool","archive_url":"https://api.github.com/repos/asottile/tox/{archive_format}{/ref}","comments_url":"https://api.github.com/repos/asottile/tox/comments{/number}","events_url":"https://api.github.com/repos/asottile/tox/events","contributors_url":"https://api.github.com/repos/asottile/tox/contributors","html_url":"https://github.com/asottile/tox","forks":0,"compare_url":"https://api.github.com/repos/asottile/tox/compare/{base}...{head}","open_issues":0,"git_url":"git://github.com/asottile/tox.git","svn_url":"https://github.com/asottile/tox","merges_url":"https://api.github.com/repos/asottile/tox/merges","has_issues":false,"ssh_url":"[email protected]:asottile/tox.git","blobs_url":"https://api.github.com/repos/asottile/tox/git/blobs{/sha}","git_commits_url":"https://api.github.com/repos/asottile/tox/git/commits{/sha}","hooks_url":"https://api.github.com/repos/asottile/tox/hooks","has_downloads":true,"watchers":0,"name":"tox","language":"Python","url":"https://api.github.com/repos/asottile/tox","created_at":"2017-02-04T22:48:07Z","pushed_at":"2017-08-05T13:38:26Z","forks_count":0,"default_branch":"master","teams_url":"https://api.github.com/repos/asottile/tox/teams","trees_url":"https://api.github.com/repos/asottile/tox/git/trees{/sha}","branches_url":"https://api.github.com/repos/asottile/tox/branches{/branch}","subscribers_url":"https://api.github.com/repos/asottile/tox/subscribers","permissions":{"admin":true,"push":true,"pull":true},"stargazers_url":"https://api.github.com/repos/asottile/tox/stargazers"}
diff --git a/tests/source/github_test.py b/tests/source/github_test.py
index 88cd251..7e8efc8 100644
--- a/tests/source/github_test.py
+++ b/tests/source/github_test.py
@@ -24,6 +24,8 @@ def repos_response(mock_urlopen):
         _resource_json('tox'),
         # A private repo
         _resource_json('eecs381-p4'),
+        # An archived repo
+        _resource_json('poi-map'),
     ]
     mock_urlopen.side_effect = urlopen_side_effect({
         'https://api.github.com/user/repos?per_page=100': FakeResponse(
@@ -44,6 +46,10 @@ def repos_response(mock_urlopen):
         ),
         ({'forks': True}, {'asottile/git-code-debt', 'asottile/tox'}),
         ({'private': True}, {'asottile/git-code-debt', 'asottile/eecs381-p4'}),
+        (
+            {'archived': True},
+            {'asottile/git-code-debt', 'asottile-archive/poi-map'},
+        ),
     ),
 )
 def test_list_repos(settings, expected_repo_names):
| github, github_org: don't clone archived repos by default
and have an `archived: true` option for enabling them | 0.0 | 27aba1dda4e69ae598f8e676006c86c61eab5559 | [
"tests/source/github_test.py::test_list_repos[settings0-expected_repo_names0]",
"tests/source/github_test.py::test_list_repos[settings1-expected_repo_names1]",
"tests/source/github_test.py::test_list_repos[settings2-expected_repo_names2]",
"tests/source/github_test.py::test_list_repos[settings3-expected_repo_names3]",
"tests/source/github_test.py::test_list_repos[settings4-expected_repo_names4]"
]
| []
| {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | 2019-03-04 22:46:31+00:00 | mit | 1,118 |
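The diffs above pair a fixture change (an explicit `"archived": false` on an existing repo's JSON, plus a new archived `poi-map` resource) with new `test_list_repos` cases: archived repos are skipped unless the source settings opt in with `archived: true`, the same pattern already used for `forks` and `private`. A minimal sketch of that kind of filter follows; the name `filter_repos` and the bare keyword arguments are assumptions for illustration, not all-repos' actual API:

```python
from typing import Any, Dict, List


def filter_repos(
        repos: List[Dict[str, Any]],
        *,
        forks: bool = False,
        private: bool = False,
        archived: bool = False,
) -> Dict[str, str]:
    """Map full_name -> clone_url, skipping forks/private/archived by default."""
    return {
        repo['full_name']: repo['clone_url']
        for repo in repos
        # each flag opts its category back in; .get() tolerates older
        # fixture payloads that predate the "archived" key
        if (forks or not repo['fork'])
        and (private or not repo['private'])
        and (archived or not repo.get('archived', False))
    }
```

Run against the fixture repos above, the defaults would drop the `tox` fork and the archived `poi-map`, while `filter_repos(repos, archived=True)` keeps `asottile-archive/poi-map`, matching the `{'archived': True}` parametrize case.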
|
asottile__astpretty-41 | diff --git a/astpretty.py b/astpretty.py
index d0c1b59..abc8444 100644
--- a/astpretty.py
+++ b/astpretty.py
@@ -75,7 +75,7 @@ def _leaf(node: 'ASTType', show_offsets: bool = True) -> str:
 
 def pformat(
         node: Union['ASTType', None, str],
-        indent: str = '    ',
+        indent: Union[str, int] = '    ',
         show_offsets: bool = True,
         _indent: int = 0,
 ) -> str:
@@ -86,6 +86,11 @@ def pformat(
     elif _is_leaf(node):
         return _leaf(node, show_offsets=show_offsets)
     else:
+        if isinstance(indent, int):
+            indent_s = indent * ' '
+        else:
+            indent_s = indent
+
         class state:
             indent = _indent
 
@@ -96,7 +101,7 @@ def pformat(
             state.indent -= 1
 
         def indentstr() -> str:
-            return state.indent * indent
+            return state.indent * indent_s
 
         def _pformat(el: Union['ASTType', None, str], _indent: int = 0) -> str:
             return pformat(
| asottile/astpretty | 42ec5bd247e0b84bdbb556459d7d428bb8f34369 | diff --git a/tests/astpretty_test.py b/tests/astpretty_test.py
index b595d64..796950f 100644
--- a/tests/astpretty_test.py
+++ b/tests/astpretty_test.py
@@ -127,6 +127,21 @@ def test_pformat_custom_indent():
     )
 
 
+def test_pformat_integer_indent():
+    node = _to_expr_value('[a, b, c]')
+    ret = astpretty.pformat(node, indent=3, show_offsets=False)
+    assert ret == (
+        'List(\n'
+        '   elts=[\n'
+        "      Name(id='a', ctx=Load()),\n"
+        "      Name(id='b', ctx=Load()),\n"
+        "      Name(id='c', ctx=Load()),\n"
+        '   ],\n'
+        '   ctx=Load(),\n'
+        ')'
+    )
+
+
 def test_pformat_nested_node_without_line_information():
     expected_38 = (
         'Subscript(\n'
| Have an option for the indent argument to be a number of spaces
Like `json.dumps` or `ast.dump`, allowing `indent` to be an integer would be nice.
I could probably make a PR for this, if the idea gets approval. | 0.0 | 42ec5bd247e0b84bdbb556459d7d428bb8f34369 | [
"tests/astpretty_test.py::test_pformat_integer_indent"
]
| [
"tests/astpretty_test.py::test_is_leaf_true[x]",
"tests/astpretty_test.py::test_is_leaf_true[\"y\"]",
"tests/astpretty_test.py::test_is_leaf_true[5]",
"tests/astpretty_test.py::test_is_leaf_true[[]]",
"tests/astpretty_test.py::test_is_leaf_has_attr_with_list_of_primitives",
"tests/astpretty_test.py::test_is_leaf_false[a.b]",
"tests/astpretty_test.py::test_is_leaf_false[[4]]",
"tests/astpretty_test.py::test_is_leaf_false[x()]",
"tests/astpretty_test.py::test_pformat_py35_regression",
"tests/astpretty_test.py::test_pformat_node",
"tests/astpretty_test.py::test_pformat_nested_with_offsets",
"tests/astpretty_test.py::test_pformat_nested_attr_empty_list",
"tests/astpretty_test.py::test_pformat_mixed_sub_nodes_and_primitives",
"tests/astpretty_test.py::test_pformat_nested_multiple_elements",
"tests/astpretty_test.py::test_pformat_custom_indent",
"tests/astpretty_test.py::test_pformat_leaf_node_with_list",
"tests/astpretty_test.py::test_pprint",
"tests/astpretty_test.py::test_main_with_offsets",
"tests/astpretty_test.py::test_main_hide_offsets",
"tests/astpretty_test.py::test_pformat_py38_type_comments"
]
| {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | 2020-12-28 18:51:38+00:00 | mit | 1,119 |
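The patch in this row makes `pformat`'s `indent` accept an integer (a count of spaces) as well as a string, mirroring `json.dumps` and `ast.dump`. A quick usage sketch, runnable once the change is applied (the parsed snippet is arbitrary):

```python
import ast

import astpretty

# Pull out the List node for a small expression, like the test's
# _to_expr_value helper does.
node = ast.parse('[a, b, c]').body[0].value

# indent=3 renders each nesting level with three spaces; a string
# such as '\t' still works as before.
print(astpretty.pformat(node, indent=3, show_offsets=False))
```

This prints the same `List(...)` block asserted in `test_pformat_integer_indent` above.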