metadata
dict | text
stringlengths 60
3.49M
|
---|---|
{
"source": "jcontesti/spotify-solar-radio-chart",
"score": 3
}
|
#### File: spotify-solar-radio-chart/correctors/google_misspelling_corrector.py
```python
from typing import Dict, Final, Optional
from googleapiclient.discovery import build
from settings import environment_settings
from . import misspelling_corrector
class GoogleMisspellingCorrector(misspelling_corrector.MisspellingCorrector):
    """
    Correct artists and songs titles using the Google Custom Search API.

    A spelling suggestion returned by the Custom Search Engine (CSE) is
    taken as the corrected value; results are cached on disk (via the
    parent class) so the same query is never sent to Google twice.
    """
    # Credentials are read once from the environment settings.
    GOOGLE_API_KEY: Final[str] = environment_settings.GOOGLE_API_KEY
    GOOGLE_CSE_KEY: Final[str] = environment_settings.GOOGLE_CSE_KEY
    # Token used to join and later split "artist : song" in one query string.
    SEPARATOR: Final[str] = " : "

    def __init__(self, cache_path: str = "google_misspelling_corrections_cache.json"):
        super().__init__(cache_path)

    def correct(self, artist: str, song: str) -> Optional[Dict[str, str]]:
        """Return corrected spellings for an artist/song pair.

        :param artist: artist name as extracted from the source.
        :param song: song title as extracted from the source.
        :return: ``{"artist": ..., "song": ...}`` with the corrected
            values, or ``None`` when Google offers no spelling suggestion.
        """
        # If previously queried, return from cache
        cached_corrected: Optional[str] = (
            self._get_from_cached_misspelling_corrections(artist, song)
        )
        if cached_corrected:
            return self._decode_artist_song(cached_corrected)
        # Not typed because build() returns a generic Resource
        service = build(
            serviceName="customsearch",
            version="v1",
            # Use the class constant for consistency with GOOGLE_CSE_KEY below.
            developerKey=self.GOOGLE_API_KEY,
            cache_discovery=False,
        )
        query: str = artist + self.SEPARATOR + song
        # Not typed because it returns a generic result
        query_result = (
            # pylint: disable=no-member
            service.cse().list(q=query, cx=self.GOOGLE_CSE_KEY).execute()
        )
        corrected_result: Optional[Dict[str, str]] = None
        if "spelling" in query_result:
            # Split only on the first separator so song titles that
            # themselves contain " : " are not truncated.
            corrected: List[str] = (
                query_result["spelling"]["correctedQuery"].split(self.SEPARATOR, 1)
            )
            corrected_artist = corrected[0]
            corrected_song = corrected[1] if len(corrected) == 2 else ""
            # Cache queried values
            self._cache_correction(artist, song, corrected_artist, corrected_song)
            corrected_result = {"artist": corrected_artist, "song": corrected_song}
        return corrected_result
```
#### File: spotify-solar-radio-chart/extracted_data/extracted_song.py
```python
from typing import List
class ExtractedSong:
    """Class that represents a song that has been extracted from any source."""

    # Tokens that can separate several artist names inside one string.
    ARTISTS_SEPARATORS = [
        ' & ',
        ' ft ',
        ' ft. ',
        ' feat ',
        ' feat. ',
        ' presents ',
        ' pres. ',
        ' with ',
        ' and ',
        ', ',
    ]
    # Tokens that can separate several song titles inside one string.
    SONGS_TITLES_SEPARATORS = [
        '/',
        ' - ',
    ]

    def __init__(self,
                 artist: str,
                 song_title: str,
                 album_title: str = "",
                 label: str = "") -> None:
        self._artist = artist
        self._song_title = song_title
        self._album_title = album_title
        self._label = label

    @property
    def artist(self) -> str:
        """Return the artist text."""
        return self._artist

    @property
    def song_title(self) -> str:
        """Return the title text."""
        return self._song_title

    @property
    def album_title(self) -> str:
        """Return the album name."""
        return self._album_title

    @property
    def label(self) -> str:
        """Return the label of the song."""
        return self._label

    @staticmethod
    def _split_on_separators(text: str, separators: List[str]) -> List[str]:
        """Split *text* on any of *separators*.

        Every separator is first normalized to a single placeholder, then
        the text is split once.  A text containing no separator yields a
        one-element list (``str.split`` already guarantees this, so no
        explicit membership check is needed).
        """
        for separator in separators:
            text = text.replace(separator, "#")
        return text.split("#")

    def get_separated_artists(self) -> List[str]:
        """
        Split artists names into a list.
        For instance "Artist A feat. Artist B" returns ["Artist A", "Artist B"].
        :return: list of artists related to the song.
        """
        return self._split_on_separators(self._artist, self.ARTISTS_SEPARATORS)

    def get_separated_titles(self) -> List[str]:
        """
        Split songs titles into a list.
        For instance "Song 1 / Song 2" returns ["Song 1", "Song 2"].
        :return: list of different titles related to the extracted song.
        """
        return self._split_on_separators(self._song_title, self.SONGS_TITLES_SEPARATORS)

    def format(self) -> None:
        """Format artists, titles and label: lower-case and trim whitespace."""
        self._artist = self._artist.lower().strip(" \t\n\r")
        self._song_title = self._song_title.lower().strip(" \t\n\r")
        self._album_title = self._album_title.lower().strip(" \t\n\r")
        self._label = self._label.lower().strip(" \t\n\r")
```
|
{
"source": "jcook00/q2-api-client",
"score": 2
}
|
#### File: clients/back_office/branch_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.back_office_endpoints import BranchEndpoint
class BranchClient(BaseQ2Client):
    """Client for the back-office branch endpoints."""

    def retrieve_branches(self):
        """Retrieve all branches (POST /backoffice/v3/branch/retrieve).

        :return: Response object
        :rtype: requests.Response
        """
        return self._post(url=self._build_url(BranchEndpoint.RETRIEVE_BRANCH.value))
```
#### File: clients/back_office/custom_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.back_office_endpoints import CustomEndpoint
class CustomClient(BaseQ2Client):
    """Client for custom back-office endpoints."""

    def search_deleted(self, request_body):
        """Search deleted associates (POST /custom/associate/searchDeleted).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(CustomEndpoint.SEARCH_DELETED.value),
            json=request_body,
        )
```
#### File: clients/back_office/user_login_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.back_office_endpoints import UserLoginEndpoint
class UserLoginClient(BaseQ2Client):
    """Client for the back-office user-login endpoints."""

    def retrieve_user_logins(self, request_body):
        """Retrieve user logins (POST /backoffice/v3/userLogin/retrieve).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(UserLoginEndpoint.RETRIEVE_USER_LOGINS.value),
            json=request_body,
        )

    def retrieve_user_login(self, user_id):
        """Retrieve one user login (POST /backoffice/v3/userLogin/{id}/retrieve).

        :param str user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.RETRIEVE_USER_LOGIN.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint))

    def patch_user_login(self, user_id, request_body):
        """Patch a user login (POST /backoffice/v3/userLogin/{id}/patch).

        :param str user_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.PATCH_USER_LOGIN.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def update_user_login(self, user_id, request_body):
        """Update a user login (POST /backoffice/v3/userLogin/{id}/update).

        :param str user_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.UPDATE_USER_LOGIN.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def reset_device_registration(self, user_id):
        """Reset device registration (POST /backoffice/v3/userLogin/{id}/resetDeviceRegistration).

        :param str user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.RESET_DEVICE_REGISTRATION.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint))

    def restore_user_login(self, user_id, request_body):
        """Restore a user login (POST /backoffice/v3/userLogin/{id}/restore).

        :param str user_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.RESTORE_USER_LOGIN.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def change_user_password(self, user_id, request_body):
        """Change a user's password (POST /backoffice/v3/userLogin/{id}/changePassword).

        :param str user_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserLoginEndpoint.CHANGE_PASSWORD.value.format(id=user_id)
        return self._post(url=self._build_url(endpoint), json=request_body)
```
#### File: q2_api_client/clients/base_q2_client.py
```python
from q2_api_client.clients.rest_client import RestClient
from q2_api_client.endpoints.mobile_ws_endpoints import LoginEndpoint
from q2_api_client.utils.url import URL
class BaseQ2Client(RestClient):
    """Base client for the Q2 APIs.

    Configures timeouts, URL scheme and base path, and authenticates:
    a caller-supplied ``q2token`` header is reused when given, otherwise
    a logon request is issued to obtain one.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._connect_timeout = kwargs.get('connect_timeout', 60)
        self._read_timeout = kwargs.get('read_timeout', 120)
        self._scheme = kwargs.get('scheme', "https")
        self._base_path = kwargs.get('base_path')
        self._headers['Accept'] = "application/json"
        # Reuse a provided token to avoid an extra logon round-trip.
        self._headers['q2token'] = kwargs.get('q2token') if kwargs.get('q2token') is not None else self._get_q2token()

    def _get_q2token(self):
        """Sends a logon POST request and returns the Q2 token from the response headers.

        :return: the q2token header value
        :rtype: str
        :raises HTTPError: failed to authenticate
        """
        # NOTE(review): credentials are assumed to be set by RestClient
        # (self._username / self._password) — confirm against RestClient.
        request_body = {'userId': self._username, 'password': self._password}
        endpoint = LoginEndpoint.LOGON_USER.value
        response = self._post(url=self._build_url(endpoint), json=request_body)
        response.raise_for_status()
        return response.headers.get('q2token')

    def _build_url(self, endpoint='/'):
        """Builds a URL using the endpoint.

        :param str endpoint: the endpoint to add to the path
        :return: the URL
        :rtype: str
        """
        # Prepend the configured base path, if any, to the endpoint.
        path = endpoint if self._base_path is None else "".join((self._base_path, endpoint))
        url = URL(scheme=self._scheme, host=self._host, port=self._port, path=path)
        return url.build()
```
#### File: clients/central/gam_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.central_endpoints import GAMEndpoint
class GAMClient(BaseQ2Client):
    """Client for the central GAM (rights/teams/users) endpoints."""

    def get_gam_rights(self):
        """Fetch GAM rights (GET /central/gam/rights).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(GAMEndpoint.RIGHTS.value))

    def get_gam_teams(self):
        """Fetch all GAM teams (GET /central/gam/team).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(GAMEndpoint.TEAM.value))

    def create_gam_team(self, request_body):
        """Create a GAM team (POST /central/gam/team).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(GAMEndpoint.TEAM.value),
            json=request_body,
        )

    def get_gam_team(self, team_id):
        """Fetch one GAM team (GET /central/gam/team/{team_id}).

        :param int team_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = GAMEndpoint.TEAM_ID.value.format(team_id=team_id)
        return self._get(url=self._build_url(endpoint))

    def delete_gam_team(self, team_id):
        """Delete a GAM team (DELETE /central/gam/team/{team_id}).

        :param int team_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = GAMEndpoint.TEAM_ID.value.format(team_id=team_id)
        return self._delete(url=self._build_url(endpoint))

    def get_gam_user(self, user_id):
        """Fetch one GAM user (GET /central/gam/user/{user_id}).

        :param int user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = GAMEndpoint.USER_ID.value.format(user_id=user_id)
        return self._get(url=self._build_url(endpoint))
```
#### File: clients/hq/hq_client.py
```python
from q2_api_client.clients.hq.back_office_client import BackOfficeClient
from q2_api_client.clients.hq.core_client import CoreClient
from q2_api_client.clients.hq.front_end_client import FrontEndClient
from q2_api_client.clients.hq.token_client import TokenClient
class HQClient:
    """Facade aggregating the HQ sub-clients.

    The back-office client authenticates first; its q2token is then
    forwarded to the remaining sub-clients so each one skips a second
    logon round-trip.
    """

    def __init__(self, **kwargs):
        self._back_office = BackOfficeClient(**kwargs)
        # Share the freshly obtained token with the other sub-clients.
        kwargs['q2token'] = self._back_office.get_header('q2token')
        self._core = CoreClient(**kwargs)
        self._front_end = FrontEndClient(**kwargs)
        self._token = TokenClient(**kwargs)

    @property
    def back_office(self):
        """Back-office sub-client."""
        return self._back_office

    @property
    def core(self):
        """Core sub-client."""
        return self._core

    @property
    def front_end(self):
        """Front-end sub-client."""
        return self._front_end

    @property
    def token(self):
        """Token sub-client."""
        return self._token
```
#### File: clients/hq/token_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.hq_endpoints import TokenEndpoint
class TokenClient(BaseQ2Client):
    """Client for the HQ token endpoints."""

    def get_token_info(self):
        """Fetch token information (GET /hq/tokeninfo).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(TokenEndpoint.TOKEN_INFO.value))
```
#### File: clients/mobile_ws/account_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import AccountEndpoint
class AccountClient(BaseQ2Client):
    """Client for the mobile-ws account endpoints (accounts, groups, labels)."""

    def get_accounts(self):
        """Fetch all accounts (GET /mobilews/account).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(AccountEndpoint.ACCOUNT.value))

    def set_pfm_enrollment_to_true(self):
        """Enable PFM enrollment (GET /mobilews/account/Pfm/setEnrolledToTrue).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(AccountEndpoint.ACCOUNT_SET_ENROLLED.value))

    def get_account_groups(self):
        """Fetch account groups (GET /mobilews/account/group).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(AccountEndpoint.ACCOUNT_GROUP.value))

    def create_account_group(self, request_body):
        """Create an account group (POST /mobilews/account/group).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(AccountEndpoint.ACCOUNT_GROUP.value),
            json=request_body,
        )

    def delete_account_group(self, group_id):
        """Delete an account group (DELETE /mobilews/account/group/{id}).

        :param str group_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_GROUP_ID.value.format(id=group_id)
        return self._delete(url=self._build_url(endpoint))

    def update_account_group(self, group_id, request_body):
        """Update an account group (PUT /mobilews/account/group/{id}).

        :param str group_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_GROUP_ID.value.format(id=group_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_account_labels(self):
        """Fetch account labels (GET /mobilews/account/label).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(AccountEndpoint.ACCOUNT_LABEL.value))

    def create_account_label(self, request_body):
        """Create an account label (POST /mobilews/account/label).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(AccountEndpoint.ACCOUNT_LABEL.value),
            json=request_body,
        )

    def delete_account_label(self, label_id):
        """Delete an account label (DELETE /mobilews/account/label/{id}).

        :param str label_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_LABEL_ID.value.format(id=label_id)
        return self._delete(url=self._build_url(endpoint))

    def update_account_label(self, label_id, request_body):
        """Update an account label (PUT /mobilews/account/label/{id}).

        :param str label_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_LABEL_ID.value.format(id=label_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_inquiry_link_history_template(self, account_id):
        """Fetch the inquiry-link history template (GET /mobilews/account/{id}/inquiryLink).

        :param str account_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_INQUIRY_LINK.value.format(id=account_id)
        return self._get(url=self._build_url(endpoint))

    def create_inquiry_link_history_template(self, account_id, request_body):
        """Create an inquiry-link history template (POST /mobilews/account/{id}/inquiryLink).

        :param str account_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_INQUIRY_LINK.value.format(id=account_id)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def get_account(self, account_id):
        """Fetch one account (GET /mobilews/account/{id}).

        :param str account_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_ID.value.format(id=account_id)
        return self._get(url=self._build_url(endpoint))

    def create_account(self, account_id, request_body):
        """Create an account (POST /mobilews/account/{id}).

        :param str account_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_ID.value.format(id=account_id)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def delete_account(self, account_id):
        """Delete an account (DELETE /mobilews/account/{id}).

        :param str account_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_ID.value.format(id=account_id)
        return self._delete(url=self._build_url(endpoint))

    def update_account(self, account_id, request_body):
        """Update an account (PUT /mobilews/account/{id}).

        :param str account_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_ID.value.format(id=account_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_account_detail_click_value(self, account_id, hade_id):
        """Fetch an account detail click value (GET /mobilews/account/{id}/{hadeId}).

        :param str account_id: path parameter
        :param str hade_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = AccountEndpoint.ACCOUNT_HADE_ID.value.format(id=account_id, hadeId=hade_id)
        return self._get(url=self._build_url(endpoint))
```
#### File: clients/mobile_ws/audit_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import AuditEndpoint
class AuditClient(BaseQ2Client):
    """Client for the mobile-ws audit endpoints."""

    def get_audit(self):
        """Fetch audit entries (GET /mobilews/audit).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(AuditEndpoint.AUDIT.value))

    def create_audit(self, request_body):
        """Create an audit entry (POST /mobilews/audit).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(AuditEndpoint.AUDIT.value),
            json=request_body,
        )
```
#### File: clients/mobile_ws/calendar_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import CalendarEndpoint
class CalendarClient(BaseQ2Client):
    """Client for the mobile-ws calendar endpoints."""

    def get_calendar(self):
        """Fetch the calendar (GET /mobilews/calendar).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(CalendarEndpoint.CALENDAR.value))

    def get_calendar_by_type(self, transaction_type):
        """Fetch the calendar for a type (GET /mobilews/calendar/{transactionType}).

        :param str transaction_type: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = CalendarEndpoint.CALENDAR_TRANSACTION_TYPE.value.format(transactionType=transaction_type)
        return self._get(url=self._build_url(endpoint))
```
#### File: clients/mobile_ws/config_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import ConfigEndpoint
class ConfigClient(BaseQ2Client):
    """Client for the mobile-ws UUX configuration endpoints."""

    def get_uux_configuration(self):
        """Fetch the UUX configuration (GET /mobilews/uuxConfiguration).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(ConfigEndpoint.UUX_CONFIGURATION.value))

    def delete_uux_configuration(self):
        """Delete the UUX configuration (DELETE /mobilews/uuxConfiguration).

        :return: Response object
        :rtype: requests.Response
        """
        return self._delete(url=self._build_url(ConfigEndpoint.UUX_CONFIGURATION.value))

    def create_uux_configuration(self, request_body):
        """Create the UUX configuration (POST /mobilews/uuxConfiguration).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(ConfigEndpoint.UUX_CONFIGURATION.value),
            json=request_body,
        )

    def update_uux_configuration(self, request_body):
        """Update the UUX configuration (PUT /mobilews/uuxConfiguration).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._put(
            url=self._build_url(ConfigEndpoint.UUX_CONFIGURATION.value),
            json=request_body,
        )
```
#### File: clients/mobile_ws/login_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import LoginEndpoint
class LoginClient(BaseQ2Client):
    """Client for the mobile-ws login/logout endpoints."""

    def login_csr(self, request_body):
        """Log in as a CSR (POST /mobilews/logincsr).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(LoginEndpoint.LOGIN_CSR.value),
            json=request_body,
        )

    def login_user(self, request_body):
        """Log in as a user (POST /mobilews/logonUser).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(LoginEndpoint.LOGON_USER.value),
            json=request_body,
        )

    def logout_user(self):
        """Log the current user out (GET /mobilews/logoffUser).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(LoginEndpoint.LOGOFF_USER.value))
```
#### File: clients/mobile_ws/marketing_message_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import MarketingMessageEndpoint
class MarketingMessageClient(BaseQ2Client):
    """Client for the mobile-ws marketing-message endpoints."""

    def get_marketing_message(self, page_name):
        """Fetch a marketing message (GET /mobilews/marketingMessage/{pageNameAndSize}).

        :param str page_name: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = MarketingMessageEndpoint.MARKETING_MESSAGE.value.format(pageNameAndSize=page_name)
        return self._get(url=self._build_url(endpoint))

    def create_marketing_message_unauth(self, request_body):
        """Create an unauthenticated marketing message (POST /mobilews/marketingMessageUnauth).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(MarketingMessageEndpoint.MARKETING_MESSAGE_UNAUTH.value),
            json=request_body,
        )
```
#### File: clients/mobile_ws/message_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import MessageEndpoint
class MessageClient(BaseQ2Client):
    """Client for the mobile-ws secure-message endpoints."""

    def get_messages(self):
        """Fetch all messages (GET /mobilews/message).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(MessageEndpoint.MESSAGE.value))

    def send_message(self, request_body):
        """Send a message (POST /mobilews/message).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(MessageEndpoint.MESSAGE.value),
            json=request_body,
        )

    def mark_message_as_read(self, request_body):
        """Mark a message as read (PUT /mobilews/message).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._put(
            url=self._build_url(MessageEndpoint.MESSAGE.value),
            json=request_body,
        )

    def get_message_attachments(self):
        """Fetch message attachments (GET /mobilews/message/attachment).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(MessageEndpoint.MESSAGE_ATTACHMENT.value))

    def get_message_attachment(self, attachment_id):
        """Fetch one attachment (GET /mobilews/message/attachment/{id}).

        :param str attachment_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = MessageEndpoint.MESSAGE_ATTACHMENT_ID.value.format(id=attachment_id)
        return self._get(url=self._build_url(endpoint))

    def get_message_count(self):
        """Fetch the message count (GET /mobilews/message/count).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(MessageEndpoint.MESSAGE_COUNT.value))

    def delete_messages(self, request_body):
        """Delete messages (PUT /mobilews/message/delete).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._put(
            url=self._build_url(MessageEndpoint.MESSAGE_DELETE.value),
            json=request_body,
        )

    def remove_message_expiration(self, request_body):
        """Remove message expiration (PUT /mobilews/message/expire).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._put(
            url=self._build_url(MessageEndpoint.MESSAGE_EXPIRE.value),
            json=request_body,
        )

    def get_message_recipients(self):
        """Fetch message recipients (GET /mobilews/message/messageRecipient).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(MessageEndpoint.MESSAGE_RECIPIENT.value))

    def get_message(self, message_id):
        """Fetch one message (GET /mobilews/message/{id}).

        :param str message_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = MessageEndpoint.MESSAGE_ID.value.format(id=message_id)
        return self._get(url=self._build_url(endpoint))

    def delete_message(self, message_id):
        """Delete one message (DELETE /mobilews/message/{id}).

        :param str message_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = MessageEndpoint.MESSAGE_ID.value.format(id=message_id)
        return self._delete(url=self._build_url(endpoint))

    def send_reply_message(self, message_id, request_body):
        """Reply to a message (POST /mobilews/message/{id}).

        :param str message_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = MessageEndpoint.MESSAGE_ID.value.format(id=message_id)
        return self._post(url=self._build_url(endpoint), json=request_body)
```
#### File: clients/mobile_ws/pdf_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import PDFEndpoint
class PDFClient(BaseQ2Client):
    """Client for the mobile-ws PDF acceptance endpoints."""

    def get_pdf_acceptance_code(self):
        """Fetch the PDF acceptance code (GET /mobilews/pdf).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(PDFEndpoint.PDF.value))

    def verify_pdf_acceptance_code(self, request_body):
        """Verify a PDF acceptance code (PUT /mobilews/pdf/validate).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._put(
            url=self._build_url(PDFEndpoint.PDF_VALIDATE.value),
            json=request_body,
        )
```
#### File: clients/mobile_ws/pre_logon_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import PreLogonEndpoint
class PreLogonClient(BaseQ2Client):
    """Client for the mobile-ws pre-logon endpoints."""

    def pre_logon_user(self):
        """Fetch pre-logon data (GET /mobilews/preLogonUser).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(PreLogonEndpoint.PRE_LOGON_USER.value))

    def trigger_pre_logon_user(self, request_body):
        """Trigger pre-logon (POST /mobilews/preLogonUser).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(PreLogonEndpoint.PRE_LOGON_USER.value),
            json=request_body,
        )

    def pre_logon_user_clear_cache(self):
        """Clear the pre-logon cache (GET /mobilews/preLogonUser/clearCache).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(PreLogonEndpoint.PRE_LOGON_USER_CLEAR_CACHE.value))
```
#### File: clients/mobile_ws/push_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import PushEndpoint
class PushClient(BaseQ2Client):
    """Client for the mobile-ws push-notification endpoints."""

    def get_push_notification_targets(self):
        """Fetch push notification targets (GET /mobilews/push).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(PushEndpoint.PUSH.value))

    def create_push_notification_target(self, request_body):
        """Create a push notification target (POST /mobilews/push).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(PushEndpoint.PUSH.value),
            json=request_body,
        )

    def delete_push_notification_target(self, q2_token):
        """Delete a push notification target (DELETE /mobilews/push/{qtwoToken}).

        :param str q2_token: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PushEndpoint.PUSH_TOKEN.value.format(qtwoToken=q2_token)
        return self._delete(url=self._build_url(endpoint))

    def update_push_notification_target(self, q2_token, request_body):
        """Update a push notification target (PUT /mobilews/push/{qtwoToken}).

        :param str q2_token: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PushEndpoint.PUSH_TOKEN.value.format(qtwoToken=q2_token)
        return self._put(url=self._build_url(endpoint), json=request_body)
```
#### File: clients/mobile_ws/rate_app_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import RateAppEndpoint
class RateAppClient(BaseQ2Client):
    """Client for the mobile-ws app-rating endpoints."""

    def decline_rate_app(self):
        """Decline rating the app (PUT /mobilews/rateApp/decline).

        :return: Response object
        :rtype: requests.Response
        """
        return self._put(url=self._build_url(RateAppEndpoint.RATE_DECLINE.value))

    def rate_app(self):
        """Rate the app (PUT /mobilews/rateApp/rate).

        :return: Response object
        :rtype: requests.Response
        """
        return self._put(url=self._build_url(RateAppEndpoint.RATE.value))
```
#### File: clients/mobile_ws/report_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import ReportEndpoint
class ReportClient(BaseQ2Client):
    """Client for the mobile-ws report endpoints (plans, runs, templates)."""

    def get_latest_report(self):
        """Fetch the latest report (GET /mobilews/report/latest).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(ReportEndpoint.REPORT_LATEST.value))

    def get_report_plans(self):
        """Fetch all report plans (GET /mobilews/report/plan).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(ReportEndpoint.REPORT_PLAN.value))

    def create_report_plan(self, request_body):
        """Create a report plan (POST /mobilews/report/plan).

        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        return self._post(
            url=self._build_url(ReportEndpoint.REPORT_PLAN.value),
            json=request_body,
        )

    def get_report_plan(self, plan_id):
        """Fetch one report plan (GET /mobilews/report/plan/{id}).

        :param str plan_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_PLAN_ID.value.format(id=plan_id)
        return self._get(url=self._build_url(endpoint))

    def delete_report_plan(self, plan_id):
        """Delete a report plan (DELETE /mobilews/report/plan/{id}).

        :param str plan_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_PLAN_ID.value.format(id=plan_id)
        return self._delete(url=self._build_url(endpoint))

    def update_report_plan(self, plan_id, request_body):
        """Update a report plan (PUT /mobilews/report/plan/{id}).

        :param str plan_id: path parameter
        :param dict request_body: body to send with the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_PLAN_ID.value.format(id=plan_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def execute_report_plan(self, plan_id):
        """Run a report plan (GET /mobilews/report/plan/{id}/run).

        :param str plan_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_PLAN_ID_RUN.value.format(id=plan_id)
        return self._get(url=self._build_url(endpoint))

    def get_reports_run(self):
        """Fetch executed reports (GET /mobilews/report/report).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(ReportEndpoint.REPORT.value))

    def get_report_output_list(self, report_id):
        """Fetch a report's output list (GET /mobilews/report/report/{id}).

        :param str report_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_ID.value.format(id=report_id)
        return self._get(url=self._build_url(endpoint))

    def get_report_detail(self, report_id, detail_id):
        """Fetch a report detail (GET /mobilews/report/report/{id}/{detailId}).

        :param str report_id: path parameter
        :param str detail_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_DETAIL_ID.value.format(id=report_id, detailId=detail_id)
        return self._get(url=self._build_url(endpoint))

    def get_report_templates(self):
        """Fetch report templates (GET /mobilews/report/template).

        :return: Response object
        :rtype: requests.Response
        """
        return self._get(url=self._build_url(ReportEndpoint.REPORT_TEMPLATE.value))

    def get_report_template(self, template_id):
        """Fetch one report template (GET /mobilews/report/template/{id}).

        :param str template_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = ReportEndpoint.REPORT_TEMPLATE_ID.value.format(id=template_id)
        return self._get(url=self._build_url(endpoint))
```
#### File: clients/mobile_ws/security_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import SecurityEndpoint
class SecurityClient(BaseQ2Client):
    """Client for the /mobilews password-management endpoints."""

    def change_password(self, request_body):
        """POST /mobilews/changePassword

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = SecurityEndpoint.CHANGE_PASSWORD.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def validate_password(self, request_body):
        """POST /mobilews/validatePassword

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = SecurityEndpoint.VALIDATE_PASSWORD.value
        return self._post(url=self._build_url(endpoint), json=request_body)
```
#### File: clients/mobile_ws/user_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import UserEndpoint
class UserClient(BaseQ2Client):
    """Client for the /mobilews user, user-role, user-status, and user-profile endpoints."""

    def get_users(self):
        """GET /mobilews/user

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER.value
        return self._get(url=self._build_url(endpoint))

    def create_user(self, request_body):
        """POST /mobilews/user

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def get_user(self, user_id):
        """GET /mobilews/user/{id}

        :param str user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_ID.value.format(id=user_id)
        return self._get(url=self._build_url(endpoint))

    def delete_user(self, user_id):
        """DELETE /mobilews/user/{id}

        :param str user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_ID.value.format(id=user_id)
        return self._delete(url=self._build_url(endpoint))

    def update_user(self, user_id, request_body):
        """PUT /mobilews/user/{id}

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_ID.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_user_role(self, user_id, request_body):
        """PUT /mobilews/user/role/{id}

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_ROLE_ID.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_user_role_decision(self, user_id, request_body):
        """PUT /mobilews/user/role/{id}/decide

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_ROLE_DECIDE.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_user_status(self, user_id, request_body):
        """PUT /mobilews/user/status/{id}

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_STATUS_ID.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_user_status_decision(self, user_id, request_body):
        """PUT /mobilews/user/status/{id}/decide

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_STATUS_DECIDE.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_user_profiles(self):
        """GET /mobilews/userProfile

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE.value
        return self._get(url=self._build_url(endpoint))

    def update_user_profiles(self, request_body):
        """PUT /mobilews/userProfile

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE.value
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_user_profile_decision(self, user_id, request_body):
        """PUT /mobilews/userProfile/decide/{id}

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE_DECIDE_ID.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_user_profile_form(self):
        """GET /mobilews/userProfile/form

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE_FORM.value
        return self._get(url=self._build_url(endpoint))

    def get_user_profile(self, user_id):
        """GET /mobilews/userProfile/{id}

        :param str user_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE_ID.value.format(id=user_id)
        return self._get(url=self._build_url(endpoint))

    def update_user_profile(self, user_id, request_body):
        """PUT /mobilews/userProfile/{id}

        :param str user_id: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_PROFILE_ID.value.format(id=user_id)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def create_user_agent(self, request_body):
        """POST /mobilews/useragent

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = UserEndpoint.USER_AGENT.value
        return self._post(url=self._build_url(endpoint), json=request_body)
```
#### File: clients/mobile_ws/v2_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import V2Endpoint
class V2Client(BaseQ2Client):
    """Client for the /mobilews/v2 commercial tax-payment endpoints."""

    def get_commercial_tax_payments(self):
        """GET /mobilews/v2/commercial/taxpayment

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = V2Endpoint.COMMERCIAL_TAX_PAYMENT.value
        return self._get(url=self._build_url(endpoint))

    def get_commercial_tax_payment(self, tax_payment_id):
        """GET /mobilews/v2/commercial/taxpayment/{id}

        :param str tax_payment_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = V2Endpoint.COMMERCIAL_TAX_PAYMENT_ID.value.format(id=tax_payment_id)
        return self._get(url=self._build_url(endpoint))
```
#### File: clients/refresh_cache/refresh_cache_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.refresh_cache_endpoints import RefreshCacheEndpoint
class RefreshCacheClient(BaseQ2Client):
    """Client for the /refreshCache endpoint."""

    def refresh_cache(self):
        """POST /refreshCache

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = RefreshCacheEndpoint.REFRESH_CACHE.value
        return self._post(url=self._build_url(endpoint))
```
#### File: clients/v2/pfm_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v2_endpoints import PFMEndpoint
class PFMClient(BaseQ2Client):
    """Client for the /v2/pfm (personal financial management) endpoints:
    accounts, categories, institutions, jobs, members, transactions, users,
    and widgets."""

    def get_account(self, account_guid, member_guid=None):
        """GET /v2/pfm/accounts/{accountGuid}

        :param str account_guid: path parameter
        :param str member_guid: query parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.ACCOUNT_GUID.value.format(accountGuid=account_guid)
        # Copy the client-level defaults so this request's parameters don't leak.
        query_parameters = self._copy_query_parameters()
        query_parameters['member_guid'] = member_guid
        return self._get(url=self._build_url(endpoint), query_parameters=query_parameters)

    def update_account(self, account_guid, request_body):
        """PUT /v2/pfm/accounts/{accountGuid}

        :param str account_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.ACCOUNT_GUID.value.format(accountGuid=account_guid)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_categories(self):
        """GET /v2/pfm/categories

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.CATEGORIES.value
        return self._get(url=self._build_url(endpoint))

    def create_category(self, request_body):
        """POST /v2/pfm/categories

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.CATEGORIES.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def delete_category(self, category_guid):
        """DELETE /v2/pfm/categories/{categoryGuid}

        :param str category_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.CATEGORY_GUID.value.format(categoryGuid=category_guid)
        return self._delete(url=self._build_url(endpoint))

    def update_category(self, category_guid, request_body):
        """PUT /v2/pfm/categories/{categoryGuid}

        :param str category_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.CATEGORY_GUID.value.format(categoryGuid=category_guid)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def get_institutions(self, name=None, count=None):
        """GET /v2/pfm/institutions

        :param str name: query parameter (string to search the institution names by)
        :param int count: query parameter (number of results to return)
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.INSTITUTIONS.value
        query_parameters = self._copy_query_parameters()
        query_parameters['name'] = name
        query_parameters['count'] = count
        return self._get(url=self._build_url(endpoint), query_parameters=query_parameters)

    def get_institution(self, institution_guid):
        """GET /v2/pfm/institutions/{institutionGuid}

        :param str institution_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.INSTITUTION_GUID.value.format(institutionGuid=institution_guid)
        return self._get(url=self._build_url(endpoint))

    def get_institution_credentials(self, institution_guid):
        """GET /v2/pfm/institutions/{institutionGuid}/credentials

        :param str institution_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.INSTITUTION_CREDENTIALS.value.format(institutionGuid=institution_guid)
        return self._get(url=self._build_url(endpoint))

    def get_job(self, job_guid):
        """GET /v2/pfm/jobs/{jobGuid}

        :param str job_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.JOB.value.format(jobGuid=job_guid)
        return self._get(url=self._build_url(endpoint))

    def get_job_mfa_credentials(self, job_guid):
        """GET /v2/pfm/jobs/{jobGuid}/mfa_credentials

        :param str job_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.JOB_MFA_CREDENTIALS.value.format(jobGuid=job_guid)
        return self._get(url=self._build_url(endpoint))

    def resume_job(self, job_guid):
        """POST /v2/pfm/jobs/{jobGuid}/resume

        :param str job_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.JOB_RESUME.value.format(jobGuid=job_guid)
        return self._post(url=self._build_url(endpoint))

    def get_member(self, member_guid):
        """GET /v2/pfm/members/{memberGuid}

        :param str member_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBER_GUID.value.format(memberGuid=member_guid)
        return self._get(url=self._build_url(endpoint))

    def create_member(self, request_body):
        """POST /v2/pfm/members/

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBERS.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def update_member(self, member_guid, request_body):
        """PUT /v2/pfm/members/{memberGuid}

        :param str member_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBER_GUID.value.format(memberGuid=member_guid)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def delete_member(self, member_guid):
        """DELETE /v2/pfm/members/{memberGuid}

        :param str member_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBER_GUID.value.format(memberGuid=member_guid)
        return self._delete(url=self._build_url(endpoint))

    def delete_all_members(self):
        """DELETE /v2/pfm/members/all

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.ALL_MEMBERS.value
        return self._delete(url=self._build_url(endpoint))

    def create_member_credentials(self, member_guid, request_body):
        """POST /v2/pfm/members/{memberGuid}/credentials

        :param str member_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBER_CREDENTIALS.value.format(memberGuid=member_guid)
        return self._post(url=self._build_url(endpoint), json=request_body)

    def refresh_member(self, member_guid):
        """POST /v2/pfm/members/{memberGuid}/refresh

        :param str member_guid: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.MEMBER_REFRESH.value.format(memberGuid=member_guid)
        return self._post(url=self._build_url(endpoint))

    def update_transaction(self, transaction_guid, request_body):
        """PUT /v2/pfm/transactions/{transactionGuid}

        :param str transaction_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.TRANSACTION_GUID.value.format(transactionGuid=transaction_guid)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def update_split_transaction(self, transaction_guid, request_body):
        """PUT /v2/pfm/transactions/{transactionGuid}/splits

        :param str transaction_guid: path parameter
        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.TRANSACTION_SPLITS.value.format(transactionGuid=transaction_guid)
        return self._put(url=self._build_url(endpoint), json=request_body)

    def create_user(self):
        """POST /v2/pfm/users

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.USERS.value
        return self._post(url=self._build_url(endpoint))

    def get_widget(self, widget_short_name, no_redirect=None, q2token=None):
        """GET /v2/pfm/widgets/{widgetShortName}

        :param str widget_short_name: path parameter
        :param bool no_redirect: query parameter
            (Flag to return url data and not send a 302 redirect)
        :param str q2token: query parameter
            (Allow passing in q2token by query string for authentication)
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMEndpoint.WIDGET.value.format(widgetShortName=widget_short_name)
        query_parameters = self._copy_query_parameters()
        query_parameters['no_redirect'] = no_redirect
        query_parameters['q2token'] = q2token
        return self._get(url=self._build_url(endpoint), query_parameters=query_parameters)
```
#### File: clients/v3/file_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v3_endpoints import FileEndpoint
class FileClient(BaseQ2Client):
    """Client for the /v3/file endpoints."""

    def retrieve_file(self, file_id):
        """POST /v3/file/{id}/retrieve

        :param str file_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FileEndpoint.RETRIEVE_FILE.value.format(id=file_id)
        return self._post(url=self._build_url(endpoint))
```
#### File: clients/v3/funds_transfer_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v3_endpoints import FundsTransferEndpoint
class FundsTransferClient(BaseQ2Client):
    """Client for the /v3/fundsTransfer endpoints: create, retrieve, cancel,
    and authorize funds transfers."""

    def get_funds_transfer_permission(self):
        """GET /v3/fundsTransfer/permission

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.FUNDS_TRANSFER_PERMISSION.value
        return self._get(url=self._build_url(endpoint))

    def create_funds_transfer(self, request_body):
        """POST /v3/fundsTransfer/create

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.CREATE_FUNDS_TRANSFER.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def retrieve_funds_transfers(self, request_body):
        """POST /v3/fundsTransfer/retrieve

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.RETRIEVE_FUNDS_TRANSFERS.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def retrieve_funds_transfer(self, transfer_id):
        """POST /v3/fundsTransfer/{id}/retrieve

        :param str transfer_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.RETRIEVE_FUNDS_TRANSFER.value.format(id=transfer_id)
        return self._post(url=self._build_url(endpoint))

    def cancel_funds_transfer(self, transfer_id):
        """POST /v3/fundsTransfer/{id}/cancel

        :param str transfer_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.CANCEL_FUNDS_TRANSFER.value.format(id=transfer_id)
        return self._post(url=self._build_url(endpoint))

    def authorize_funds_transfer(self, transfer_id):
        """POST /v3/fundsTransfer/{id}/authorize

        :param str transfer_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = FundsTransferEndpoint.AUTHORIZE_FUNDS_TRANSFER.value.format(id=transfer_id)
        return self._post(url=self._build_url(endpoint))
```
#### File: clients/v3/pfm_account_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v3_endpoints import PFMAccountEndpoint
class PFMAccountClient(BaseQ2Client):
    """Client for the /v3/pfmAccount endpoints."""

    def get_pfm_account_permission(self):
        """GET /v3/pfmAccount/permission

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMAccountEndpoint.PFM_ACCOUNT_PERMISSION.value
        return self._get(url=self._build_url(endpoint))

    def retrieve_pfm_accounts(self, request_body):
        """POST /v3/pfmAccount/retrieve

        :param dict request_body: Dictionary object to send in the body of the request
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMAccountEndpoint.RETRIEVE_PFM_ACCOUNTS.value
        return self._post(url=self._build_url(endpoint), json=request_body)

    def retrieve_pfm_account(self, pfm_account_id, request_body):
        """POST /v3/pfmAccount/{id}/retrieve

        :param dict request_body: Dictionary object to send in the body of the request
        :param str pfm_account_id: path parameter
        :return: Response object
        :rtype: requests.Response
        """
        endpoint = PFMAccountEndpoint.RETRIEVE_PFM_ACCOUNT.value.format(id=pfm_account_id)
        return self._post(url=self._build_url(endpoint), json=request_body)
```
#### File: clients/v3/type_client.py
```python
from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v3_endpoints import TypeEndpoint
class TypeClient(BaseQ2Client):
    """Client for the /v3 type-enumeration endpoints."""

    def get_action_types(self):
        """GET /v3/actionType

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = TypeEndpoint.ACTION_TYPE.value
        return self._get(url=self._build_url(endpoint))

    def get_image_types(self):
        """GET /v3/imageType

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = TypeEndpoint.IMAGE_TYPE.value
        return self._get(url=self._build_url(endpoint))

    def get_product_types(self):
        """GET /v3/productType

        :return: Response object
        :rtype: requests.Response
        """
        endpoint = TypeEndpoint.PRODUCT_TYPE.value
        return self._get(url=self._build_url(endpoint))
```
#### File: jcook00/q2-api-client/setup.py
```python
from setuptools import setup, find_packages
LICENSE = 'LICENSE'
README = 'README.md'
INSTALL_REQUIREMENTS = 'requirements/install.txt'
TEST_REQUIREMENTS = 'requirements/test.txt'
DOCUMENTATION_REQUIREMENTS = 'requirements/documentation.txt'
def get_requirements(*files):
    """Collect dependency specifiers from the given requirements files.

    :param files: paths of requirements files to read
    :return: a flat list of dependency strings, in file order
    """
    return [dependency
            for requirements_file in files
            for dependency in read_file(requirements_file, split_lines=True)]
def read_file(file, split_lines=False):
    """Return the contents of *file*, optionally split into a list of lines.

    :param file: path of the file to read
    :param bool split_lines: when True, return a list of lines (no newlines)
    """
    with open(file, 'r') as handle:
        content = handle.read()
    return content.splitlines() if split_lines else content
# Package metadata; long_description and license text are read from files at
# build time, and install/test dependencies come from the requirements files.
setup(
    name='q2-api-client',
    version='0.1.0',
    description='Q2 Rest API Client',
    long_description=read_file(README),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://bitbucket.q2dc.local/projects/IMPS/repos/q2-api-client/browse',
    license=read_file(LICENSE),
    packages=find_packages(exclude=('tests', 'docs')),
    python_requires=">=3.7",
    install_requires=get_requirements(INSTALL_REQUIREMENTS),
    tests_require=get_requirements(TEST_REQUIREMENTS)
)
```
|
{
"source": "jcook793/C256Mgr",
"score": 3
}
|
#### File: C256Mgr/C256Mgr/c256mgr.py
```python
import intelhex
import wdc
import foenix
import srec
import configparser
import re
import sys
import argparse
import os
from serial.tools import list_ports
FLASH_SIZE = 524288     # Required size of flash file: 512 KB
CHUNK_SIZE = 4096       # Size of block of binary data to transfer

# NOTE(review): the globals below are never read in this module — the argparse
# options are used instead. They look like leftovers; confirm before removing.
label_file = ""
to_send = ""
port = ""
start_address = ""
count = ""
label = ""
def confirm(question):
    """Prompt the user with *question*; True when the reply starts with 'y'/'Y'."""
    reply = input(question)
    return reply.strip().lower().startswith("y")
def revision(port):
    """Get the version code for the debug port.

    :param str port: serial port (or host:port) of the C256 debug interface
    :return: the revision code as an uppercase hex string
    """
    c256 = foenix.FoenixDebugPort()
    try:
        c256.open(port)
        c256.enter_debug()
        try:
            data = c256.get_revision()
            return "%X" % data
        finally:
            # Always leave debug mode, even on error (this resets the C256).
            c256.exit_debug()
    finally:
        c256.close()
def upload_binary(port, filename, address):
    """Upload a binary file into the C256 memory.

    :param str port: serial port of the C256 debug interface
    :param str filename: binary file to upload
    :param str address: hexadecimal start address in C256 RAM
    """
    with open(filename, "rb") as f:
        c256 = foenix.FoenixDebugPort()
        try:
            c256.open(port)
            c256.enter_debug()
            try:
                current_addr = int(address, 16)
                # Stream the file to the C256 in CHUNK_SIZE blocks.
                block = f.read(CHUNK_SIZE)
                while block:
                    c256.write_block(current_addr, block)
                    current_addr += len(block)
                    block = f.read(CHUNK_SIZE)
            finally:
                c256.exit_debug()
        finally:
            c256.close()
def program_flash(port, filename, hex_address):
    """Program the flash memory using the contents of the C256's RAM.

    The image is first uploaded to RAM at *hex_address*, then the flash is
    erased and reprogrammed from that staging area. The file must be exactly
    FLASH_SIZE bytes, and the user is asked to confirm before flashing.

    :param str port: serial port of the C256 debug interface
    :param str filename: flash image file (must be FLASH_SIZE bytes)
    :param str hex_address: hexadecimal RAM address to stage the image at
    """
    base_address = int(hex_address, 16)
    address = base_address
    print("About to upload image to address 0x{:X}".format(address), flush=True)
    if os.path.getsize(filename) == FLASH_SIZE:
        if confirm("Are you sure you want to reprogram the flash memory? (y/n): "):
            with open(filename, "rb") as f:
                c256 = foenix.FoenixDebugPort()
                try:
                    c256.open(port)
                    c256.enter_debug()
                    try:
                        # Stage the whole image into RAM in CHUNK_SIZE blocks.
                        block = f.read(CHUNK_SIZE)
                        while block:
                            c256.write_block(address, block)
                            address += len(block)
                            block = f.read(CHUNK_SIZE)
                        print("Binary file uploaded...", flush=True)
                        c256.erase_flash()
                        print("Flash memory erased...", flush=True)
                        # Flash is programmed from the staged copy at base_address.
                        c256.program_flash(base_address)
                        print("Flash memory programmed...")
                    finally:
                        c256.exit_debug()
                finally:
                    c256.close()
    else:
        print("The provided flash file is not the right size.")
def dereference(port, file, label):
    """Get the address contained in the pointer with the label in the label file.

    Reads the 3-byte pointer stored at the label's address in C256 memory
    (little-endian, per the byte assembly below) and returns it as hex.

    :param str port: serial port of the C256 debug interface
    :param str file: label file to search
    :param str label: name of the pointer's label
    :return: the dereferenced 24-bit address as an uppercase hex string
    """
    c256 = foenix.FoenixDebugPort()
    try:
        address = lookup(file, label)
        c256.open(port)
        c256.enter_debug()
        try:
            data = c256.read_block(int(address, 16), 3)
            # Assemble the 24-bit pointer from little-endian bytes.
            deref = data[2] << 16 | data[1] << 8 | data[0]
            return "%X" % deref
        finally:
            c256.exit_debug()
    finally:
        c256.close()
def lookup(file, label):
    """Return the hex address linked to the passed label in the label file.

    Lines are expected to look like ``NAME = $ABCDEF`` (assembler label
    listing). Exits the program with status 2 if the label is not found.

    :param str file: path of the label file
    :param str label: label name to search for
    :return: the hexadecimal address (without the leading '$') as a string
    """
    # Fix: use a raw string and drop the needless '\=' escape — the old
    # pattern triggered invalid-escape-sequence warnings on modern Python.
    # Compile once instead of re-matching the literal on every line.
    pattern = re.compile(r'^(\S+)\s*=\s*\$(\S+)')
    with open(file) as f:
        for line in f:
            match = pattern.match(line)
            if match and match.group(1) == label:
                return match.group(2)
    sys.stderr.write("Could not find a definition for that label.\n")
    sys.exit(2)
def display(base_address, data):
    """Write a block of data to the console in a nice, hexadecimal format.

    Prints 16 bytes per row: the address, the hex bytes (grouped in eights),
    then a printable-ASCII rendering of the same bytes.

    :param int base_address: address of the first byte (used for row labels)
    :param bytes data: the bytes to display
    """
    text_buff = ""
    for i in range(0, len(data)):
        if (i % 16) == 0:
            # Start of a row: flush the previous row's ASCII column first.
            if text_buff != "":
                sys.stdout.write(" {}\n".format(text_buff))
                text_buff = ""
            sys.stdout.write("{:06X}: ".format(base_address + i))
        elif (i % 8) == 0:
            # Extra space between the two 8-byte halves of a row.
            sys.stdout.write(" ")
        sys.stdout.write("{:02X}".format(data[i]))
        b = bytearray(1)
        b[0] = data[i]
        if (b[0] & 0x80 == 0):
            c = b.decode('ascii')
            if c.isprintable():
                text_buff = text_buff + c
            else:
                text_buff = text_buff + "."
        else:
            # Non-ASCII bytes are rendered as '.' in the text column.
            text_buff = text_buff + "."
    sys.stdout.write(' {}\n'.format(text_buff))
def send_wdc(port, filename):
    """Send the data in the WDCTools binary hex file 'filename' to the C256 on the given serial port.

    :param str port: serial port of the C256 debug interface
    :param str filename: WDCTools binary file (WDCLN.EXE -HZ output)
    """
    infile = wdc.WdcBinFile()
    c256 = foenix.FoenixDebugPort()
    try:
        c256.open(port)
        infile.open(filename)
        try:
            # Each decoded block is written straight to C256 memory.
            infile.set_handler(lambda address, data: c256.write_block(address, data))
            c256.enter_debug()
            try:
                # Process the lines in the hex file
                infile.read_blocks()
            finally:
                c256.exit_debug()
        finally:
            infile.close()
    finally:
        c256.close()
def send_srec(port, filename):
    """Send the data in the SREC hex file 'filename' to the C256 on the given serial port.

    :param str port: serial port of the C256 debug interface
    :param str filename: Motorola SREC file to upload
    """
    infile = srec.SRECFile()
    c256 = foenix.FoenixDebugPort()
    try:
        c256.open(port)
        infile.open(filename)
        try:
            # SREC data arrives as hex text; convert to raw bytes before writing.
            infile.set_handler(lambda address, data: c256.write_block(address, bytes.fromhex(data)))
            c256.enter_debug()
            try:
                # Process the lines in the hex file
                infile.read_lines()
            finally:
                c256.exit_debug()
        finally:
            infile.close()
    finally:
        c256.close()
def send(port, filename):
    """Send the data in the Intel hex file 'filename' to the C256 on the given serial port.

    :param str port: serial port of the C256 debug interface
    :param str filename: Intel HEX file to upload
    """
    infile = intelhex.HexFile()
    c256 = foenix.FoenixDebugPort()
    try:
        c256.open(port)
        infile.open(filename)
        try:
            # Hex records arrive as text; convert to raw bytes before writing.
            infile.set_handler(lambda address, data: c256.write_block(address, bytes.fromhex(data)))
            c256.enter_debug()
            try:
                # Process the lines in the hex file
                infile.read_lines()
            finally:
                c256.exit_debug()
        finally:
            infile.close()
    finally:
        c256.close()
def get(port, address, length):
    """Read a block of data from the C256 and display it on the console.

    :param str port: serial port of the C256 debug interface
    :param str address: hexadecimal start address
    :param str length: hexadecimal byte count to read
    """
    c256 = foenix.FoenixDebugPort()
    try:
        c256.open(port)
        c256.enter_debug()
        try:
            data = c256.read_block(int(address, 16), int(length, 16))
            display(int(address, 16), data)
        finally:
            c256.exit_debug()
    finally:
        c256.close()
def list_serial_ports():
    """Print every serial port pyserial can find, with its identifying details."""
    serial_ports = list_ports.comports()
    if len(serial_ports) == 0:
        print("No serial ports found")
    for serial_port in serial_ports:
        print(f"{serial_port.device}")
        print(f"  Description: {serial_port.description}")
        print(f"  Manufacturer: {serial_port.manufacturer}")
        print(f"  Product: {serial_port.product}")
        print()
def tcp_bridge(tcp_host_port, serial_port):
    """Listen for TCP socket connections and relay messages to Foenix via serial port.

    :param str tcp_host_port: listen address, "HOST" or "HOST:PORT"
        (PORT defaults to 2560 when omitted)
    :param str serial_port: serial device to relay traffic to
    """
    parsed_host_port = tcp_host_port.split(":")
    tcp_host = parsed_host_port[0]
    # Fix: the old check (len > 0) was always true, so a bare "HOST" argument
    # raised IndexError; only read index [1] when a port was actually given.
    tcp_port = int(parsed_host_port[1]) if len(parsed_host_port) > 1 else 2560
    tcp_listener = foenix.FoenixTcpBridge(tcp_host, tcp_port, serial_port)
    tcp_listener.listen()
# ---- Command-line entry point: defaults come from c256.ini when present ----
config = configparser.ConfigParser()
config.read('c256.ini')

parser = argparse.ArgumentParser(description='Manage the C256 Foenix through its debug port.')
parser.add_argument("--port", dest="port", default=config['DEFAULT'].get('port', 'COM3'),
                    help="Specify the serial port to use to access the C256 debug port.")
parser.add_argument("--list-ports", dest="list_ports", action="store_true",
                    help="List available serial ports.")
parser.add_argument("--label-file", dest="label_file", default=config['DEFAULT'].get('labels', 'basic8'),
                    help="Specify the label file to use for dereference and lookup")
parser.add_argument("--count", dest="count", default="10", help="the number of bytes to read")
parser.add_argument("--dump", metavar="ADDRESS", dest="dump_address",
                    help="Read memory from the C256's memory and display it.")
parser.add_argument("--deref", metavar="LABEL", dest="deref_name",
                    help="Lookup the address stored at LABEL and display the memory there.")
parser.add_argument("--lookup", metavar="LABEL", dest="lookup_name",
                    help="Display the memory starting at the address indicated by the label.")
parser.add_argument("--revision", action="store_true", dest="revision",
                    help="Display the revision code of the debug interface.")
parser.add_argument("--flash", metavar="BINARY FILE", dest="flash_file",
                    help="Attempt to reprogram the flash using the binary file provided.")
parser.add_argument("--binary", metavar="BINARY FILE", dest="binary_file",
                    help="Upload a binary file to the C256's RAM.")
parser.add_argument("--address", metavar="ADDRESS", dest="address",
                    default=config['DEFAULT'].get('flash_address', '380000'),
                    help="Provide the starting address of the memory block to use in flashing memory.")
# Fix: help text was a copy-paste of --flash; --upload sends an Intel hex file.
parser.add_argument("--upload", metavar="HEX FILE", dest="hex_file",
                    help="Upload an Intel hex file to the C256's RAM.")
parser.add_argument("--upload-wdc", metavar="BINARY FILE", dest="wdc_file",
                    help="Upload a WDCTools binary hex file. (WDCLN.EXE -HZ)")
parser.add_argument("--upload-srec", metavar="SREC FILE", dest="srec_file",
                    help="Upload a Motorola SREC hex file.")
parser.add_argument("--tcp-bridge", metavar="HOST:PORT", dest="tcp_host_port",
                    help="Setup a TCP-serial bridge, listening on HOST:PORT and relaying messages to the Foenix via " +
                         "the configured serial port")

options = parser.parse_args()

try:
    if options.list_ports:
        list_serial_ports()
    elif options.port != "":
        # Dispatch on the first recognized option; anything else prints help.
        if options.hex_file:
            send(options.port, options.hex_file)
        elif options.wdc_file:
            send_wdc(options.port, options.wdc_file)
        elif options.srec_file:
            send_srec(options.port, options.srec_file)
        elif options.deref_name and options.label_file:
            address = dereference(options.port, options.label_file, options.deref_name)
            get(options.port, address, options.count)
        elif options.lookup_name and options.label_file:
            address = lookup(options.label_file, options.lookup_name)
            get(options.port, address, options.count)
        elif options.dump_address:
            get(options.port, options.dump_address, options.count)
        elif options.revision:
            rev = revision(options.port)
            print(rev)
        elif options.address and options.binary_file:
            upload_binary(options.port, options.binary_file, options.address)
        elif options.address and options.flash_file:
            program_flash(options.port, options.flash_file, options.address)
        elif options.tcp_host_port:
            tcp_bridge(options.tcp_host_port, options.port)
        else:
            parser.print_help()
    else:
        parser.print_help()
finally:
    # Fix: bare `print` was a no-op expression statement; call it so the
    # trailing newline is actually emitted.
    print()
```
#### File: C256Mgr/C256Mgr/foenix.py
```python
from abc import ABC, abstractmethod
import serial
import socket
import constants
class FoenixDebugPort:
    """Provide the connection to a C256 Foenix debug port.

    High-level command wrapper around a FoenixConnection transport.  The
    last response's two status bytes are kept in status0/status1 after each
    transfer() call.
    """
    connection = None  # FoenixConnection set by open(); None until then
    status0 = 0  # first status byte of the most recent response
    status1 = 0  # second status byte of the most recent response
    def open(self, port):
        """Open a connection to the C256 Foenix.

        A port containing ':' is treated as HOST:PORT for the TCP bridge;
        anything else is assumed to be a serial device name.
        """
        if ':' in port:
            # A pretty weak test, looking for something like '192.168.1.114:2560'
            self.connection = SocketFoenixConnection()
        else:
            # Otherwise assume it's a serial connection
            self.connection = SerialFoenixConnection()
        self.connection.open(port=port)
    def is_open(self):
        # Delegates to the underlying transport.
        return self.connection.is_open()
    def close(self):
        """Close the connection to the C256 Foenix."""
        self.connection.close()
    def enter_debug(self):
        """Send the command to make the C256 Foenix enter its debug mode."""
        self.transfer(constants.CMD_ENTER_DEBUG, 0, 0, 0)
    def exit_debug(self):
        """Send the command to make the C256 Foenix leave its debug mode.
        This will make the C256 reset.
        """
        self.transfer(constants.CMD_EXIT_DEBUG, 0, 0, 0)
    def erase_flash(self):
        """Send the command to have the C256 Foenix erase its flash memory."""
        self.transfer(constants.CMD_ERASE_FLASH, 0, 0, 0)
    def get_revision(self):
        """Gets the revision code for the debug interface.
        RevB2's revision code is 0, RevC4A is 1."""
        self.transfer(constants.CMD_REVISION, 0, 0, 0)
        return self.status1
    def program_flash(self, address):
        """Send the command to have the C256 Foenix reprogram its flash memory.
        Data to be written should already be in the C256's RAM at address."""
        self.transfer(constants.CMD_PROGRAM_FLASH, address, 0, 0)
    def write_block(self, address, data):
        """Write a block of data to the specified starting address in the C256's memory."""
        self.transfer(constants.CMD_WRITE_MEM, address, data, 0)
    def read_block(self, address, length):
        """Read a block of data of the specified length from the specified starting address of the C256's memory."""
        return self.transfer(constants.CMD_READ_MEM, address, 0, length)
    def readbyte(self):
        # Read a single byte from the transport and return it as an int.
        b = self.connection.read(1)
        return b[0]
    def transfer(self, command, address, data, read_length):
        """Send a command to the C256 Foenix

        Builds the 7-byte request header (sync byte, command, 3-byte
        address, 2-byte length), appends the payload (write commands only)
        and an XOR checksum byte, then waits for the response sync byte,
        two status bytes, any requested data, and the response checksum.

        `data == 0` is the caller's sentinel for "no payload"; in that case
        the header length field carries `read_length` instead.
        Returns the bytes read (or 0 when read_length == 0).
        """
        self.status0 = 0
        self.status1 = 0
        lrc = 0
        length = 0
        if data == 0:
            length = read_length
        else:
            length = len(data)
        # if command == 0x80:
        #     print('Switching to debug mode')
        # elif command == 0x81:
        #     print('Resetting')
        # else:
        #     print('Writing data of length {:X} to {:X}'.format(length, address))
        command_bytes = command.to_bytes(1, byteorder='big')
        address_bytes = address.to_bytes(3, byteorder='big')
        length_bytes = length.to_bytes(2, byteorder='big')
        header = bytearray(7)
        header[0] = constants.REQUEST_SYNC_BYTE
        header[1] = command_bytes[0]
        header[2] = address_bytes[0]
        header[3] = address_bytes[1]
        header[4] = address_bytes[2]
        header[5] = length_bytes[0]
        header[6] = length_bytes[1]
        # NOTE(review): the checksum covers header[0..5] only; header[6]
        # (the low length byte) is excluded -- confirm against the debug-port
        # protocol specification.
        for i in range(0, 6):
            lrc = lrc ^ header[i]
        if data:
            for i in range(0, length):
                lrc = lrc ^ data[i]
        lrc_bytes = lrc.to_bytes(1, byteorder='big')
        if data:
            packet = header + data + lrc_bytes
            written = self.connection.write(packet)
            if written != len(packet):
                raise Exception("Could not write packet correctly.")
        else:
            packet = header + lrc_bytes
            written = self.connection.write(packet)
            if written != len(packet):
                raise Exception("Could not write packet correctly.")
        # print('Sent [{}]'.format(packet.hex()))
        # Skip any noise until the response sync byte arrives.
        c = 0
        while c != constants.RESPONSE_SYNC_BYTE:
            c = self.readbyte()
        # print('Got 0xAA')
        read_bytes = 0
        if c == constants.RESPONSE_SYNC_BYTE:
            self.status0 = self.readbyte()
            self.status1 = self.readbyte()
            if read_length > 0:
                read_bytes = self.connection.read(read_length)
            # NOTE(review): the response checksum is read but never verified.
            read_lrc = self.readbyte()
        # print("Status: {:X}, {:X}".format(self.status0, self.status1))
        return read_bytes
class FoenixConnection(ABC):
    """Abstract transport for talking to a C256 Foenix debug port.

    Concrete subclasses (serial- or socket-backed) implement the same
    open/close/is_open/read/write contract so FoenixDebugPort can use
    either interchangeably.
    """

    @abstractmethod
    def open(self, port):
        """Open the transport identified by *port*."""

    @abstractmethod
    def close(self):
        """Shut the transport down."""

    @abstractmethod
    def is_open(self):
        """Report whether the transport is currently usable."""

    @abstractmethod
    def read(self, num_bytes):
        """Read up to *num_bytes* bytes and return them."""

    @abstractmethod
    def write(self, data):
        """Write *data* and return the number of bytes written."""
class SerialFoenixConnection(FoenixConnection):
    """ Connects to Foenix via local serial port (6 Mbaud, 8N1). """
    serial_port = None  # serial.Serial instance once open() has been called

    def open(self, port):
        """Open the named serial device.

        pyserial opens the port as soon as Serial() is constructed with a
        port name, so the old try/except open-close-open dance (which relied
        on a bare `except:`) is unnecessary; we only call open() explicitly
        if the port somehow came back closed.
        """
        self.serial_port = serial.Serial(port=port,
                                         baudrate=6000000,
                                         bytesize=serial.EIGHTBITS,
                                         parity=serial.PARITY_NONE,
                                         stopbits=serial.STOPBITS_ONE,
                                         timeout=60,
                                         write_timeout=60)
        if not self.serial_port.is_open:
            self.serial_port.open()

    def close(self):
        """Close the serial device."""
        self.serial_port.close()

    def is_open(self):
        # BUG FIX: Serial.is_open is a *property* in pyserial >= 3.0; the old
        # code called it (`self.serial_port.is_open()`), which raised
        # "TypeError: 'bool' object is not callable".
        return self.serial_port.is_open

    def read(self, num_bytes):
        """Read up to num_bytes (bounded by the 60 s timeout)."""
        return self.serial_port.read(num_bytes)

    def write(self, data):
        """Write data; returns the number of bytes written."""
        return self.serial_port.write(data)
class SocketFoenixConnection(FoenixConnection):
    """ Connects to Foenix via TCP-serial bridge """
    # The socket is created per open() call: the old class-level socket was
    # shared by every instance and could never be reused after close().
    tcp_socket = None
    _is_open = False

    def open(self, port):
        """Connect to 'HOST[:PORT]'; PORT defaults to 2560 when omitted."""
        parsed_host_port = port.split(":")
        tcp_host = parsed_host_port[0]
        # BUG FIX: the old guard `len(parsed_host_port) > 0` was always true
        # (split() never returns an empty list), so a bare host name raised
        # IndexError instead of falling back to the 2560 default.
        tcp_port = int(parsed_host_port[1]) if len(parsed_host_port) > 1 else 2560
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # AF_INET = IPv4, SOCK_STREAM = TCP socket
        print("Connecting to remote Foenix at {}:{}...".format(tcp_host, tcp_port), end="")
        self.tcp_socket.connect(tuple([tcp_host, tcp_port]))
        print(" ✓")
        self._is_open = True

    def close(self):
        """Close the TCP connection."""
        self.tcp_socket.close()
        self._is_open = False

    def is_open(self):
        return self._is_open

    def read(self, num_bytes):
        # recv() may return fewer than num_bytes (normal TCP semantics).
        bytes_read = self.tcp_socket.recv(num_bytes)
        return bytes_read

    def write(self, data):
        # sendall() either sends everything or raises, so report full length.
        self.tcp_socket.sendall(data)
        return len(data)
class FoenixTcpBridge():
    """ Listens on a TCP socket and relays communications to the Foenix serial debug port

    Each TCP request is the 7-byte debug header, an optional write payload,
    and an LRC byte; the bridge forwards it over serial and streams the
    Foenix's response back to the client.
    """
    def __init__(self, tcp_host, tcp_port, serial_port):
        # tcp_host/tcp_port: interface and port to listen on.
        # serial_port: device name of the local Foenix debug serial port.
        self.tcp_host = tcp_host
        self.tcp_port = tcp_port
        self.serial_port = serial_port
    def recv_bytes(self, num_bytes):
        # Receive *exactly* num_bytes from the client (recv may return short
        # reads), or the falsy value recv() produced if the client hung up.
        total_bytes_received = self.tcp_connection.recv(num_bytes)
        if not total_bytes_received: # Client hung up
            return total_bytes_received
        total_bytes_received = bytearray(total_bytes_received)
        while len(total_bytes_received) < num_bytes:
            bytes_received = self.tcp_connection.recv(num_bytes - len(total_bytes_received))
            total_bytes_received.extend(bytes_received)
        return bytes(total_bytes_received)
    def listen(self):
        """ Listen for TCP socket connections and relay messages to Foenix via serial port

        Runs forever, accepting one client at a time; when a client
        disconnects the outer loop re-binds and waits for the next one.
        """
        while True:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: # AF_INET=IPv4, SOCK_STREAM=TCP
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                s.bind((self.tcp_host, self.tcp_port))
                s.listen()
                print("Listening for connections to {} on port {}".format(self.tcp_host, self.tcp_port))
                self.tcp_connection, tcp_address = s.accept()
                print("Received connection from {}".format(tcp_address[0]))
                with self.tcp_connection:
                    while True:
                        # First get the 7-byte request header
                        header = self.recv_bytes(7)
                        if not header:
                            print("Connection from {} closed".format(tcp_address[0]))
                            break
                        command = header[1]
                        # The data size comes from bytes 5 and 6 in the header
                        data_length = int.from_bytes(header[5:7], byteorder="big")
                        # The data payload only comes along with a write command
                        data = None
                        if command == constants.CMD_WRITE_MEM:
                            data = self.recv_bytes(data_length)
                        # The LRC byte is required and denotes the end of the request
                        request_lrc_byte = self.recv_bytes(1)
                        # TCP request is now complete, time to pass along to Foenix
                        bytes_to_write = bytearray(header)
                        if data is not None:
                            bytes_to_write.extend(data)
                        bytes_to_write.extend(request_lrc_byte)
                        # NOTE(review): the serial port is opened and closed
                        # per request -- functional, but reconnect overhead on
                        # every transaction.
                        with serial.Serial(port=self.serial_port, baudrate=6000000, timeout=60,
                                           write_timeout=60) as serial_connection:
                            num_bytes_written = serial_connection.write(bytes_to_write)
                            # Probably should handle this situation a bit more elegantly
                            if num_bytes_written != len(bytes_to_write):
                                raise Exception("Serial port error - tried writing {} bytes, was only able to write {}"
                                                .format(len(bytes_to_write), num_bytes_written))
                            # Read until we get the start of the response
                            response_sync_byte = serial_connection.read(1)
                            # Next two bytes are the status bytes
                            response_status_bytes = serial_connection.read(2)
                            # Read the data payload if requested
                            response_data = None
                            if command == constants.CMD_READ_MEM and data_length > 0:
                                response_data = serial_connection.read(data_length)
                            response_lrc_byte = serial_connection.read(1)
                            # Construct the response
                            response = bytearray(response_sync_byte)
                            response.extend(response_status_bytes)
                            if response_data is not None:
                                response.extend(response_data)
                            response.extend(response_lrc_byte)
                            # Return the response back to the TCP client
                            self.tcp_connection.sendall(response)
```
|
{
"source": "jcook/utilities",
"score": 2
}
|
#### File: utilities/cstool/cstool.py
```python
import sys, getopt
import requests
__VERSION__ = '0.0.1'  # tool version reported by -v / --version
# Banner printed by Version().
welcome = """
Checking Status from [http://192.168.3.11:8002/] (Shanghai) v%s
License: https://github.com/jcook/utilities/blob/master/LICENSE
""" % __VERSION__
def req_post(id=201625200000, step_forward=10):
    """POST a range of transaction ids to the status page and print each
    extracted result fragment.

    :param id: highest transaction id to query (parameter name shadows the
        builtin; kept for backward compatibility).
    :param step_forward: how many ids before `id` to include in the range.
    """
    # Py3 fix: the old print *statements* were Python-2-only; single-argument
    # print() calls behave identically under both interpreters.
    print('Checking Status from [%d] to [%d]' % (id - step_forward, id))
    url = 'http://202.109.79.211:8002/TransFlowQueryResult.jsp'
    headers = {'Content-Type': 'application/x-www-form-urlencoded',
               'Host': '192.168.3.11:8002',
               'Connection': 'keep-alive',
               'Content-Length': '58',
               'Cache-Control': 'max-age=0',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
               'Origin': 'http://192.168.3.11:8002',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36',
               'Referer': 'http://192.168.3.11:8002/',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
               }
    for i in range(id - step_forward, id + 1):
        s = 'query=1&transactionid=%s' % str(i)
        s += '&button1=+%C8%B7+%B6%A8+'
        try:
            r = requests.post(url, data=s, headers=headers)
            ret = r.text
            # Pull the fragment between the <P> </P> marker and </BODY>.
            idx1 = ret.find('<P> </P>') + len('<P> </P>')
            idx2 = ret.find('</BODY>')
            print(ret[idx1:idx2 - 4])
        except requests.RequestException:
            # BUG FIX: the old bare `except:` swallowed every exception
            # (including KeyboardInterrupt); only network/HTTP errors are
            # expected here.  Message typo ("Unexcepted error orrur") fixed.
            print('Unexpected error occurred. Check the network first.')
            sys.exit(1)
def Usage():
    """Print command-line usage and an example to stdout."""
    # Py3 fix: print statements converted to single-argument print() calls,
    # which behave identically under Python 2 and 3.
    print('Usage:')
    print(' -h, --help: print help message')
    print(' -v, --version: Show version info')
    print(' -i, --id: Indicate id for checking')
    print(' -f, --forward: Indicate forward number for checking')
    print('')
    print('Example:')
    print(' tool.exe -i 201625200020 -f 5')
    print('Will check the result from 201625200015 to 201625200020.')
def Version():
    """Print the welcome/version banner."""
    # Py3 fix: print statement converted to a print() call (same output on
    # both interpreters).
    print(welcome)
def main(argv):
    """Parse command-line options and run the status check.

    :param argv: full argv list (argv[0] is the program name).
    """
    check_id = 0  # renamed from `id`, which shadowed the builtin
    forward = 10
    try:
        # BUG FIX: 'help' and 'version' were missing from the long-options
        # list, so `--help`/`--version` always raised GetoptError and their
        # handler branches below were unreachable.
        opts, args = getopt.getopt(argv[1:], 'hvi:f:',
                                   ['help', 'version', 'id=', 'forward='])
    except getopt.GetoptError as err:
        # Py3 fix: `except E, err` is Python-2-only syntax; `as err` works on
        # both interpreters.
        print(str(err))
        Usage()
        sys.exit(2)
    for o, a in opts:
        if o in ('-h', '--help'):
            Usage()
            sys.exit(1)
        elif o in ('-v', '--version'):
            Version()
            sys.exit(1)
        elif o in ('-i', '--id'):
            check_id = int(a)
        elif o in ('-f', '--forward'):
            forward = int(a)
        else:
            print('Invalid option(s)!')
            Usage()
            sys.exit(3)
    if check_id == 0:
        # An id is mandatory; show usage and bail.
        Usage()
        sys.exit(1)
    req_post(check_id, forward)


if __name__ == '__main__':
    main(sys.argv)
```
|
{
"source": "jcooky/flask-clova",
"score": 2
}
|
#### File: flask-clova/flask_clova/directives.py
```python
class directive:
    """Factory helpers that build CEK response directives."""

    @staticmethod
    def open_mike():
        """Return the Clova KeepConversation directive (keeps the mike open)."""
        header = {
            "name": "KeepConversation",
            "namespace": "Clova",
        }
        payload = {"explicit": True}
        return {"header": header, "payload": payload}
```
|
{
"source": "jcoombes/computing-year-2",
"score": 4
}
|
#### File: jcoombes/computing-year-2/pion.py
```python
from __future__ import division
import numpy as np
import fourvectors as fv
import properties as props
class Particle(object):
    """
    A particle base class, provides relativistic movement.
    Inputs:
    e - energy. float. measured in MeV.
    born - initial location. expect len(born)==3.
    k - direction of travel.
    m - mass. float. measured in MeV
    tau - lifetime. measured in s.

    Works in natural units (c = 1); distances are light-seconds.
    Frame tags in the inline comments: lf = lab frame, pf = particle frame.
    """
    def __init__(self, e, born, k, m, tau):
        """
        Make sure e is greater than rest mass of particle.
        """
        if e < m:
            raise ValueError('particle energy is less than the rest mass energy.')
        self.e = e #MeV Lab frame. #2:pf
        self.m = m #MeV
        self.tau = tau #Particle frame. #2:pf
        self.k = np.array(k, dtype='float64') #Lab frame #2: pf
        self.born = np.array(born, dtype = 'float64') #Lf #2: lf
        self.life = tau #pf  -- subclasses overwrite with an exponential draw
        self.g = self.e/self.m #lf  -- Lorentz gamma, E/m with c=1
        self.b = np.sqrt(1-1/(self.g*self.g)) #lf  -- beta = v/c from gamma
        self.p = self.e*self.b # lf  -- relativistic momentum magnitude
        self.knorm = self.k/np.sqrt(self.k.dot(self.k)) #normalised direction lf
        self.pvec = self.p * self.knorm #momentum vector. # lf
        self.emom = fv.FourVector(self.e, self.pvec) #energy momentum 4-vector.
        #Muon position 4-vector currently uses lifetime in own frame, position in lab frame
        #Muon energy 4-vector currently uses energy in pion frame. Momentum in momentum/lab frame.
        self.walls = None  # set by hits_walls() when the track reaches a wall
    def decay_direction(self):
        """
        Randomly generates a uniform vector on the unit sphere.
        """
        u = np.random.sample()
        v = np.random.sample()
        phi = 2*np.pi*u
        theta = np.arccos(2*v-1) #mathworld.wolfram.com/SpherePointPicking.html
        x = np.sin(theta) * np.cos(phi)
        y = np.sin(theta) * np.sin(phi)
        z = np.cos(theta)
        return np.array([x,y,z])
    def hits_walls(self):
        """
        Method to determine location particle ultimately intersects detector walls.
        Use after particle.move()

        Requires self.lab_u and self.lab_life, which are set by the
        subclasses' move() methods.  The chamber is a cylinder of radius
        props.chamber_r spanning z = 0 .. props.chamber_z; t1/t2 are the
        end-cap intersection times and t3 the (positive-root) barrel
        intersection time.
        """
        t1 = (0-self.born[2])/self.lab_u[2] # z = 0
        t2 = (props.chamber_z-self.born[2])/self.lab_u[2] # z = 100
        quant = (self.born[0]*self.lab_u[0] + self.born[1]*self.lab_u[1])
        t3 = (-quant+np.sqrt(quant*quant \
            - (self.lab_u.dot(self.lab_u)*\
            (self.born[:2].dot(self.born[:2])-(props.chamber_r*props.chamber_r)))))\
            /self.lab_u.dot(self.lab_u)
        first_intersection_time = min([t for t in [t1, t2, t3] if t>0])
        hits = self.born + first_intersection_time*self.lab_u
        if first_intersection_time <= self.lab_life: #hits wall before it decays
            self.walls = hits
            return hits
        else:
            return None
    def inside(self):
        """
        Return true if a particle is inside chamber.
        Use after particle.move().

        Checks the wall-hit point if one exists, else the decay point, else
        the birth point.  NOTE(review): the radial and axial conditions are
        OR-ed (either one suffices), not AND-ed -- confirm this is intended.
        """
        if hasattr(self, 'walls') and self.walls is not None:
            x = self.walls
        elif hasattr(self, 'died') and self.died is not None:
            x = self.died
        else:
            x = self.born
        if x[0]*x[0] + x[1] * x[1] <= props.chamber_r*props.chamber_r:
            return True
        elif 100 >= x[2] >= 0:
            return True
        else:
            return False
class Pion(Particle):
    """
    It's a pion. All our particles start off as these.

    A large pool of exponentially distributed proper lifetimes is drawn once
    at class-creation time and indexed by `seed`, so constructing many pions
    is fast and reproducible.
    """
    # BUG FIX: numpy's `size` argument must be an int; the float literal 2E7
    # raises TypeError on modern numpy.
    pion_lives = np.random.exponential(props.pion_lifetime, int(2e7))
    seed = 0 # so we can get back the same exp-distributed rand numbers.
    # Code is faster if we generate lots at once then look it up.
    instances = []  # every Pion ever built, in creation order

    def __init__(self, e, born=(0, 0, 0), k=(0, 0, 1), m=props.pion_mass, tau=props.pion_lifetime, branch=0.5):
        """
        :param e: total energy in MeV (must exceed the pion rest mass).
        :param born: initial 3-position (tuple default avoids the
            mutable-default pitfall; Particle converts it to an ndarray).
        :param k: initial direction of travel.
        :param m: rest mass in MeV.
        :param tau: mean proper lifetime in seconds.
        :param branch: branching ratio used by decay(). REALLY IMPORTANT.
        """
        super(Pion, self).__init__(e, born, k, m, tau)
        self.branch = branch # Branching Ratio. REALLY IMPORTANT.
        self.life = self.pion_lives[self.seed]  # this pion's proper-lifetime draw
        Pion.seed += 1
        Pion.instances.append(self)

    def move(self):
        """
        Special faster pion method to move faster than the general lorenz boost.
        propagates particle for length of life in lab frame.
        Output distances in light-seconds as c=1.
        See ls2m function in properties file for output in metres.

        NOTE(review): the displacement ignores self.born, i.e. every pion is
        assumed to start at the origin -- confirm this is intended.
        """
        self.died = self.knorm * self.life * self.b * self.g
        return self.died

    def decay(self):
        """
        Makes a new daughter particle (the pion itself simply stops being
        propagated): a Muon with probability (1 - branch), otherwise an
        Electron.  Daughter energies are the two-body values assuming a
        massless neutrino.
        Note if self.died hasn't been set by move() the particle decays in place.
        """
        if np.random.sample() > self.branch:
            energy = (props.pion_mass*props.pion_mass
                      +props.muon_mass*props.muon_mass)/(2*props.pion_mass)
            # Assumes zero neutrino mass.
            return Muon(energy, self.died, self.decay_direction())
        else:
            energy = (props.pion_mass*props.pion_mass
                      +props.electron_mass*props.electron_mass)/(2*props.pion_mass)
            # Assumes zero neutrino mass.
            return Electron(energy, self.died, self.decay_direction(), parent='Pion')

    def hits_walls(self):
        """
        Specific pion method to detect if pion hits the back wall of the chamber.
        Returns None if False, Returns co-ordinates if true.

        NOTE(review): the recorded hit zeroes x and y, which is exact only for
        pions travelling straight down the z axis -- confirm.
        """
        if self.died[2] >= props.chamber_z:
            self.walls = np.array([0, 0, props.chamber_z], dtype='float64')
            return self.walls

    # def detect(self):
    #     raise NotImplementedError

    def __str__(self):
        '''Returns a string representation of particle.
        This output should be used to debug.
        Do not use the output of this to create new pions.'''
        # Collapsed the old four-branch hasattr ladder; the fallback strings
        # ('None ' with a trailing space for walls, 'None' for detected)
        # reproduce the original output exactly.
        walls = self.walls if hasattr(self, 'walls') else 'None '
        detected = self.detected if hasattr(self, 'detected') else 'None'
        return 'Pion({},died = {}, walls = {}, detected = {})'\
            .format(self.e, self.died, walls, detected)
class Muon(Particle):
    """
    It's a muon. Pions decay into this.
    Inputs:
    e - total energy.
    born - initial position. 3-position.
    k - initial direction in created frame.
    """
    # BUG FIX: numpy's `size` argument must be an int; the float 2e7 raises
    # TypeError on modern numpy.
    muon_lives = np.random.exponential(props.muon_lifetime, int(2e7))
    seed = 0 # so we can get back the same exp-distributed rand numbers.
    # Code is faster if we generate lots at once then look it up.
    instances = []  # every Muon ever built, in creation order

    def __init__(self, e, born, k, m=props.muon_mass, tau=props.muon_lifetime):
        super(Muon, self).__init__(e, born, k, m, tau)
        self.life = self.muon_lives[self.seed]  # this muon's proper-lifetime draw
        Muon.seed += 1
        Muon.instances.append(self)

    def decay(self):
        """
        Unlike pion decays, muons can only decay into electrons.
        The electron energy is drawn from props.michel().
        """
        return Electron(props.michel(), self.died, self.decay_direction(), parent='Muon')

    def move(self):
        """
        propagates particle for length of life in lab frame.
        This method will only work for derived classes (with defined self.life)
        Output distances in light-seconds as c=1.
        Use pion.props.ls2m() for an output in metres.

        NOTE(review): lab_life is dilated by the *most recently created*
        pion's gamma (Pion.instances[-1].g), i.e. it assumes this muon's
        parent was the last pion built -- confirm.
        """
        self.lab_emom = self.emom.super_boost(-self.b, self.knorm)
        self.lab_pvec = self.lab_emom.r
        self.lab_e = self.lab_emom.ct
        self.lab_u = self.lab_pvec/self.lab_e  # lab-frame velocity (c=1)
        self.lab_life = Pion.instances[-1].g * self.life
        self.died = self.born + self.lab_u * self.lab_life
        return self.died

    def __str__(self):
        '''
        Returns a string representation of the particle.
        This output should be used to debug.
        Do not use this output to make new muons.
        '''
        # Collapsed the old four-branch hasattr ladder; fallback strings
        # match the original output exactly.
        walls = self.walls if hasattr(self, 'walls') else 'None '
        detected = self.detected if hasattr(self, 'detected') else 'None'
        return 'Muon({},died = {}, walls = {}, detected = {})'\
            .format(self.e, self.died, walls, detected)
class Electron(Particle):
    """
    It's an electron. Electrons have an extra parameter showing their parent.
    """
    instances = []  # every Electron ever built, in creation order

    def __init__(self, e, born, k, m=props.electron_mass, parent='Pion'):
        # Electrons are effectively stable; tau is a dummy value because the
        # Particle base class requires one.
        tau = 1
        super(Electron, self).__init__(e, born, k, m, tau)
        self.parent = parent  # 'Pion' or 'Muon'; checked in move()
        # Easier to have both pion and muon inherit from particle,
        # and delete tau in electron, than implement for both pion and muon.
        Electron.instances.append(self)

    def move(self):
        """
        propagates particle for length of life in lab frame.
        This method will only work for derived classes (with defined self.life)
        Output distances in light-seconds as c=1.
        Use pion.props.ls2m() for an output in metres.

        Raises ValueError when self.parent is neither 'Pion' nor 'Muon'.
        NOTE(review): like Muon.move(), the time dilation uses the gamma of
        the most recently created parent-class instance -- confirm.
        """
        self.lab_emom = self.emom.super_boost(-self.b, self.knorm)
        self.lab_pvec = self.lab_emom.r
        self.lab_e = self.lab_emom.ct
        self.lab_u = self.lab_pvec/self.lab_e  # lab-frame velocity (c=1)
        if self.parent == 'Pion':
            self.lab_life = Pion.instances[-1].g * self.life
        elif self.parent == 'Muon':
            self.lab_life = Muon.instances[-1].g * self.life
        else:
            # BUG FIX: `raise ValueError, '...'` is Python-2-only syntax and
            # a SyntaxError under Python 3; the call form works on both.
            raise ValueError('electron has no parents.')
        self.died = self.born + self.lab_u * self.lab_life
        return self.died

    def __str__(self):
        '''
        Returns a string representation of the particle.
        This output should be used to debug.
        Do not use this output to make new electrons.
        '''
        # Collapsed the old four-branch hasattr ladder; fallback strings
        # match the original output exactly.
        walls = self.walls if hasattr(self, 'walls') else 'None '
        detected = self.detected if hasattr(self, 'detected') else 'None'
        return 'Electron({},died = {}, walls = {}, detected = {})'\
            .format(self.e, self.died, walls, detected)
```
#### File: jcoombes/computing-year-2/properties.py
```python
import numpy as np
# Particle rest masses (MeV) and mean proper lifetimes (s).
pion_mass = 139.6 #MeV
pion_lifetime = 2.6e-8 #s
muon_mass = 105.7 #MeV
muon_lifetime = 2.2e-6 #s
electron_mass = 0.5 #MeV
#electron lifetime is so long we can assume it lives forever.
#also neutrino mass is 0.320eV. We can assume they're massless.
# Decay-chamber geometry, expressed in light-seconds (c = 1 units).
chamber_z = 3.335640952e-7 # 100m in light seconds
chamber_r = 8.339102380e-9 #2.5m in light seconds
# 1eV^-1 of length = 1.97327e-7 m
# 1eV of mass = 1.782662e-36 kilograms
# 1eV-1 of time = 6.582119e-16 seconds
def michel():
    """Draw one decay-electron energy in MeV.

    Takes the larger of two U(0,1) samples -- which has a linearly rising
    density on [0, 1] -- and scales it by 53, presumably a rough stand-in
    for the Michel spectrum with its ~53 MeV endpoint.
    """
    x, y = np.random.sample(2)
    return max(x, y) * 53
def ls2m(lightseconds):
    """
    Convert light-seconds to metres (1 light-second = 299 792 458 m).
    Only use this to make output more human friendly.
    Not built to do any calculations in metres.
    Inputs:
    lightseconds - float.
    Raises TypeError for list input (convert to an array or float first).
    """
    # BUG FIX: `raise TypeError, '...'` is Python-2-only syntax (SyntaxError
    # under Python 3); also prefer isinstance() over `type(x) is list`.
    if isinstance(lightseconds, list):
        raise TypeError('Convert this list to an array or float.')
    return lightseconds * 299792458
```
|
{
"source": "jcoombes/tictactoe",
"score": 4
}
|
#### File: jcoombes/tictactoe/tictactoe.py
```python
import numpy as np
import random
import copy
import time
def show_board(board: np.ndarray) -> str:
    """Render the 3x3 board as a text grid with dashed row separators."""
    padding = "\n"
    divider = "--- --- ---"
    rows = [" {} | {} | {} ".format(*board[r]) for r in range(3)]
    return "\n".join((padding, rows[0], divider, rows[1], divider, rows[2], padding))
def turn_flipper(x_or_o: str) -> str:
    """Return the opposing mark ('x' <-> 'o'); raise ValueError otherwise."""
    if x_or_o == 'o':
        return 'x'
    if x_or_o == 'x':
        return 'o'
    raise ValueError(x_or_o + 'is neither "x" nor "o"')
def human_turn(board: np.ndarray, x_or_o: str) -> np.ndarray:
    """
    Prompt until the player types one of the nine location keys, then mark
    that cell.  Careful, this also mutates data.
    :param board: numpy array containing game state.
    :param x_or_o: the string 'x' or the string 'o', whose side are they on.
    :return: board after move.
    """
    # Keyboard layout mirrors the grid: e/r/t top row, d/f/g middle, c/v/b bottom.
    key_positions = {'e': (0, 0), 'r': (0, 1), 't': (0, 2),
                     'd': (1, 0), 'f': (1, 1), 'g': (1, 2),
                     'c': (2, 0), 'v': (2, 1), 'b': (2, 2)}
    choice = ""
    while choice not in key_positions:
        choice = input("Use your keyboard to select your location, player " + x_or_o + ": ")
    board[key_positions[choice]] = x_or_o
    return board
def score_one_gamestate(board: np.array, x_or_o: str, available: list, ai_player_choice: str, score_dict=None,
                        weight=1) -> dict:
    """Recursively tally the terminal outcomes reachable from `board`.

    Each win/loss/draw contributes 1/weight, where `weight` is the product
    of branching factors along the path, so outcomes behind fewer forced
    choices count for more.

    :param board: current 3x3 game state.
    :param x_or_o: the mark whose turn it is at this node.
    :param available: open (row, col) cells at this node.
    :param ai_player_choice: the mark the AI is optimising for.
    :param score_dict: accumulator dict; None creates a fresh one (avoids
        the mutable-default-argument pitfall).
    :param weight: product of branching factors on the path so far.
    :return: the accumulated {'wins', 'losses', 'draws'} dict.
    """
    if score_dict is None:
        score_dict = {'wins': 0, 'losses': 0, 'draws': 0}
    is_finished = game_over(board)
    if is_finished == ai_player_choice: # This might be wrong, we want ai_player_choice
        score_dict['wins'] += 1 / weight
        return score_dict
    elif is_finished == turn_flipper(ai_player_choice):
        score_dict['losses'] += 1 / weight
        return score_dict
    elif is_finished == 'draw':
        score_dict['draws'] += 1 / weight
        return score_dict
    else:
        # Not terminal: recurse into every candidate move for the side to play.
        weight *= len(available)
        for candidate_move in available:
            deeper = copy.deepcopy(board)  # never mutate the caller's board
            deeper[candidate_move] = x_or_o
            available_moves = find_available_moves(deeper)
            score_dict = score_one_gamestate(deeper,
                                             turn_flipper(x_or_o),
                                             available_moves,
                                             ai_player_choice,
                                             score_dict,
                                             weight)
        return score_dict
def optimal_ai_move(board: np.array, x_or_o: str, available: list):
    """
    The heart of the robot brain.
    Given a board state, will return the optimal move.
    :param board: numpy array of the board.
    :param x_or_o: which side is the AI playing on? #This isn't exactly true in recursive calls.
    :param available: available[(row, col)] = True, if the space hasn't been taken yet.
    :return: best_move: (row, col). The move with the greatest number of simulated wins.
    """
    possibility_dict = {}
    for candidate_move in available:
        deeper = copy.deepcopy(board)
        deeper[candidate_move] = x_or_o
        # NOTE(review): rebinding `available` here does not affect the
        # ongoing for-loop (the iterator holds its own reference to the
        # original list), but the shadowing is confusing -- consider a
        # distinct name.
        available = find_available_moves(deeper)
        possibility_dict[candidate_move] = score_one_gamestate(deeper,
                                                               turn_flipper(x_or_o),
                                                               available,
                                                               x_or_o,
                                                               score_dict=None)
    # Rank moves by (wins + draws) / total simulated outcomes, i.e. the
    # fraction of futures that are not losses.
    metric_dict = {candidate_move: (score_dict['wins'] + score_dict['draws']) / sum(score_dict.values())
                   for candidate_move, score_dict in possibility_dict.items()}
    best_move = max(metric_dict, key=metric_dict.get)
    return best_move, possibility_dict
def find_available_moves(board: np.array) -> list:
    """Return (row, col) tuples for every cell not yet claimed by 'x' or 'o'."""
    open_mask: np.array = np.logical_and(board != 'x', board != 'o')
    return [(row, col) for row in range(3) for col in range(3) if open_mask[row, col]]
def ai_turn(board: np.array, x_or_o: str) -> np.array:
    """Pick and play the AI's move, mutating and returning `board`.

    `policy` is hard-coded to 'optimal'; the 'random' branch is kept as the
    original fallback policy.  The sleeps appear to be cosmetic pacing so
    fast moves don't feel instantaneous -- TODO confirm.
    """
    # First policy is random policy, it randomly decides to pick from the available spaces.
    print("bleep bloop, now it is my turn")
    policy = 'optimal'
    available = find_available_moves(board)
    if policy == 'random':
        next_move = random.choice(available)
    if policy == 'optimal':
        if len(available) == 9:
            # Opening move on an empty board: always take the centre.
            next_move = (1, 1) # Really hacky memoisation, solving the first move takes 120 seconds otherwise.
        elif 6 < len(available) < 9:
            time.sleep(0.1)
            next_move = optimal_ai_move(board, x_or_o, available)[0]
        else:
            time.sleep(0.3)
            next_move = optimal_ai_move(board, x_or_o, available)[0]
    board[next_move] = x_or_o
    return board
def game_over(board: np.array) -> str:
    """
    Reads board to determine whether x, o, or a draw has occurred.
    :param board: 3x3 array of marks.
    :return: "" = game not finished, 'x' = x won, 'o' = o won, 'draw' = draw
    """
    masks = {side: board == side for side in ('x', 'o')}
    for side, occupied in masks.items():
        winning_lines = (
            np.any(np.all(occupied, axis=1)),        # any complete row
            np.any(np.all(occupied, axis=0)),        # any complete column
            np.all(occupied.diagonal()),             # main diagonal
            np.all(np.fliplr(occupied).diagonal()),  # anti-diagonal
        )
        if any(winning_lines):
            return side
    # No winner: if every square is claimed, the game is drawn.
    if np.all(masks['x'] + masks['o']):
        return 'draw'
    return ""
def game_loop() -> None:
    """Run one full game for 0 (AI vs AI), 1 (human vs AI), or 2 humans.

    The board starts filled with the keyboard-hint letters; the final print
    blanks out any cell that was never claimed before showing the result.
    :return: None.
    """
    num_players = ''
    human_player_choice = ''
    while num_players not in ('0', '1', '2'):
        num_players = input("Number of players: ")
    # Initial cells hold the key each player would press to claim them.
    board = np.array(
        [["e", "r", "t"], ["d", "f", "g"], ["c", "v", "b"]]
    )
    whose_turn = 'x'
    if num_players == '0':
        print("Ah, you would like to see robots compete for your entertainment. Sit back for two minutes and enjoy.")
        while not (game_over(board)):
            print(show_board(ai_turn(board, whose_turn)))
            whose_turn = turn_flipper(whose_turn)
    if num_players == '1':
        while human_player_choice not in ('x', 'o'):
            human_player_choice = input("Would you like to play as x or o: ")
        ai_player_choice = turn_flipper(human_player_choice)
        print("Welcome player " + human_player_choice)
        print(show_board(board))
        while not (game_over(board)):
            if whose_turn == human_player_choice:
                print(show_board(human_turn(board, human_player_choice)))
                whose_turn = turn_flipper(whose_turn)
            elif whose_turn != human_player_choice:
                print(show_board(ai_turn(board, ai_player_choice)))
                whose_turn = turn_flipper(whose_turn)
    if num_players == '2':
        print(show_board(board))
        while not (game_over(board)):
            print(show_board(human_turn(board, whose_turn)))
            whose_turn = turn_flipper(whose_turn)
    # Replace the unclaimed hint letters with spaces for the final display.
    not_x_mask: np.array = board != 'x'
    not_o_mask: np.array = board != 'o'
    neither_mask: np.array = np.logical_and(not_x_mask, not_o_mask)
    print(show_board(np.where(neither_mask, ' ', board)))
    print(game_over(board))


if __name__ == "__main__":
    game_loop()
```
|
{
"source": "jcooper036/tri_hybid_mapping",
"score": 3
}
|
#### File: tri_hybid_mapping/analysis/read_sort.py
```python
import sys
import pickle
import time
import re
def cigarParse(genPos, seq, cigarString):
    """returns a new sequence and/or modified start position based on the cigar string

    Parses ops like '8M2S' into (count, op) pairs.  Reads whose *first* op is
    a soft clip, insertion, or deletion are rejected (ok flag False), since
    they would require adjusting the start position.  Later ops edit `seq`:
    trailing soft clips are trimmed, insertions/padding are cut out, and
    deletions are re-inserted as '-' runs so `seq` stays aligned to
    reference coordinates.

    :return: (genPos, seq, ok) -- genPos is currently never adjusted.
    """
    # Raw string for the regex (FIX: '(\d+)' is an invalid escape sequence
    # that warns on modern Python).
    tokens = re.split(r'(\d+)', cigarString)[1:]
    oper = []
    for idx, item in enumerate(tokens):
        if item.isdigit():
            oper.append((int(item), tokens[idx + 1]))
    if not oper:
        # Unparseable / absent CIGAR (e.g. '*'): reject the read.
        return genPos, seq, False
    readPos = 0  # position within `seq` as ops are consumed
    for idx, (count, op) in enumerate(oper):
        if idx == 0:
            # Leading clip or indel would shift the start position; reject.
            if op in ('S', 'I', 'D'):
                return genPos, seq, False
        else:
            if op == 'S':
                seq = seq[:-count]  # clip out the soft-clipped tail
            if op in ('I', 'P'):
                # Remove inserted/padded bases absent from the reference.
                seq = seq[:readPos] + seq[readPos + count:]
            if op == 'D':
                # Mark deleted reference bases with '-' placeholders.
                seq = seq[:readPos] + '-' * count + seq[readPos:]
        ## add on the number of BP that have been covered
        readPos += count
    return genPos, seq, True
## variables
scaffolds = [
    'X',
    'Y',
    '2L',
    '2R',
    '3L',
    '3R',
    '4'
]

# Command-line arguments: SAM-format alignments, SNP table, output path.
bamFile = sys.argv[1]
vcfTable = sys.argv[2]
outFile = sys.argv[3]

# BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for interval timing.
timeStart = time.perf_counter()

## make a dictionary of all postions by scaffold
# NOTE(review): countTable is built but never used below -- candidate for removal.
countTable = {}
for scaf in scaffolds:
    countTable[scaf] = {}

## load in the SNP table: snpDict[CHROM][POS] = [mel, sim, sec alleles]
snpDict = {}
for scaf in scaffolds:
    snpDict[scaf] = {}
with open(vcfTable) as vcf:
    for line in vcf:
        line = line.strip()
        if 'CHRM' not in line:  # skip the header row
            line = line.split('\t')
            if line[0] not in snpDict.keys():
                snpDict[line[0]] = {}
            snpDict[line[0]][int(line[1])] = [line[2], line[3], line[4]]

## make the header for the out file
printOut = open(outFile, 'w')
printOut.write('CHRM\tPOS1\tPOS2\tTYPE1\tTYPE2\tRead1\tRead2\tSAMPLE\n')
print('CHRM\tPOS1\tPOS2\tTYPE1\tTYPE2\tRead1\tRead2\tSAMPLE')

## load in the bam file, and process each read; report read pairs whose two
## mates classify as different parental genotypes (recombination candidates)
with open(bamFile, 'r') as bam:
    readDict = {}  # per-sample: first mate seen for each read name
    saveDict = {}  # per-sample: completed discordant pairs
    for line in bam:
        # Skip SAM header lines.
        if ('@SQ' not in line) and ('@PG' not in line) and ('@HD' not in line) and ('@RG' not in line):
            # line.split('\t')[2] is the CHR, [3] is the starting position, [9] is the sequence
            read = line.split('\t')[0]
            chrom = line.split('\t')[2]
            posit = line.split('\t')[3]
            seq = line.split('\t')[9]
            qualCheck = False
            if int(line.split('\t')[4]) > 36: qualCheck = True  # mapping-quality filter
            sample = line.split('RG:Z:')[1].split('\t')[0]  # read-group = sample name
            mateCheck = False
            if line.split('\t')[6] == '=':  # mate mapped to the same scaffold
                mateCheck = True
                matePos = line.split('\t')[7]
            cigarString = line.split('\t')[5]
            mel = 0
            sim = 0
            sec = 0
            ukn = True
            readType = 'ukn'
            if sample not in readDict:
                readDict[sample] = {}
            if sample not in saveDict:
                saveDict[sample] = {}
            ## modify the sequence and start position based of the cigar string
            posit, seq, cigFlag = cigarParse(posit, seq, cigarString)
            ## just don't do anything if these fail
            if chrom not in scaffolds or not qualCheck or not mateCheck or not cigFlag:
                continue
            ## count how many diagnostic SNP positions the read matches per species
            for pos in range(0, len(seq)):
                genPOS = pos + int(posit)
                if genPOS in snpDict[chrom]:
                    ukn = False
                    if snpDict[chrom][genPOS][0] == seq[pos]:
                        mel += 1
                    if snpDict[chrom][genPOS][1] == seq[pos]:
                        sim += 1
                    if snpDict[chrom][genPOS][2] == seq[pos]:
                        sec += 1
            ## now classify the read: only unambiguous sim-only or sec-only
            ## reads are kept (mel-only classification is deliberately disabled)
            if (mel == 0) and (sim > 0) and (sec == 0):
                readType = 'sim'
            elif (mel == 0) and (sim == 0) and (sec > 0):
                readType = 'sec'
            else:
                ukn = True
            # pair this mate with its previously seen partner, if any
            if not ukn and mateCheck:
                if (read in readDict[sample]) and (matePos == readDict[sample][read]['pos1']):
                    # mates disagree on genotype -> record the candidate pair
                    if readDict[sample][read]['type1'] != readType:
                        saveDict[sample][read] = readDict[sample][read]
                        saveDict[sample][read]['pos2'] = posit
                        saveDict[sample][read]['type2'] = readType
                        saveDict[sample][read]['read2'] = read
                        outLine = '\t'.join([str(saveDict[sample][read]['CHROM']),
                                             str(saveDict[sample][read]['pos1']),
                                             str(saveDict[sample][read]['pos2']),
                                             str(saveDict[sample][read]['type1']),
                                             str(saveDict[sample][read]['type2']),
                                             str(saveDict[sample][read]['read1']),
                                             str(saveDict[sample][read]['read2']),
                                             str(sample)])
                        printOut.write(outLine + '\n')
                        print(outLine)
                else:
                    # first classified mate of this pair
                    readDict[sample][read] = {'pos1': posit,
                                              'pos2': '',
                                              'type1': readType,
                                              'type2': 'ukn',
                                              'CHROM': chrom,
                                              'read1': read}

## close the file handle
printOut.close()

# measure time
print(str(time.perf_counter() - timeStart) + ' seconds to process bam file')
```
#### File: tri_hybid_mapping/analysis/window_controls.py
```python
import time
import sys
# Command-line arguments: control TSV, experimental TSV, and output path.
controlFile = sys.argv[1]
experFile = sys.argv[2]
outName = sys.argv[3]
def readTsv(file):
    """Parse a window-frequency TSV file into a nested dict.

    The first line is assumed to be a header and is skipped.  Each data
    row must have at least five tab-separated columns:
    CHROM, POS, mel, sim, sec.

    Parameters
    ----------
    file : str
        Path to the TSV file.

    Returns
    -------
    dict
        Mapping {CHROM: {POS: [mel, sim, sec]}}; all values stay
        strings (callers convert to float as needed).
    """
    outdict = {}
    # Context manager guarantees the handle is closed even on error,
    # and a distinct handle name avoids shadowing the path argument.
    with open(file, 'r') as handle:
        next(handle, None)  # skip the header line
        for line in handle:
            fields = line.strip().split('\t')
            # setdefault replaces the explicit "if key not in dict" dance.
            outdict.setdefault(fields[0], {})[fields[1]] = [fields[2], fields[3], fields[4]]
    return outdict
def normalizeTsv(controlFile, experFile):
    """Subtract control allele frequencies from experimental ones.

    Both arguments are TSV paths read via readTsv().  Only scaffolds and
    positions present in *both* files contribute values; a scaffold
    shared by both files gets an entry even when no positions overlap.

    Returns {scaffold: {pos: [mel_diff, sim_diff, sec_diff]}}.
    """
    control = readTsv(controlFile)
    exper = readTsv(experFile)
    diffDict = {}
    for scaff, ctrl_positions in control.items():
        if scaff not in exper:
            continue
        diffDict.setdefault(scaff, {})
        exp_positions = exper[scaff]
        for pos in ctrl_positions:
            if pos not in exp_positions:
                continue
            diffDict[scaff][pos] = [
                float(exp_positions[pos][i]) - float(ctrl_positions[pos][i])
                for i in range(0, len(exp_positions[pos]))
            ]
    return diffDict
def outWrite(outFile, diffDict):
    """Write {scaffold: {pos: [mel, sim, sec]}} to a TSV file with header."""
    with open(outFile, 'w') as out:
        out.write('CHROM\tPOS\tmel\tsim\tsec\n')
        for scaff, positions in diffDict.items():
            for pos, freqs in positions.items():
                row = [scaff, pos, freqs[0], freqs[1], freqs[2]]
                out.write('\t'.join(str(col) for col in row) + '\n')
# Entry point: compute control-vs-experiment differences and write them out.
diffDict = normalizeTsv(controlFile, experFile)
outWrite(outName, diffDict)
```
#### File: tri_hybid_mapping/simulation/02a_simulated_analysis.py
```python
import random
import copy
import numpy as np
from scipy.signal import find_peaks_cwt
import pickle
# Simulation parameters: chromosome length, trial count, selection
# strength, replicate count, and the sliding-window width/step in bases.
LENGTH = 50000000
N_TRIALS = 350
selection_strength = 1
replicates = 10
window = 1000000
step = 20000
# Base directory for simulation input/output files.
out_pre = '/Volumes/Jacob_2TB_storage/sim_sec_recombination_mapping/simulation/'
def load_tsv(idx, out_pre):
    """Load the three male and three female replicate TSVs for trial *idx*.

    Each file may contain a line of the form '@@@:<pos>' recording the
    simulated selection site; the last such value read wins.  Header
    lines containing 'CHROM' are skipped; every other line is split on
    tabs and kept as a list of strings.

    Returns (male_reps, female_reps, sel_spot) where the rep dicts map
    replicate index (0-2) -> list of rows.  sel_spot is None when no
    marker line exists (previously this raised UnboundLocalError).
    """
    sel_spot = None  # filled in from the '@@@:<pos>' marker line

    def _read_rep(sex, idx2):
        # Parse one replicate file; updates sel_spot as a side effect.
        nonlocal sel_spot
        rows = []
        filename = '{0}data/simulated_data/{1}_{2}_{3}.tsv'.format(out_pre, idx, idx2, sex)
        with open(filename, 'r') as f:
            for line in f:
                line = line.rstrip()
                if '@@@' in line:
                    sel_spot = int(line.split(':')[1])
                elif 'CHROM' not in line:
                    rows.append(line.split('\t'))
        return rows

    male_reps = {}
    female_reps = {}
    for idx2 in range(3):
        male_reps[idx2] = _read_rep('male', idx2)
        female_reps[idx2] = _read_rep('female', idx2)
    return male_reps, female_reps, sel_spot
def window_average(reps, window, step):
    """Sliding-window mean of allele frequencies for every replicate.

    reps maps replicate index -> rows of [CHROM, POS, mel, sim, sec]
    (strings, as produced by load_tsv).  Windows of width *window* are
    centred every *step* bases; a window is emitted only when it
    contains at least one row.  Returns {rep: [[pos, mel, sim, sec], ...]}.
    """
    win_reps = {}
    for rep in reps:
        win2 = window/2
        pos = window/2
        winds = []
        posits = [int(x[1]) for x in reps[rep]]
        while pos < max(posits):
            melav = []
            simav = []
            secav = []
            # NOTE(review): `start` is reset to 0 for every window, so the
            # resume-from-last-row bookkeeping below never takes effect and
            # each window rescans from the beginning -- confirm intent.
            start = 0
            for idx in range(start, len(reps[rep])):
                x = reps[rep][idx]
                if (int(x[1]) > pos-win2):
                    start = idx
                    if (int(x[1]) < pos+win2):
                        melav.append(float(x[2]))
                        simav.append(float(x[3]))
                        secav.append(float(x[4]))
                    else:
                        # assumes rows are position-sorted -- TODO confirm
                        break
            if melav and simav and secav:
                winds.append([pos, np.mean(melav), np.mean(simav), np.mean(secav)])
            pos += step
        win_reps[rep] = winds
    return win_reps
def sex_difference(male_reps, female_reps):
    """Per replicate, subtract female windowed frequencies from male ones.

    Rows are [pos, mel, sim, sec]; the position column is taken from the
    male row unchanged and the three frequency columns are differenced
    pairwise.  Male and female replicates are assumed to be aligned.
    """
    diff = {}
    for rep, male_rows in male_reps.items():
        diff[rep] = []
        for idx, male_row in enumerate(male_rows):
            female_row = female_reps[rep][idx]
            diff[rep].append([male_row[0],
                              male_row[1] - female_row[1],
                              male_row[2] - female_row[2],
                              male_row[3] - female_row[3]])
    return diff
def average_replicates(reps):
    """Average the three replicate tables row-by-row.

    Assumes reps has keys 0, 1 and 2 with aligned rows; the position is
    taken from replicate 0 and the mel/sim/sec columns are means of the
    three replicates.
    """
    table = []
    for row_idx in range(len(reps[0])):
        rows = [reps[r][row_idx] for r in (0, 1, 2)]
        averaged = [sum(r[col] for r in rows) / 3 for col in (1, 2, 3)]
        table.append([rows[0][0]] + averaged)
    return table
def estimate_max(table):
    """Estimate the selected site as the strongest peak of sim - sec.

    NOTE(review): the (position, sim-sec) pairs are sorted by *value*
    before find_peaks_cwt is applied, which destroys the positional
    ordering a wavelet peak-finder normally assumes -- confirm this is
    intentional before reusing.
    """
    table = table[1:]  # drop the first window
    sim_freqs = [(x[0],(x[2]-x[3]))for x in table]
    sim_freqs = sorted(sim_freqs, key=lambda x: x[1])
    # find the peaks
    xs = [x[1] for x in sim_freqs]
    peaks = list(find_peaks_cwt(xs, np.arange(50, 200)))
    # this produces a list. Find the biggest one in the list
    big = (0,0)
    for peak in peaks:
        if sim_freqs[peak][1] > big[1]:
            big = (sim_freqs[peak][0], sim_freqs[peak][1])
    return big[0]
# Run every simulated trial: window the replicates, difference the sexes,
# average, estimate the selected site, and pickle the parsed results.
differences = []
for i2 in range(replicates):
    male_reps, female_reps, sel_spot = load_tsv(i2, out_pre)
    male_reps = window_average(male_reps, window, step)
    female_reps = window_average(female_reps, window, step)
    table = sex_difference(male_reps, female_reps)
    table = average_replicates(table)
    estimated_site = estimate_max(table)
    out = {
        'est_site' : estimated_site,
        'difference' : estimated_site-sel_spot,
        'sel_site' : sel_spot,
        'table' : table
    }
    pickle_file = out_pre + 'data/parsed_data/' + str(i2) + '.pkl'
    with open(pickle_file, 'wb') as f:
        pickle.dump(out, f, pickle.HIGHEST_PROTOCOL)
# print(differences)
# print("2x std:", np.std(differences))
# print("Average:", np.average(differences))
# plt.hist(differences, bins=20)
# plt.savefig(out_pre+'confidence_hist.pdf')
# plot_frequencies(table, sel_spot, estimated_site, 'final')
```
#### File: tri_hybid_mapping/simulation/02b_plotting.py
```python
import random
import copy
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks_cwt
# Simulation/analysis parameters (must match 02a_simulated_analysis.py).
LENGTH = 50000000
N_TRIALS = 350
selection_strength = 1
replicates = 10
window = 1000000
step = 20000
# Base directory for the pickled trial results and output plots.
out_pre = '/Volumes/Jacob_2TB_storage/sim_sec_recombination_mapping/simulation/'
def load_table(filename):
    """Unpickle one parsed-trial file; return (table, sel_site, est_site)."""
    with open(filename, 'rb') as handle:
        payload = pickle.load(handle)
    return payload['table'], payload['sel_site'], payload['est_site']
def plot_frequencies(table, sel_spot, esitmate, num, out_pre):
    """Plot the windowed male-female frequency differences for the three
    species plus the actual and estimated selection sites.

    Saves the figure as <out_pre>simulated_graph.pdf.  (Parameter kept
    as 'esitmate' [sic] to preserve the call signature; *num* is
    currently unused.)
    """
    fig = plt.figure(figsize=(6, 8))
    table = np.array(table)
    plt.plot(table[::,0], table[::,1], color = 'blue', label = 'D.mel')
    plt.plot(table[::,0], table[::,2], color = 'orange', label = 'D.sim')
    plt.plot(table[::,0], table[::,3], color = 'red', label = 'D.sec')
    plt.axvline(x=sel_spot, color='black', label = 'actual site')
    plt.axvline(x=esitmate, color='green', label = 'estimated site')
    plt.ylim(-0.4,0.4)
    plt.legend()
    plt.ylabel('Allele Frequency (Male - Female)')
    plt.xlabel('Genomic position')
    plotname = out_pre + 'simulated_graph.pdf'
    plt.savefig(plotname)
# Load every parsed trial, report the spread of estimation error, and
# plot an error histogram plus the final trial's frequency curves.
differences = []
for i2 in range(replicates):
    filename = out_pre + 'data/parsed_data/' + str(i2) + '.pkl'
    table, sel_spot, estimated_site = load_table(filename)
    differences.append(estimated_site-sel_spot)
print(differences)
print("2x std:", np.std(differences))
print("Average:", np.average(differences))
plt.hist(differences, bins=20)
plt.savefig(out_pre+'confidence_hist.pdf')
plot_frequencies(table, sel_spot, estimated_site, 'final', out_pre)
```
#### File: tri_hybid_mapping/simulation/03b_plotting copy.py
```python
import random
import copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks_cwt
# Simulation/analysis parameters (note: only 3 replicates in this variant).
LENGTH = 50000000
N_TRIALS = 350
selection_strength = 1
replicates = 3
window = 1000000
step = 20000
# Base directory for simulation input/output files.
out_pre = '/Volumes/Jacob_2TB_storage/sim_sec_recombination_mapping/simulation/'
def load_tsv(idx, out_pre):
    """Load the three male and three female replicate TSVs for trial *idx*.

    Lines containing '@@@' carry the simulated selection site as
    '@@@:<pos>' (the last one read wins); 'CHROM' header lines are
    skipped; all other lines are split on tabs and kept as strings.

    NOTE(review): if no file contains a '@@@' marker, *sel_spot* is
    never bound and the return raises UnboundLocalError.
    """
    table = {}  # unused
    # load files
    male_reps = {}
    female_reps = {}
    for idx2 in range(3):
        male_reps[idx2] = []
        filename = out_pre + 'data/simulated_data/' + str(idx) + '_' + str(idx2) + '_male.tsv'
        with open(filename, 'r') as f:
            for line in f:
                line = line.rstrip()
                if '@@@' in line:
                    sel_spot = int(line.split(':')[1])
                elif 'CHROM' not in line:
                    line = line.split('\t')
                    male_reps[idx2].append(line)
        female_reps[idx2] = []
        filename = out_pre + 'data/simulated_data/' + str(idx) + '_' + str(idx2) + '_female.tsv'
        with open(filename, 'r') as f:
            for line in f:
                line = line.rstrip()
                if '@@@' in line:
                    sel_spot = int(line.split(':')[1])
                elif 'CHROM' not in line:
                    line = line.split('\t')
                    female_reps[idx2].append(line)
    return male_reps, female_reps, sel_spot
def window_average(reps, window, step):
    """Sliding-window mean of allele frequencies for every replicate.

    reps maps replicate index -> rows of [CHROM, POS, mel, sim, sec]
    (strings).  Windows of width *window* are centred every *step*
    bases; this variant rescans every row for each window.
    """
    win_reps = {}
    for rep in reps:
        win2 = window/2
        pos = window/2
        winds = []
        posits = [int(x[1]) for x in reps[rep]]
        while pos < max(posits):
            melav = []
            simav = []
            secav = []
            for x in reps[rep]:
                if (int(x[1]) > pos-win2):
                    if (int(x[1]) < pos+win2):
                        melav.append(float(x[2]))
                        simav.append(float(x[3]))
                        secav.append(float(x[4]))
                    else:
                        # assumes rows are position-sorted -- TODO confirm
                        break
            if melav and simav and secav:
                winds.append([pos, np.mean(melav), np.mean(simav), np.mean(secav)])
            pos += step
        win_reps[rep] = winds
    return win_reps
def sex_difference(male_reps, female_reps):
    """Per replicate, subtract female windowed frequencies from male ones.

    Rows are [pos, mel, sim, sec]; the position column is taken from the
    male row unchanged and the three frequency columns are differenced
    pairwise.  Male and female replicates are assumed to be aligned.
    """
    diff = {}
    for rep, male_rows in male_reps.items():
        diff[rep] = []
        for idx, male_row in enumerate(male_rows):
            female_row = female_reps[rep][idx]
            diff[rep].append([male_row[0],
                              male_row[1] - female_row[1],
                              male_row[2] - female_row[2],
                              male_row[3] - female_row[3]])
    return diff
def average_replicates(reps):
    """Average the three replicate tables row-by-row.

    Position is taken from replicate 0; mel/sim/sec are means over the
    three replicates (keys 0-2, rows assumed aligned).
    """
    table = []
    for pos, lis in enumerate(reps[0]):
        melav = (reps[0][pos][1] + reps[1][pos][1] + reps[2][pos][1]) / 3
        simav = (reps[0][pos][2] + reps[1][pos][2] + reps[2][pos][2]) / 3
        secav = (reps[0][pos][3] + reps[1][pos][3] + reps[2][pos][3]) / 3
        table.append([reps[0][pos][0], melav, simav, secav])
    return table
def estimate_max(table):
    """Estimate the selected site as the strongest peak of sim - sec.

    NOTE(review): the (position, sim-sec) pairs are sorted by *value*
    before find_peaks_cwt is applied, which destroys the positional
    ordering a wavelet peak-finder normally assumes -- confirm this is
    intentional before reusing.
    """
    table = table[1:]  # drop the first window
    sim_freqs = [(x[0],(x[2]-x[3]))for x in table]
    sim_freqs = sorted(sim_freqs, key=lambda x: x[1])
    # find the peaks
    xs = [x[1] for x in sim_freqs]
    peaks = list(find_peaks_cwt(xs, np.arange(50, 200)))
    # this produces a list. Find the biggest one in the list
    big = (0,0)
    for peak in peaks:
        if sim_freqs[peak][1] > big[1]:
            big = (sim_freqs[peak][0], sim_freqs[peak][1])
    return big[0]
def plot_frequencies(table, sel_spot, esitmate, num):
    """Plot windowed male-female allele-frequency differences.

    Fixes in this revision:
      * removed the pd.read_csv(filename) call -- *filename* was never
        defined in this scope (NameError) and the DataFrame was unused;
      * table[::0] is an invalid slice (step 0 raises ValueError); the
        table is now converted to a NumPy array and indexed by column;
      * the output filename no longer depends on the undefined
        *filename* and is derived from *num* instead.

    (The parameter is spelled 'esitmate' to keep the call signature
    unchanged for existing callers.)
    """
    fig = plt.figure(figsize=(6, 8))
    table = np.array(table)
    plt.plot(table[::, 0], table[::, 1], color='blue', label='D.mel')
    plt.plot(table[::, 0], table[::, 2], color='orange', label='D.sim')
    plt.plot(table[::, 0], table[::, 3], color='red', label='D.sec')
    plt.axvline(x=sel_spot, color='black', label='actual site')
    plt.axvline(x=esitmate, color='green', label='estimated site')
    plt.ylim(-0.4, 0.4)
    plt.legend()
    plt.ylabel('Allele Frequency (Male - Female)')
    plt.xlabel('Genomic position')
    plotname = 'simulated_graph_' + str(num) + '.pdf'
    plt.savefig(plotname)
# Run every simulated trial end-to-end, report estimation-error spread,
# and plot an error histogram plus the final trial's frequency curves.
differences = []
for i2 in range(replicates):
    male_reps, female_reps, sel_spot = load_tsv(i2, out_pre)
    male_reps = window_average(male_reps, window, step)
    female_reps = window_average(female_reps, window, step)
    table = sex_difference(male_reps, female_reps)
    table = average_replicates(table)
    estimated_site = estimate_max(table)
    differences.append(estimated_site-sel_spot)
print(differences)
print("2x std:", np.std(differences))
print("Average:", np.average(differences))
plt.hist(differences, bins=20)
plt.savefig(out_pre+'confidence_hist.pdf')
plot_frequencies(table, sel_spot, estimated_site, 'final')
```
|
{
"source": "jcoopere/binary-image-classification",
"score": 3
}
|
#### File: jcoopere/binary-image-classification/vgg16-with-dropout.py
```python
import os
import sys
import time
import glob
import matplotlib.pyplot as plt
from keras import layers, models, optimizers, backend as K
from keras.preprocessing.image import ImageDataGenerator
# Globals
# Every image is resized to image_dimensions x image_dimensions with
# num_channels colour channels before entering the network.
image_dimensions = 224
num_channels = 3
def count_images_in_dir(path):
    """Count .tiff images one directory level below *path* (one subdir per class).

    This code assumes .tiff image format; to accept a different format,
    modify the glob pattern below.
    """
    pattern = os.path.join(path, '*', '*.tiff')
    return sum(1 for _ in glob.iglob(pattern))
def smooth_curve(points, factor=0.8):
    """Exponential moving average used to smooth noisy training curves.

    The first point is kept as-is; each subsequent point is blended with
    the previous smoothed value: prev * factor + point * (1 - factor).
    """
    smoothed = []
    for point in points:
        smoothed.append(
            smoothed[-1] * factor + point * (1 - factor) if smoothed else point
        )
    return smoothed
def train_cnn(corpus_path):
    """Train a VGG16-style CNN (with dropout) on the images under
    *corpus_path*, save it as vgg16-classifier-model.h5, and plot the
    training/validation accuracy and loss curves.

    Expects corpus_path to contain 'train', 'validate' and 'test'
    sub-directories, each holding one sub-directory per class label.
    """
    train_dir = os.path.join(corpus_path, 'train')
    validate_dir = os.path.join(corpus_path, 'validate')
    test_dir = os.path.join(corpus_path, 'test')
    training_image_count = count_images_in_dir(train_dir)
    validation_image_count = count_images_in_dir(validate_dir)
    testing_image_count = count_images_in_dir(test_dir)
    print('INFO: {} images will be used for training ({})'.format(training_image_count, train_dir))
    print('INFO: {} images will be used for validation ({})'.format(validation_image_count, validate_dir))
    print('INFO: {} images will be used for testing ({})'.format(testing_image_count, test_dir))
    # One class label per sub-directory of the training split.
    labels = os.listdir(train_dir)
    num_labels = len(labels)
    print("INFO: Training set contains the following {} labels...".format(num_labels))
    for label in labels:
        print(' - {}'.format(label))
    # Train
    K.clear_session()
    # VGG16 topology with Dropout(0.2) after each conv/pool stage and
    # Dropout(0.5) on the dense head, to reduce overfitting.
    model = models.Sequential()
    model.add(layers.Conv2D(64, (3, 3), input_shape=(image_dimensions, image_dimensions, num_channels), padding='same', activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_labels, activation='softmax'))
    model.summary()
    # Low learning rate since the whole stack trains from scratch.
    model.compile(optimizers.RMSprop(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
    # Pixel values rescaled to [0, 1]; no other augmentation.
    train_datagen = ImageDataGenerator(rescale=1./255)
    validate_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(train_dir, target_size=(image_dimensions, image_dimensions), batch_size=20, shuffle=True)
    validate_generator = validate_datagen.flow_from_directory(validate_dir, target_size=(image_dimensions, image_dimensions), batch_size=20, shuffle=True)
    # Peek at one batch so the shapes are visible in the log.
    for data_batch, labels_batch in train_generator:
        print('INFO: Data batch shape: {}'.format(data_batch.shape))
        print('INFO: Labels batch shape: {}'.format(labels_batch.shape))
        break
    t_start = time.time()
    # +1 step so a partial final batch is still used each epoch.
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=training_image_count // 20 + 1,
        epochs=300,
        validation_data=validate_generator,
        validation_steps=validation_image_count // 20 + 1)
    t_end = time.time()
    elapsed_time = t_end - t_start
    print('INFO: Training complete! Elapsed time: {}'.format(elapsed_time))
    print('INFO: Saving model as vgg16-classifier-model.h5')
    model.save('vgg16-classifier-model.h5')
    print('INFO: Plotting training & validation accuracy and loss...')
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.ylim(0.8, 1.0)
    plt.plot(epochs, acc, 'bo')
    # Validation curves are smoothed to make the trend readable.
    plt.plot(epochs, smooth_curve(val_acc), 'b')
    plt.title('Training & Validation Accuracy')
    plt.figure()
    plt.plot(epochs, loss, 'bo')
    plt.plot(epochs, smooth_curve(val_loss), 'b')
    plt.title('Training & Validation Loss')
    plt.show()
def test_cnn(corpus_path, model_path):
    """Evaluate a saved model on the held-out 'test' split and print accuracy."""
    from keras.models import load_model
    model = load_model(model_path)
    test_dir = os.path.join(corpus_path, 'test')
    testing_image_count = count_images_in_dir(test_dir)
    print('INFO: {} images will be used for testing'.format(testing_image_count))
    # Test
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(test_dir, target_size=(image_dimensions, image_dimensions), batch_size=20)
    # +1 step so a partial final batch is still evaluated.
    test_loss, test_acc = model.evaluate_generator(test_generator, testing_image_count // 20 + 1)
    print('INFO: Model accuracy on test data set is {}'.format(test_acc))
# Train, test, and save CNN
# NOTE(review): both paths are hard-coded for the original CDSW environment.
split_corpus_path = '/home/cdsw/split-sorted-images'
train_cnn(split_corpus_path)
test_cnn(split_corpus_path, '/home/cdsw/vgg16-classifier-model.h5')
```
|
{
"source": "jcooper-korg/talon_user",
"score": 2
}
|
#### File: jcooper-korg/talon_user/clipboard.py
```python
from talon.voice import ctrl, Key, Context
ctx = Context('clipboard')
def double_click(m):
    # Double-click at the current mouse position (selects the word under
    # the cursor) so a following copy/paste acts on that word.
    x, y = ctrl.mouse_pos()
    ctrl.mouse_click(x, y, button=0, times=2, wait=16000)
# Spoken-command -> key-chord map for clipboard operations.
ctx.keymap({
    '(copy | stoosh)': Key('cmd-c'),
    '(paste | spark)': Key('cmd-v'),
    '(cut | snatch)': Key('cmd-x'),
    # select all, then paste over / copy the whole document
    'allspark': Key('cmd-a cmd-v'),
    'allcopy': Key('cmd-a cmd-c'),
    # double-click a word, then paste over or copy it
    'do park': [double_click, Key('cmd-v')],
    'match park': Key('cmd-shift-alt-v'),
    'do koosh': [double_click, Key('cmd-c')],
})
```
#### File: jcooper-korg/talon_user/generic_editor.py
```python
from talon.voice import Key, press, Str, Context
# Voice-command context; pass bundle= to scope these commands to one app.
ctx = Context('generic_editor')  # , bundle='com.microsoft.VSCode')
# Map spoken numeral tokens ('0'-'19', tens up to '90', and 'oh') to ints.
numeral_map = {str(n): n for n in range(0, 20)}
for n in [20, 30, 40, 50, 60, 70, 80, 90]:
    numeral_map[str(n)] = n
numeral_map["oh"] = 0  # synonym for zero
# Grammar fragments listing every recognised numeral word.
numerals = ' (' + ' | '.join(sorted(numeral_map.keys())) + ')+'
optional_numerals = ' (' + ' | '.join(sorted(numeral_map.keys())) + ')*'
def text_to_number(words):
    """Convert a sequence of spoken numeral words into an integer.

    Words are processed least-significant first (reversed), each word
    contributing digit * 10**position.

    Raises
    ------
    Exception
        If any word is not a recognised numeral.
    """
    cleaned = [parse_word(str(w).lower()) for w in words]
    result = 0
    factor = 1
    for word in reversed(cleaned):
        # Bug fix: membership must be tested against the numeral_map dict,
        # not the `numerals` grammar *string* -- substring matching there
        # let junk tokens like '(' or '|' through, crashing on the lookup.
        if word not in numeral_map:
            raise Exception('not a number')
        result += factor * int(numeral_map[word])
        factor *= 10
    return result
def parse_word(word):
    """Strip Dragon-style backslash annotations, keeping the spoken form."""
    return word.lstrip('\\').split('\\', 1)[0]
def jump_to_bol(m):
    # Jump to a spoken 1-based line number via the editor's cmd-L
    # "go to line" dialog.
    line = text_to_number(m)
    press('cmd-l')
    Str(str(line))(None)
    press('enter')
def jump_to_end_of_line():
    press('cmd-right')
def jump_to_beginning_of_text():
    press('cmd-left')
def jump_to_nearly_end_of_line():
    press('left')
def jump_to_bol_and(then):
    # Build a handler: optionally jump to the spoken line, move to the
    # start of the line, then run the follow-up action.
    def fn(m):
        if len(m._words) > 1:
            jump_to_bol(m._words[1:])
        else:
            press('ctrl-a')
        press('cmd-left')  # always land on column 0
        then()
    return fn
def jump_to_eol_and(then):
    # Build a handler: optionally jump to the spoken line, move to the
    # end of the line, then run the follow-up action.
    def fn(m):
        if len(m._words) > 1:
            jump_to_bol(m._words[1:])
        press('cmd-right')
        then()
    return fn
def toggle_comments():
    # works in VSCode with Finnish keyboard layout
    # press('cmd-shift-7')
    # does not work in VSCode, see https://github.com/talonvoice/talon/issues/3
    press('cmd-/')
def snipline():
    # Delete from the cursor to end of line (plus the newline) and move
    # back to the beginning of the line.
    press('shift-cmd-right')
    press('delete')
    press('delete')
    press('ctrl-a')
    press('cmd-left')
# Spoken-command map; a trailing numeral sequence selects a target line.
keymap = {
    'sprinkle' + optional_numerals: jump_to_bol,
    'spring' + optional_numerals: jump_to_eol_and(jump_to_beginning_of_text),
    'dear' + optional_numerals: jump_to_eol_and(lambda: None),
    'smear' + optional_numerals: jump_to_eol_and(jump_to_nearly_end_of_line),
    'trundle' + optional_numerals: jump_to_bol_and(toggle_comments),
    'jolt': Key('ctrl-a cmd-left shift-down cmd-c down cmd-v' ), # jsc simplified
    'snipline' + optional_numerals: jump_to_bol_and(snipline),
    # NB these do not work properly if there is a selection
    'snipple': Key('shift-cmd-left delete'),
    'snipper': Key('shift-cmd-right delete'),
    'shackle': Key('cmd-right shift-cmd-left'),
    'bracken': [Key('cmd-shift-ctrl-right')],
    'shockey': Key('ctrl-a cmd-left enter up'),
    'shockoon': Key('cmd-right enter'),
    'sprinkoon' + numerals: jump_to_eol_and(lambda: press('enter')),
    'olly': Key('cmd-a'),
    # jsc added
    '(indent | shabble)': Key('cmd-['),
    '(outdent | shabber)': Key('cmd-]'),
}
ctx.keymap(keymap)
```
#### File: jcooper-korg/talon_user/utils.py
```python
import itertools
# Useful for identifying app/window information for context selection
def context_func(app, win):
    """Debug helper: dump app/window identifiers, then match unconditionally.

    Useful when figuring out which bundle/title/doc values to key a
    talon Context on.
    """
    print('---')
    # print(app)
    for field in (app.bundle, win, win.title, win.doc):
        print(field)
    print('---')
    return True
number_conversions = {
    # 'oh': '0', # 'oh' => zero
}
# Map digit strings ('0'), digit words ('zero'), and Talon
# '<word>\number' tokens to the corresponding digit character.
for i, w in enumerate(['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',]):
    number_conversions[str(i)] = str(i)
    number_conversions[w] = str(i)
    number_conversions['%s\\number'%(w)] = str(i)
def parse_words_as_integer(words):
    """Join spoken digit words into a single int, or None if none found."""
    # TODO: Once implemented, use number input value rather than manually parsing number words with this function
    # Ignore any potential trailing non-number words
    # NOTE(review): takewhile keeps items while the predicate holds, so this
    # collects the *leading* words that are NOT in number_conversions --
    # which the mapping below would then KeyError on.  The surrounding
    # comments suggest the predicate was meant to be
    # `w in number_conversions`; confirm against live Talon behaviour.
    number_words = list(itertools.takewhile(lambda w: w not in number_conversions, words))
    # Somehow, no numbers were detected
    if len(number_words) == 0:
        return None
    # Map number words to simple number values
    number_values = list(map(lambda w: number_conversions[w.word], number_words))
    # Filter out initial zero values
    normalized_number_values = []
    non_zero_found = False
    for n in number_values:
        if not non_zero_found and n == '0':
            continue
        non_zero_found = True
        normalized_number_values.append(n)
    # If the entire sequence was zeros, return single zero
    if len(normalized_number_values) == 0:
        normalized_number_values = ['0']
    # Create merged number string and convert to int
    return int(''.join(normalized_number_values))
```
|
{
"source": "jcoopertech/autoread",
"score": 3
}
|
#### File: autoread/scripts/AR_track.py
```python
import math
import COM_CONFIG
import sys, getopt
class Coordinate():
    """Simple 3D point with X/Y/Z attributes; stores values verbatim."""

    def __init__(self, X, Y, Z):
        self.X, self.Y, self.Z = X, Y, Z

    def print_me(self):
        """Print the three components, one per indented line."""
        for label, value in (("X", self.X), ("Y", self.Y), ("Z", self.Z)):
            print("\t" + label + ":\t", value)
class TrackingPoint():
    """A named target position that lights can be told to follow."""
    def __init__(self, CoordTuple, Name, ID):
        # CoordTuple is an (X, Y, Z) triple; Name/ID identify the point.
        self.Coordinate = Coordinate(CoordTuple[0], CoordTuple[1], CoordTuple[2])
        self.Name = Name
        self.ID = ID
    def print_me(self):
        # Verbose dump: name, id and full coordinate.
        print("TrackingPoint")
        print("Name:\t\t", self.Name)
        print("ID:\t\t", self.ID)
        self.Coordinate.print_me()
    def print_simple(self):
        # One-line summary used by the lights' simple printout.
        print("Tracking Point:\t\t", self.Name)
class Axis():
    """One automation axis: raw position plus a winch-calibration offset.

    The Y coordinate is looked up from COM_CONFIG.AxisYValues by axis
    number; X is unknown at this level and left as None.
    """
    def __init__(self, AxisNumber, position, WinchCalibration = 0,):
        self.AxisNumber = AxisNumber
        self.Coordinate = Coordinate(None, COM_CONFIG.AxisYValues[int(self.AxisNumber)-1], position)
        self.WinchCalibration = WinchCalibration
        # Same point with the calibration offset applied to the height (Z).
        self.CalibratedCoordinate = Coordinate(None, COM_CONFIG.AxisYValues[int(self.AxisNumber)-1], position + self.WinchCalibration)
    def print_me(self):
        #print("--")
        #print("Axis object print_me")
        print("AxisNumber:\t\t", self.AxisNumber)
        print("WinchCalibration:\t", self.WinchCalibration)
        print("Coordinate:")
        self.Coordinate.print_me()
        print("CalibratedCoordinate:")
        self.CalibratedCoordinate.print_me()
        #print("--")
class Light():
    """A moving light: its type, rigging position, and computed pan/tilt.

    When *AxisClass* is an Axis object the light hangs from that axis
    and inherits its calibrated Y/Z; otherwise AxisClass is assumed to
    already be a base Coordinate (see setup_LightObjects).
    """
    def __init__(self,ThisLightType,CoordTuple,AxisClass,unitID):
        # Calculate maximum pan ranges in degrees.
        if isinstance(AxisClass, Axis):
            self.Axis = AxisClass
        else:
            self.Axis = None
        #Use LightType.type_name etc to access type variables
        self.LightType = ThisLightType
        self.unitID = unitID
        # Pan/tilt limits are symmetric around the home position.
        self.plus_pan = self.LightType.total_pan / 2
        self.neg_pan = - self.plus_pan
        self.plus_tilt = self.LightType.total_tilt / 2
        self.neg_tilt = - self.plus_tilt
        if self.Axis == None:
            # No axis: AxisClass is treated as a ready-made Coordinate.
            self.BaseCoord = AxisClass
            print(self.BaseCoord)
        if self.Axis != None:
            # Hung on an axis: X from the coord tuple, Y/Z from the axis.
            self.BaseCoord = Coordinate(CoordTuple[0], self.Axis.CalibratedCoordinate.Y, self.Axis.CalibratedCoordinate.Z)
        # Pivot point of the head, offset down from the hanging position.
        self.OffsetCoord = Coordinate( self.BaseCoord.X, self.BaseCoord.Y, self.BaseCoord.Z - self.LightType.yoke_offset)
        # Filled in later by the tracking and patching passes.
        self.TrackingPoint = None
        self.PanDeg = None
        self.TiltDeg = None
        self.Pan = None
        self.Tilt = None
        self.Addresses = []
        self.Universe = None
    def print_me(self):
        # Full diagnostic dump of the light and its related objects.
        print("Unit ID:", self.unitID)
        print(".LightType attributes")
        self.LightType.print_me()
        print(".Axis attributes")
        if self.Axis != None:
            self.Axis.print_me()
        print(".BaseCoord attributes")
        self.BaseCoord.print_me()
        print(".OffsetCoord attributes")
        self.OffsetCoord.print_me()
        print("plus_pan:\t\t", self.plus_pan)
        print("neg_pan\t\t\t", self.neg_pan)
        print("plus_tilt\t\t", self.plus_tilt)
        print("neg_tilt\t\t", self.neg_tilt)
        print("PanDeg\t\t", self.PanDeg)
        print("TiltDeg\t\t", self.TiltDeg)
        print("Pan:\t\t", self.Pan)
        print("Tilt:\t\t", self.Tilt)
        print("Universe:\t\t", self.Universe)
        print("Addresses:\t\t", self.Addresses)
        if self.TrackingPoint != None:
            self.TrackingPoint.print_me()
        else:
            print("TrackingPoint\t\t", self.TrackingPoint)
    def print_simple(self):
        # Short summary: id, computed angles, and the tracked point.
        print("Unit ID:", self.unitID)
        print("PanDeg\t\t", self.PanDeg)
        print("TiltDeg\t\t", self.TiltDeg)
        self.TrackingPoint.print_simple()
# Re-export the light-type definitions configured in COM_CONFIG.
LightTypeObjects = COM_CONFIG.LightTypeObjects
def setup_AxisObjects(axisDict):
    """Build one Axis object per entry of {axis_number: [position, ...]}."""
    # value[0] is the raw axis position read from the automation system.
    return [Axis(number, value[0]) for number, value in axisDict.items()]
def setup_LightObjects(Lights, AxisObjects, LightTypeObjects):
    """Instantiate a Light for every entry of the Lights config list.

    Each entry is (type_name, coord_tuple, axis_number_or_None, unitID);
    lights without an axis get a plain Coordinate as their base.
    """
    LightObjects = []
    #Get Axis Object to be written to Light object
    #print([LightType for LightType in LightTypeObjects if LightType.type_name == "TW1"])
    for LightItem in Lights:
        unitID = LightItem[3]
        CurrentLightType = [LightType for LightType in LightTypeObjects if LightType.type_name == LightItem[0]][0]
        #print(LightItem[2])
        if LightItem[2] == None:
            #If there is no Axis assigned
            BaseCoord = Coordinate(LightItem[1][0], LightItem[1][1], LightItem[1][2])
            LightObjects.append(Light(CurrentLightType, LightItem[1], BaseCoord, unitID))
        else:
            # NOTE(review): this local name shadows the Axis class within
            # the loop body -- harmless here, but fragile.
            Axis = [Axis for Axis in AxisObjects if Axis.AxisNumber == LightItem[2]][0]
            LightObjects.append(Light(CurrentLightType, LightItem[1], Axis, unitID))
    return LightObjects
def setup_TrackingPoints(TrackingPoints):
    """Instantiate TrackingPoint objects from (coord_tuple, name, id) entries."""
    return [TrackingPoint(point[0], point[1], point[2]) for point in TrackingPoints]
def printAllLightDetails(LightObjects, argv, opts):
    """Print every light at the verbosity requested on the command line.

    *opts* is the getopt-style list of (flag, value) pairs; '-v 2' gives
    the full dump, '-v 1' a one-line summary, anything else prints
    nothing per light.  *argv* is accepted for signature compatibility
    but unused.

    Bug fix: verbosity now defaults to 0 before the option scan, so the
    function no longer raises NameError when *opts* is empty (previously
    `verbose` was only ever bound inside the option loop).
    """
    verbose = 0
    for opt, arg in opts:
        if opt == "-v":
            verbose = arg
    for Light in LightObjects:
        if int(verbose) == 2:
            Light.print_me()
        elif int(verbose) == 1:
            Light.print_simple()
        print("\n")
def printAllTrackingPointDetails(TrackingObjects):
    """Dump every tracking point via its verbose printout."""
    for point in TrackingObjects:
        point.print_me()
        print("\n")
def GiveLightTrackingPoint(LightObjects, TrackingObjects):
    """Interactively ask which tracking point each light should follow.

    Entering 0 (or anything non-numeric) leaves the light untracked.
    """
    for Light in LightObjects:
        try:
            WhichPointID = int(input("Which point should Light:{0} track?\n\t-\tType 0 for no Point".format(Light.unitID)))
        except ValueError:
            print("ValueError raised. Telling to track no point.")
            WhichPointID = 0
        if WhichPointID != 0:
            Light.TrackingPoint = [Point for Point in TrackingObjects if WhichPointID == Point.ID][0]
        else:
            Light.TrackingPoint = None
def CalculateLightAngles(LightsTracking, Light, TrackingObjects):
    """Calculate Azimuth and Elevation (Pan/Tilt) for all the
    lights defined in the AutoRead system."""
    LightX = Light.OffsetCoord.X
    LightY = Light.OffsetCoord.Y
    LightZ = Light.OffsetCoord.Z
    # NOTE(review): the tracking point's Y and Z components are read
    # swapped relative to the light's -- presumably a world-vs-rigging
    # axis convention; confirm against COM_CONFIG's coordinate layout.
    PointX = Light.TrackingPoint.Coordinate.X
    PointY = Light.TrackingPoint.Coordinate.Z
    PointZ = Light.TrackingPoint.Coordinate.Y
    Z_Difference = LightZ - PointZ
    X_Difference = LightX - PointX
    Y_Difference = LightY - PointY
    # print("Light ", Light.unitID, "X",X_Difference, "Y",Y_Difference, "Z",Z_Difference)
    # Tilt: angle between straight-down and the target direction.
    TiltAngle = math.degrees(math.atan2(math.sqrt(X_Difference**2 + Y_Difference**2), Z_Difference))
    # print("Tilt", TiltAngle)
    # PanDeg = -(math.degrees(math.atan2(Y_Difference, X_Difference)))+90
    PanDeg = math.degrees(math.atan2(Y_Difference, X_Difference))+90
    # print("Pan", PanDeg)
    Light.TiltDeg = TiltAngle
    Light.PanDeg = PanDeg
    #https://planetcalc.com/7952/"""
def updateTrackingPointsAssociation(LightsTracking, LightObjects, TrackingObjects):
    """Attach to each light the TrackingPoint its (unitID, pointID) pairing names.

    *LightsTracking* is a list of (light_unitID, tracking_point_ID)
    pairs; every light is expected to have exactly one pairing and every
    pairing a matching point (IndexError otherwise, as before).
    """
    for light in LightObjects:
        pairing = [entry for entry in LightsTracking if entry[0] == light.unitID][0]
        # print(pairing)
        light.TrackingPoint = [pt for pt in TrackingObjects if pt.ID == pairing[1]][0]
def remap(degreeinput,degreemin,degreemax,dmxmin=0,dmxmax=65536):
    """
    Linearly map a degree value from [degreemin, degreemax] onto the
    16-bit DMX range [dmxmin, dmxmax].
    """
    dmx_span = dmxmax - dmxmin
    degree_span = degreemax - degreemin
    return (degreeinput - degreemin) * dmx_span / degree_span + dmxmin
def resplitfinecontrol(DMXvalue):
    """Split a 16-bit DMX value into (coarse, fine) 8-bit channel values."""
    whole = round(int(DMXvalue))
    return whole >> 8, whole % 256
def AssignAddressesToLights(LightsUniverseAddr, Light):
    """Look up a light's sACN patch entry and store universe + addresses.

    Each entry of *LightsUniverseAddr* is [unitID, universe, addr, ...];
    columns 1-5 are copied, the first becoming Light.Universe and the
    rest Light.Addresses.  Prints a warning if no entry matches.
    """
    try:
        matches = [entry[1:6] for entry in LightsUniverseAddr if entry[0] == Light.unitID]
        Light.Addresses = matches[0]
        Light.Universe = Light.Addresses[0]
        # Drop the universe from the address list (remove-by-value, as before).
        Light.Addresses.remove(Light.Addresses[0])
    except IndexError:
        print("Light {0}'s address could not be found!' ".format(Light.unitID))
def main_track(axisDict, Lights, argv=None, opts=None):
    """Build all objects, compute pan/tilt + DMX values, and print them.

    Results are also stashed on COM_CONFIG.LightObjects for other modules.
    """
    #Lights = GenerateNewLightList()
    AxisObjects = setup_AxisObjects(axisDict)
    LightObjects = setup_LightObjects(Lights, AxisObjects, LightTypeObjects)
    TrackingObjects = setup_TrackingPoints(COM_CONFIG.TrackingPoints)
    # GiveLightTrackingPoint(LightObjects, TrackingObjects)
    updateTrackingPointsAssociation(COM_CONFIG.LightsTracking, LightObjects, TrackingObjects)
    for Light in LightObjects:
        if Light.TrackingPoint != None:
            CalculateLightAngles(COM_CONFIG.LightsTracking, Light, TrackingObjects)
            """Calculate Pan sACN 16 bit"""
            # Map the angles onto 16-bit DMX and split into coarse/fine bytes.
            Light.Pan = resplitfinecontrol(remap(Light.PanDeg,Light.neg_pan,Light.plus_pan))
            Light.Tilt = resplitfinecontrol(remap(Light.TiltDeg,Light.neg_tilt,Light.plus_tilt))
        AssignAddressesToLights(COM_CONFIG.LightsUniverseAddr, Light)
    #CalculatesACNforLights(COM_CONFIG.LightsUniverseAddr)
    COM_CONFIG.LightObjects = LightObjects
    printAllLightDetails(LightObjects, argv, opts)
if __name__ == "__main__":
if COM_CONFIG._offline_test_ == True:
pass
#COM_CONFIG.GenerateNewLightList()
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,"v:")
main_track(COM_CONFIG.axisDict,COM_CONFIG.Lights, args, opts)
except getopt.GetoptError:
print('AR_track.py -v [0-2]')
sys.exit(2)
```
#### File: scripts/AutoRead-GUI/GUI.py
```python
__doc__ = """
This software takes UDP readouts from TAIT Stage Tech's eChameleon software, and
translates it into useful things.
Originally written for the Guildhall School of Music and Drama by <NAME>,
whilst studying on the BA Hons Technical Theatre Arts course.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "GNU AGPLv3"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import sys
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import GUI_support
from AR_read import read_main
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global val, w, root
    # Build the main window, hand it to the generated support module,
    # then block in the Tk event loop.
    root = tk.Tk()
    top = MainWindow (root)
    GUI_support.init(root, top)
    root.mainloop()
# Module-level handle for a window created via create_MainWindow().
w = None
def create_MainWindow(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    global w, w_win, rt
    rt = root
    # Create the window as a Toplevel of the caller's root.
    w = tk.Toplevel (root)
    top = MainWindow (w)
    GUI_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_MainWindow():
    # Tear down the window created by create_MainWindow() and clear the
    # module-level handle.
    global w
    w.destroy()
    w = None
class MainWindow:
    """Full-screen Tk window showing one readout panel per motion axis.

    Axis data is pulled from AR_read.read_main() and display names from
    AR_axis_names.txt; panels are laid out four per row inside the MCC1
    frame, with a scrolled status list and a credits label underneath.
    """
    def __init__(self, top=None):
        # NOTE(review): updater is scheduled once via top.after(0, ...) and
        # never reschedules itself, so it runs a single time — confirm this
        # is the intended refresh behaviour.
        def updater():
            print("updater running")
        top.after(0,updater)
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Palette for the ttk style map below.
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#ececec' # Closest X11 color: 'gray92'
        self.style = ttk.Style()
        if sys.platform == "win32":
            self.style.theme_use('winnative')
        self.style.configure('.',background=_bgcolor)
        self.style.configure('.',foreground=_fgcolor)
        self.style.map('.',background=
            [('selected', _compcolor), ('active',_ana2color)])
        # Size the window to (almost) the full screen.
        pad = 3
        top.geometry("{0}x{1}+0+0".format(
            top.winfo_screenwidth()-pad, top.winfo_screenheight()-pad))
        top.minsize(0, 0)
        top.maxsize(2000, 1500)
        top.resizable(1, 1)
        top.title("Autoread: <NAME>")
        top.configure(borderwidth="1")
        top.configure(background="#550000")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")
        # Outer black frame that holds everything else.
        self.Master_Container = tk.Frame(top)
        self.Master_Container.place(x=12, y=12, relheight=0.958
            , relwidth=0.988)
        self.Master_Container.configure(relief='flat')
        self.Master_Container.configure(borderwidth="1")
        self.Master_Container.configure(background="#000000")
        self.Master_Container.configure(highlightbackground="#d9d9d9")
        self.Master_Container.configure(highlightcolor="black")
        def importAutoData():
            # Fetch the axis data set from the AR_read module.
            AxisArray = read_main()
            return AxisArray
        AxisArray = importAutoData()
        def importAxisNames():
            # One display name per line; strip the trailing newlines in place.
            with open("AR_axis_names.txt","r") as file:
                AxisNames = list(file.readlines())
            for line in AxisNames:
                AxisNames[AxisNames.index(line)] = line.replace("\n","")
            return AxisNames
        AxisNames = importAxisNames()
        # Labelled frame that hosts the per-axis panels.
        self.MCC1 = tk.LabelFrame(self.Master_Container)
        self.MCC1.place(x=12, y=12, relheight=0.872, relwidth=0.98)
        self.MCC1.configure(relief='groove')
        self.MCC1.configure(foreground="#ffffff")
        self.MCC1.configure(text='''<NAME>''')
        self.MCC1.configure(background="#000066")
        self.MCC1.configure(highlightbackground="#d9d9d9")
        self.MCC1.configure(highlightcolor="black")
        # NOTE(review): []*len(...) always evaluates to [], so TkStorage
        # starts empty regardless of the axis count — confirm intent.
        self.TkStorage = list([]*len(AxisArray))
        AxisOffset = 1 # """Choose the Axes to display here:"""
        AxisNumbers = [*range(1,36),*range(41,47),*range(91,94)]
        for Axis in AxisArray:
            AxisItem = 0
            AxisLabels = []
            # Labels per axis: 7 (frame + number + value + speed + name + time)
            AxisLabels.append(tk.Frame(self.MCC1))
            VertSpacing = 75
            """Insert frame for item."""
            # Column is chosen by AxisOffset % 4; the y term is staggered so
            # the four columns stay row-aligned.
            if AxisOffset % 4 == 1:
                AxisLabels[AxisItem].place(x=10, y=17*((AxisOffset*4)-(AxisOffset * 3))+17, height=60 #rely=0.023
                    , width=400, bordermode='ignore')
            elif AxisOffset % 4 == 2:
                AxisLabels[AxisItem].place(x=430, y=17*((AxisOffset*4)-(AxisOffset * 3)), height=60 #rely=0.023
                    , width=400, bordermode='ignore')
            elif AxisOffset % 4 == 3:
                AxisLabels[AxisItem].place(x=850, y=17*((AxisOffset*4)-(AxisOffset * 3))-17, height=60 #rely=0.023
                    , width=400, bordermode='ignore')
            elif AxisOffset % 4 == 0:
                AxisLabels[AxisItem].place(x=1270, y=17*((AxisOffset*4)-(AxisOffset * 3))-(17*2), height=60 #rely=0.023
                    , width=400, bordermode='ignore')
            AxisLabels[AxisItem].configure(relief='flat')
            AxisLabels[AxisItem].configure(borderwidth="2")
            AxisLabels[AxisItem].configure(background="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisItem += 1
            # Label: large yellow axis number.
            AxisLabels.append(tk.Label(AxisLabels[0]))
            AxisLabels[AxisItem].place(relx=0.0, rely=0.364, height=35, width=40)
            AxisLabels[AxisItem].configure(activebackground="#f9f9f9")
            AxisLabels[AxisItem].configure(activeforeground="black")
            AxisLabels[AxisItem].configure(background="#000000")
            AxisLabels[AxisItem].configure(font="-family {Arial} -size 30")
            AxisLabels[AxisItem].configure(foreground="#ffff00")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisLabels[AxisItem].configure(text=str(AxisNumbers[AxisOffset-1]))
            AxisItem += 1
            # Label: current axis value taken from the data array.
            AxisLabels.append(tk.Label(AxisLabels[0]))
            AxisLabels[AxisItem].place(relx=0.098, rely=0.364, height=35, width=140)
            AxisLabels[AxisItem].configure(activebackground="#f9f9f9")
            AxisLabels[AxisItem].configure(activeforeground="black")
            AxisLabels[AxisItem].configure(anchor='e')
            AxisLabels[AxisItem].configure(background="#ffffff")
            AxisLabels[AxisItem].configure(font="-family {Arial} -size 30")
            AxisLabels[AxisItem].configure(foreground="#000000")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisLabels[AxisItem].configure(text=str(AxisArray[str(AxisNumbers[AxisOffset-1])][0]))
            AxisItem += 1
            # Label: speed readout, initialised to "0 mm/s".
            AxisLabels.append(tk.Label(AxisLabels[0]))
            AxisLabels[AxisItem].place(relx=0.439, rely=0.364, height=35, width=159)
            AxisLabels[AxisItem].configure(activebackground="#000000")
            AxisLabels[AxisItem].configure(activeforeground="white")
            AxisLabels[AxisItem].configure(activeforeground="black")
            AxisLabels[AxisItem].configure(anchor='e')
            AxisLabels[AxisItem].configure(background="#000000")
            AxisLabels[AxisItem].configure(font="-family {Arial} -size 30")
            AxisLabels[AxisItem].configure(foreground="#ffffff")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisLabels[AxisItem].configure(text='''0 mm/s''')
            AxisItem += 1
            # Label: grey header bar carrying the axis display name.
            AxisLabels.append(tk.Label(AxisLabels[0]))
            AxisLabels[AxisItem].place(relx=0.0, rely=0.0, height=20, width=410)
            AxisLabels[AxisItem].configure(activebackground="#f9f9f9")
            AxisLabels[AxisItem].configure(activeforeground="black")
            AxisLabels[AxisItem].configure(anchor='w')
            AxisLabels[AxisItem].configure(background="#bcbcbc")
            AxisLabels[AxisItem].configure(font="-family {Arial} -size 16 -weight bold")
            AxisLabels[AxisItem].configure(foreground="#000000")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisLabels[AxisItem].configure(padx="25")
            AxisLabels[AxisItem].configure(text=AxisNames[AxisOffset-1])
            AxisItem += 1
            # Label: time readout, initialised to "0s".
            AxisLabels.append(tk.Label(AxisLabels[0]))
            AxisLabels[AxisItem].place(relx=0.829, rely=0.364, height=35, width=71)
            AxisLabels[AxisItem].configure(activebackground="#f9f9f9")
            AxisLabels[AxisItem].configure(activeforeground="black")
            AxisLabels[AxisItem].configure(anchor='se')
            AxisLabels[AxisItem].configure(background="#d9d9d9")
            AxisLabels[AxisItem].configure(font="-family {Arial} -size 24")
            AxisLabels[AxisItem].configure(foreground="#000000")
            AxisLabels[AxisItem].configure(highlightbackground="#d9d9d9")
            AxisLabels[AxisItem].configure(highlightcolor="black")
            AxisLabels[AxisItem].configure(text='''0s''')
            AxisOffset += 1
        # Status area: a labelled frame holding a scrolled list box.
        self.Status = tk.LabelFrame(self.Master_Container)
        self.Status.place(relx=0.006, rely=0.898, height=70
            , relwidth=0.988)
        self.Status.configure(relief='groove')
        self.Status.configure(foreground="#ffffff")
        self.Status.configure(text='''Status''')
        self.Status.configure(background="#505050")
        self.Status.configure(highlightbackground="#d9d9d9")
        self.Status.configure(highlightcolor="black")
        self.Scrolledlistbox1 = ScrolledListBox(self.Status)
        self.Scrolledlistbox1.place(relx=0.006, rely=0.235, relheight=0.635
            , relwidth=0.989, bordermode='ignore')
        self.Scrolledlistbox1.configure(background="white")
        self.Scrolledlistbox1.configure(font="TkDefaultFont")
        self.Scrolledlistbox1.configure(foreground="black")
        self.Scrolledlistbox1.configure(highlightbackground="#d9d9d9")
        self.Scrolledlistbox1.configure(highlightcolor="#d9d9d9")
        self.Scrolledlistbox1.configure(selectbackground="#c4c4c4")
        self.Scrolledlistbox1.configure(selectforeground="black")
        # Credits / disclaimer footer.
        self.Label1 = tk.Label(top)
        self.Label1.place(relx=0.006, rely=0.968, height=22, width=1433)
        self.Label1.configure(background="#550000")
        self.Label1.configure(foreground="#aaaaaa")
        self.Label1.configure(text='''Developed by <NAME> for the Guildhall School of Music and Drama. Not working? Contact <EMAIL>. This system should not be relied upon for show or safety critical purposes. System provided as is, with no guarantee.''')
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    '''Configure the scrollbars for a widget.

    Mixin for a Tk widget (see ScrolledListBox): attaches vertical and
    horizontal ttk scrollbars that hide themselves when the full content is
    visible, and copies the master's geometry methods onto the instance.
    '''
    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except:
            # NOTE(review): bare except kept deliberately — tolerates widgets
            # that have no vertical scrolling (see note above).
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)
        #self.configure(yscrollcommand=_autoscroll(vsb),
        #    xscrollcommand=_autoscroll(hsb))
        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))
        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except:
            pass
        hsb.grid(column=0, row=1, sticky='ew')
        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)
        # Copy geometry methods of master (taken from ScrolledText.py)
        if py3:
            methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \
                | tk.Place.__dict__.keys()
        else:
            methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \
                + tk.Place.__dict__.keys()
        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))
    @staticmethod
    def _autoscroll(sbar):
        '''Hide and show scrollbar as needed.'''
        def wrapped(first, last):
            # Tk delivers first/last as strings; a (0, 1) span means the
            # whole content is visible, so the bar can be removed.
            first, last = float(first), float(last)
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped
    def __str__(self):
        return str(self.master)
def _create_container(func):
    '''Creates a ttk Frame with a given master, and use this new frame to
    place the scrollbars and the widget.

    Decorator for the scrolled widget's __init__: wheel bindings are attached
    while the pointer is inside the container and removed when it leaves.
    '''
    def wrapped(cls, master, **kw):
        container = ttk.Frame(master)
        container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container))
        container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container))
        return func(cls, container, **kw)
    return wrapped
class ScrolledListBox(AutoScroll, tk.Listbox):
    '''A standard Tkinter Listbox widget with scrollbars that will
    automatically show/hide as needed.'''
    @_create_container
    def __init__(self, master, **kw):
        # `master` here is the ttk.Frame injected by @_create_container.
        tk.Listbox.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)
    def size_(self):
        # Trailing underscore presumably avoids the geometry `size` method
        # copied from the master by AutoScroll — confirm before renaming.
        sz = tk.Listbox.size(self)
        return sz
import platform
def _bound_to_mousewheel(event, widget):
child = widget.winfo_children()[0]
if platform.system() == 'Windows' or platform.system() == 'Darwin':
child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child))
else:
child.bind_all('<Button-4>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Button-5>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Shift-Button-4>', lambda e: _on_shiftmouse(e, child))
child.bind_all('<Shift-Button-5>', lambda e: _on_shiftmouse(e, child))
def _unbound_to_mousewheel(event, widget):
if platform.system() == 'Windows' or platform.system() == 'Darwin':
widget.unbind_all('<MouseWheel>')
widget.unbind_all('<Shift-MouseWheel>')
else:
widget.unbind_all('<Button-4>')
widget.unbind_all('<Button-5>')
widget.unbind_all('<Shift-Button-4>')
widget.unbind_all('<Shift-Button-5>')
def _on_mousewheel(event, widget):
if platform.system() == 'Windows':
widget.yview_scroll(-1*int(event.delta/120),'units')
elif platform.system() == 'Darwin':
widget.yview_scroll(-1*int(event.delta),'units')
else:
if event.num == 4:
widget.yview_scroll(-1, 'units')
elif event.num == 5:
widget.yview_scroll(1, 'units')
def _on_shiftmouse(event, widget):
if platform.system() == 'Windows':
widget.xview_scroll(-1*int(event.delta/120), 'units')
elif platform.system() == 'Darwin':
widget.xview_scroll(-1*int(event.delta), 'units')
else:
if event.num == 4:
widget.xview_scroll(-1, 'units')
elif event.num == 5:
widget.xview_scroll(1, 'units')
# Launch the GUI only when this file is executed directly (no effect on import).
if __name__ == '__main__':
    vp_start_gui()
```
#### File: autoread/tools/COM_LED.py
```python
import RPi.GPIO as GPIO
import COM_CONFIG
"""This is a common file for controlling GPIO. Don't try to run this"""
def GPIO_ON(Channel):
    """Turns on the pin.

    Drives the given BOARD pin high and logs the change.
    """
    GPIO.output(Channel, GPIO.HIGH)
    print("COM_LED.py: Pin {0} is HIGH".format(Channel))
def GPIO_OFF(Channel):
    """Turns off the channel.

    Drives the given BOARD pin low and logs the change.
    """
    GPIO.output(Channel, GPIO.LOW)
    print("COM_LED.py: Pin {0} is LOW".format(Channel))
def setupGPIO(LED_def=COM_CONFIG.LED_def):
    """Configure every pin in *LED_def* as an output in BOARD numbering.

    LED_def maps an LED name to a pin number; it defaults to the mapping
    declared in COM_CONFIG.
    """
    GPIO.setmode(GPIO.BOARD)
    # Bug fix: iterate the mapping that was actually passed in — the
    # original ignored the LED_def parameter and always read
    # COM_CONFIG.LED_def, making the parameter useless.
    for key, value in LED_def.items():
        GPIO.setup(value, GPIO.OUT)
        print("COM_LED: {0} has been set up on PIN {1}".format(key,value))
```
|
{
"source": "jcoopertech/sACNtoArtNet",
"score": 3
}
|
#### File: jcoopertech/sACNtoArtNet/sACN.py
```python
import time
from params.UserParams import *
from params.sACNParams import *
from setup import *
def flush_buffer(buffer_size):
    """Return a zero-filled bytearray of *buffer_size* slots."""
    # bytearray(n) is pre-zeroed, so no explicit fill loop is needed.
    return bytearray(buffer_size)
def merge_sacn_inputs(sacn_data):  # Input Universe, CID and DMX data
    """Merge this source's DMX data into the shared per-universe merge state.

    Implements highest-takes-precedence merging across every source (CID)
    seen on sacn_data["universe"], optionally honouring per-channel priority,
    and drops sources that have been silent longer than
    E131_NETWORK_DATA_LOSS_TIMEOUT.  Mutates the module-level merge_dict and
    sacn_data in place; returns the merged 512-slot DMX data.
    """
    if merge is True:
        # Merge can be enabled and disabled in the UserParams.py
        if sacn_data["cid"] not in merge_dict[sacn_data["universe"]]:
            merge_dict[sacn_data["universe"]].update({sacn_data["cid"]: {}})
            # Create new entry for this CID if not already created
        if "priority" not in merge_dict[sacn_data["universe"]][sacn_data["cid"]]:
            # If per channel priority does not exist yet, add it.
            per_channel_priority = bytearray()  # Create empty bytearray
            for i in range(512):
                per_channel_priority.append(sacn_data["priority"])  # Copy universe priority to every channel
            merge_dict[sacn_data["universe"]][sacn_data["cid"]].update(priority=per_channel_priority)
        merge_dict[sacn_data["universe"]][sacn_data["cid"]].update(dmx=sacn_data["dmx_data"], time=time.time())
        # Update Time and DMX data
        output_dmx = flush_buffer(512)  # Reset DMX output to 0
        output_priority = flush_buffer(512)  # Reset Priority output to 0
        for cids in merge_dict[sacn_data["universe"]]:  # Loop for every CID input on this universe
            for dmx_length in range(512):  # Loop for every position of the DMX packet
                if use_per_channel_priority is True:
                    # Merge mode can be changed in the UserParams.py
                    if output_priority[dmx_length] < merge_dict[sacn_data["universe"]][cids]["priority"][dmx_length]:
                        # If priority is higher, overwrite output.
                        output_priority[dmx_length] = merge_dict[sacn_data["universe"]][cids]["priority"][dmx_length]
                        if "dmx" not in merge_dict[sacn_data["universe"]][cids]:
                            pass
                            # Do not do anything if we receive a priority packet first.
                        else:
                            output_dmx[dmx_length] = merge_dict[sacn_data["universe"]][cids]["dmx"][dmx_length]
                            # Update DMX if priority is higher.
                    if output_priority[dmx_length] == merge_dict[sacn_data["universe"]][cids]["priority"][dmx_length]:
                        # If priority is equal, check DMX value
                        if "dmx" not in merge_dict[sacn_data["universe"]][cids]:
                            pass
                        elif output_dmx[dmx_length] < merge_dict[sacn_data["universe"]][cids]["dmx"][dmx_length]:
                            # If priority is equal, the highest value wins.
                            output_dmx[dmx_length] = merge_dict[sacn_data["universe"]][cids]["dmx"][dmx_length]
                if use_per_channel_priority is False:
                    if "dmx" not in merge_dict[sacn_data["universe"]][cids]:
                        pass
                    elif output_dmx[dmx_length] < merge_dict[sacn_data["universe"]][cids]["dmx"][dmx_length]:
                        # If priority is equal, the highest value wins.
                        output_dmx[dmx_length] = merge_dict[sacn_data["universe"]][cids]["dmx"][dmx_length]
        for universes in merge_dict:
            # If a universe has timeout, remove it from the dictionary, so it won't overwrite the priority of the other
            # active universes.
            # NOTE(review): the loop variable `universes` is never used — the
            # body always inspects sacn_data["universe"], and an entry is
            # deleted while iterating (mitigated only by the break). Confirm.
            for cids in merge_dict[sacn_data["universe"]]:
                if time.time() - merge_dict[sacn_data["universe"]][cids]["time"] > E131_NETWORK_DATA_LOSS_TIMEOUT:
                    if debug_level >= 2:
                        print(f"From {sacn_data['cid']} deleting universe {sacn_data['universe']}. Timeout after "
                              f"{time.time() - merge_dict[sacn_data['universe']][cids]['time']} seconds.")
                    del merge_dict[sacn_data["universe"]][cids]
                    # Delete the Universe on this CID and leave the loop.
                    break
        sacn_data["dmx_data"] = output_dmx
        sacn_data["per_channel_priority"] = output_priority
    elif merge is False:
        pass
    # Store these data in the input dict and return
    return sacn_data["dmx_data"]
def add_sacn_priority(sacn_data):
    """Record a source's per-channel priorities (0xDD packet) in merge_dict.

    Creates the entry for this CID on this universe if needed, then stores a
    copy of the 512 per-channel priority values with the arrival time.
    """
    if sacn_data["cid"] not in merge_dict[sacn_data["universe"]]:
        # Bug fix: the original assigned merge_dict[universe] = {cid: {}},
        # replacing the whole universe dict and silently discarding every
        # other source's state; update() only adds this CID (and matches
        # how merge_sacn_inputs creates entries).
        merge_dict[sacn_data["universe"]].update({sacn_data["cid"]: {}})
    per_channel_priority = bytearray()
    for slot in range(512):
        per_channel_priority.append(sacn_data["per_channel_priority"][slot])
    merge_dict[sacn_data["universe"]][sacn_data["cid"]].update(priority=per_channel_priority, time=time.time())
def identify_sacn_startcode(sacn_input):
    """Classify an sACN packet by the DMX start code at octet 125."""
    start_code = sacn_input[125]
    if start_code == 0x00:
        return "DMX"
    if start_code == 0xDD:
        return "PER_CHANNEL_PRIORITY"
    if start_code == 0xCC:
        return "RDM"
    # Any other start code is an "alternate" (non-DMX) payload.
    return "ALTERNATE"
def identify_sacn_packet(sacn_input):
    """Classify a raw sACN packet by its framing-layer vector.

    Returns "sACN_DATA_PACKET", "sACN_EXTENDED_SYNCHRONIZATION",
    "sACN_EXTENDED_DISCOVERY", or None for short/unrecognised packets.
    """
    # A valid sACN packet is at least 126 octets.  Bug fixes: the original
    # had a dead no-op expression (`len(sacn_input) < 126` on its own line),
    # raised a TypeError only to catch it itself, and then fell through and
    # could still classify a truncated packet.  Reject short input outright.
    if len(sacn_input) < 126:
        if debug_level >= 1:
            print("LENGTH ERROR:", "Unknown Package. The minimum length for a sACN package is 126.")
        return None
    vector = tuple(sacn_input[40:44])
    if vector == VECTOR_E131_DATA_PACKET:  # sACN Data Packet
        return "sACN_DATA_PACKET"
    if vector == VECTOR_E131_EXTENDED_SYNCHRONIZATION:  # sACN Sync Packet
        return "sACN_EXTENDED_SYNCHRONIZATION"
    if vector == VECTOR_E131_EXTENDED_DISCOVERY:  # sACN Discovery Packet
        return "sACN_EXTENDED_DISCOVERY"
    return None
def sacn_data_check_validity(sacn_packet):
    """Raise TypeError unless *sacn_packet* is a well-formed E1.31 data packet.

    Checks the fixed fields of the root, framing and DMP layers against the
    constants from sACNParams; returns None when the packet is valid.
    """
    # E131 Data Packet:
    # # # ROOT LAYER # # #
    # 0-1: Preamble Size (0x0010) <- Discard if not valid
    # 2-3: Postable Size (0x0000) <- Discard if not valid
    # 4-15: ACN Packet Identifier
    #       (0x41 0x53 0x43 0x2d 0x45 0x31 0x2e 0x31 0x37 0x00 0x00 0x00) <- Discard if not valid
    # 16-17: Flags and length (Low 12 bits = PDU length, High 4 bits = 0x7)
    # 18-21: Identifies RLP Data as 1.31 Protocol
    #        (VECTOR_ROOT_E131_DATA or VECTOR_ROOT_E131_EXTENDED) <- Discard if not valid
    # 22-37: Senders unique CID
    # # # DATA PACKET FRAMING LAYER # # #
    # 38-39 Flags and lenght (Low 12 bits = PDU length, High 4 bits = 0x7
    # 40-43 Identifies 1.31 data as DMP Protocol PDU (VECTOR_E131_DATA_PACKET)
    # 44-107: Source Name assigned by User (UTF-8 encoded string)
    # 108: Package Priority of multiple sources (0-200, 100 being default)
    # 109-110: Synchronization Address (Universe on which sync packets will be sent)
    # 111: Sequence Number (To detect duplicate or out of order packets)
    # 112: Options (Bit 5 = Force_Synchronization, Bit 6 = Stream_Terminated, Bit 7 = Preview Data)
    # 113-114: Universe Number
    # # # DMP Layer # # #
    # 115-116: Flags and length (Low 12 bits = PDU Length, High 4 bits = 0x7)
    # 117: Identifies DMP Set Property Message PDU (VECTOR_DMP_SET_PROPERTY) <- Discard if not valid
    # 118: Address Type and Data Type (0xa1) <- Discard if not valid
    # 119-120: First property address, Indicates DMX Start Code is at DMP address 0 (0x0000) <- Discard if not valid
    # 121-122: Address Increment, Indicate each property is 1 octet (0x0001) <- Discard if not valid
    # 123-124: Property value count, Indicates +1 the number of slots in packet (0x0001 -- 0x0201)
    # 125-637: Property values, DMX Start Code and data (Start Code + data) <- DMX DATA
    # The following IF-Statements discard the package if it does not comply with E1.31 standards
    if tuple(sacn_packet[0:2]) != PREAMBLE_SIZE or tuple(sacn_packet[2:4]) != POST_AMBLE_SIZE or \
            tuple(sacn_packet[4:16]) != ACN_PACKET_IDENTIFIER or \
            tuple(sacn_packet[18:22]) != VECTOR_ROOT_E131_DATA or \
            tuple(sacn_packet[40:44]) != VECTOR_E131_DATA_PACKET or \
            sacn_packet[117] != VECTOR_DMP_SET_PROPERTY or \
            sacn_packet[118] != ADDRESS_TYPE_DATA_TYPE or \
            tuple(sacn_packet[119:121]) != FIRST_PROPERTY_ADDRESS or \
            tuple(sacn_packet[121:123]) != ADDRESS_INCREMENT:
        # Raise an error, if any of the package content is not valid. Print out what it should be and what was sent.
        raise TypeError(f"Package does not comply E1.31 standard! \
            Preamble {PREAMBLE_SIZE} was {tuple(sacn_packet[0:2])}, \
            Postamble {POST_AMBLE_SIZE} was {tuple(sacn_packet[2:4])}, \
            ACN Packet Identifier {ACN_PACKET_IDENTIFIER} was {tuple(sacn_packet[4:16])}, \
            VECTOR E1.31 {VECTOR_ROOT_E131_DATA} was {tuple(sacn_packet[18:22])}, \
            VECTOR E1.31 Data {VECTOR_ROOT_E131_DATA} was {tuple(sacn_packet[40:44])}, \
            VECTOR DMP {VECTOR_DMP_SET_PROPERTY} was {(sacn_packet[117])}, \
            Address Type {ADDRESS_TYPE_DATA_TYPE} was {sacn_packet[118]}, \
            First Property Address {FIRST_PROPERTY_ADDRESS} was {tuple(sacn_packet[119:121])}, \
            Address Increment {ADDRESS_INCREMENT} was {tuple(sacn_packet[121:123])}")
def sacn_dmx_input(sacn_packet):
    """Unpack a DMX data packet (start code 0x00) into a field dictionary."""
    # Slice the fixed E1.31 data-packet layout into named fields.
    fields = {}
    fields["cid"] = sacn_packet[22:38]
    fields["source_name"] = str(sacn_packet[44:108])
    fields["priority"] = sacn_packet[108]
    fields["sync_address"] = tuple(sacn_packet[109:111])
    fields["sequence_number"] = sacn_packet[111]
    fields["option_flags"] = sacn_packet[112]
    fields["universe"] = tuple(sacn_packet[113:115])
    fields["start_code"] = sacn_packet[125]
    fields["dmx_data"] = sacn_packet[126:638]
    fields["universe_hibyte"] = sacn_packet[113]
    fields["universe_lobyte"] = sacn_packet[114]
    return fields
def sacn_per_channel_input(sacn_packet):
    """Unpack a per-channel-priority packet (start code 0xDD) into a dict."""
    # Same framing as a DMX packet, but the 512 property values carry
    # priorities instead of levels (and there is no universe priority field).
    fields = {}
    fields["cid"] = sacn_packet[22:38]
    fields["source_name"] = str(sacn_packet[44:108])
    fields["sync_address"] = tuple(sacn_packet[109:111])
    fields["sequence_number"] = sacn_packet[111]
    fields["option_flags"] = sacn_packet[112]
    fields["universe"] = tuple(sacn_packet[113:115])
    fields["start_code"] = sacn_packet[125]
    fields["per_channel_priority"] = sacn_packet[126:638]
    fields["universe_hibyte"] = sacn_packet[113]
    fields["universe_lobyte"] = sacn_packet[114]
    return fields
def sacn_rdm_input(sacn_packet):
    """Unpack a non-DMX start-code packet into a dict (payload in "alternate_data")."""
    fields = {}
    fields["cid"] = sacn_packet[22:38]
    fields["source_name"] = str(sacn_packet[44:108])
    fields["priority"] = sacn_packet[108]
    fields["sync_address"] = tuple(sacn_packet[109:111])
    fields["sequence_number"] = sacn_packet[111]
    fields["option_flags"] = sacn_packet[112]
    fields["universe"] = tuple(sacn_packet[113:115])
    fields["start_code"] = sacn_packet[125]
    fields["alternate_data"] = sacn_packet[126:638]
    fields["universe_hibyte"] = sacn_packet[113]
    fields["universe_lobyte"] = sacn_packet[114]
    return fields
def sacn_alternate_input(sacn_packet):
    """Placeholder for alternate-start-code packets; not implemented yet."""
    pass
def sacn_sync_input(sacn_packet):
    """Validate and unpack an E1.31 synchronization packet.

    Raises TypeError if the fixed fields do not match the E1.31 framing;
    otherwise returns a dict with the sender CID, synchronization address
    and sequence number.
    """
    # Layout (octet offsets):
    #   ROOT LAYER: 0-1 preamble (0x0010), 2-3 postamble (0x0000),
    #       4-15 ACN packet identifier, 16-17 flags/length,
    #       18-21 VECTOR_ROOT_E131_EXTENDED, 22-37 sender CID.
    #   SYNC FRAMING LAYER: 38-39 flags/length,
    #       40-43 VECTOR_E131_EXTENDED_SYNCHRONIZATION, 44 sequence number,
    #       45-46 synchronization address, 47-48 reserved (transmit as zero).
    # NOTE(review): the last clause rejects packets whose sync address at
    # 45-46 is NON-zero, yet the layer comment says "discard if zero" and
    # the sync address is returned below — the test may be inverted or aimed
    # at the reserved octets 47-48.  Preserved as-is; confirm against E1.31.
    if tuple(sacn_packet[0:2]) != PREAMBLE_SIZE or tuple(sacn_packet[2:4]) != POST_AMBLE_SIZE or \
            tuple(sacn_packet[4:16]) != ACN_PACKET_IDENTIFIER or \
            tuple(sacn_packet[18:22]) != VECTOR_ROOT_E131_EXTENDED or \
            tuple(sacn_packet[40:44]) != VECTOR_E131_EXTENDED_SYNCHRONIZATION or \
            tuple(sacn_packet[45:47]) != (0, 0):
        # Bug fix: the original interpolated `tuple[40:44]` (a subscript of
        # the builtin type, not the packet bytes) into the error message.
        raise TypeError(f"Package does not comply E1.31 standard! \
            Preamble {PREAMBLE_SIZE} was {tuple(sacn_packet[0:2])}, \
            Postamble {POST_AMBLE_SIZE} was {tuple(sacn_packet[2:4])}, \
            ACN Packet Identifier {ACN_PACKET_IDENTIFIER} was {tuple(sacn_packet[4:16])}, \
            VECTOR E1.31 {VECTOR_ROOT_E131_EXTENDED} was {tuple(sacn_packet[18:22])}, \
            VECTOR E1.31 Sync {VECTOR_E131_EXTENDED_SYNCHRONIZATION} was {tuple(sacn_packet[40:44])}")
    # Dictionary with all the information we can get from this package
    sACN_data = {"cid": sacn_packet[22:38], "sync_address": tuple(sacn_packet[45:47]),
                 "sequence_number": sacn_packet[44]}
    return sACN_data
def sacn_discovery_input(sacn_packet):
    """Validate and unpack an E1.31 universe-discovery packet.

    Raises TypeError when the fixed fields do not match the E1.31 framing;
    otherwise returns a dict with the sender CID, source name, page numbers
    and the raw universe-list octets.
    """
    # Layout (octet offsets):
    #   ROOT LAYER: 0-1 preamble (0x0010), 2-3 postamble (0x0000),
    #       4-15 ACN packet identifier, 16-17 flags/length,
    #       18-21 VECTOR_ROOT_E131_EXTENDED, 22-37 sender CID.
    #   FRAMING LAYER: 38-39 flags/length,
    #       40-43 VECTOR_E131_EXTENDED_DISCOVERY, 44-107 source name,
    #       108-111 reserved (transmit as zero).
    #   UNIVERSE DISCOVERY LAYER: 112-113 flags/length,
    #       114-117 VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST,
    #       118 page (0-based), 119 final page,
    #       120-1143 sorted list of up to 512 16-bit universes.
    if tuple(sacn_packet[0:2]) != PREAMBLE_SIZE or tuple(sacn_packet[2:4]) != POST_AMBLE_SIZE or \
            tuple(sacn_packet[4:16]) != ACN_PACKET_IDENTIFIER or \
            tuple(sacn_packet[18:22]) != VECTOR_ROOT_E131_EXTENDED or \
            tuple(sacn_packet[40:44]) != VECTOR_E131_EXTENDED_DISCOVERY or \
            tuple(sacn_packet[114:118]) != VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST:
        # Bug fix: the original interpolated `tuple[40:44]` / `tuple[114:118]`
        # (subscripts of the builtin type) instead of the packet bytes.
        raise TypeError(f"Package does not comply E1.31 standard! \
            Preamble {PREAMBLE_SIZE} was {tuple(sacn_packet[0:2])}, \
            Postamble {POST_AMBLE_SIZE} was {tuple(sacn_packet[2:4])}, \
            ACN Packet Identifier {ACN_PACKET_IDENTIFIER} was {tuple(sacn_packet[4:16])}, \
            VECTOR E1.31 {VECTOR_ROOT_E131_EXTENDED} was {tuple(sacn_packet[18:22])}, \
            VECTOR E1.31 Discovery {VECTOR_E131_EXTENDED_DISCOVERY} was {tuple(sacn_packet[40:44])}, \
            VECTOR E1.31 Discovery List {VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST} was {tuple(sacn_packet[114:118])}")
    # Bug fix: "page" is a single octet — tuple(sacn_packet[118]) raised
    # TypeError on every call.
    # Bug fix: the universe list spans octets 120-1143 inclusive (see the
    # layout above), so the slice end must be 1144; the original [120:1143]
    # silently dropped the final octet.
    sACN_data = {"cid": sacn_packet[22:38], "source_name": str(sacn_packet[44:108]), "page": sacn_packet[118],
                 "final_page": sacn_packet[119], "universes": sacn_packet[120:1144]}
    return sACN_data
```
#### File: jcoopertech/sACNtoArtNet/socket_settings.py
```python
import socket
from params import ArtNetParams, sACNParams
from params.UserParams import *
def calculate_multicast_addr(universemin: int):
    """Return the sACN multicast group address for a universe number."""
    # E1.31 multicast groups are 239.255.<universe hi byte>.<universe lo byte>.
    high_octet, low_octet = divmod(universemin, 256)
    return F"239.255.{high_octet}.{low_octet}"
def sacn_socket_setup(udp_ip="127.0.0.1", min_universe=universe_min, max_universe=universe_max,
                      sacn_port=sACNParams.ACN_SDT_MULTICAST_PORT):
    """Create the sACN receive socket.

    Binds a UDP socket to *sacn_port* and joins the multicast group of every
    universe in [min_universe, max_universe].  Returns the configured socket.
    """
    if debug_level >= 3:
        print("RUNNING sACN SOCKET SETUP...")
    sacn_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # Set up socket
    try:
        # Reuse address if already taken by another application
        sacn_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except OSError:
        # Bug fix: setsockopt signals failure with OSError; the original
        # caught RuntimeWarning, which it can never raise, so the fallback
        # message was unreachable.
        if debug_level >= 1:
            print(f"Address can't be reused! Please close all applications that are assigned to Port \
{sacn_port}")
    sacn_sock.bind((udp_ip, sacn_port))  # Bind before joining the multicast groups
    multicast_list = []
    if debug_level >= 3:
        print(f"Listening to sACN on Universe {min_universe} thru {max_universe}")
    # Join the multicast group of every universe in the configured range.
    for universe in range(min_universe, max_universe + 1):
        multicast_addr = calculate_multicast_addr(universe)
        multicast_list.append(multicast_addr)
        sacn_sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
                             socket.inet_aton(multicast_addr) + socket.inet_aton(udp_ip))
    if debug_level >= 3:
        print(f"Joined Multicast Group:{multicast_list}")
        print(f"UDP target IP: {udp_ip}")
        print(f"UDP target Port: {sacn_port}")
    return sacn_sock
def artnet_socket_setup(udp_ip="127.0.0.1", artnet_port=ArtNetParams.UDP_PORT):
    """Create a non-blocking UDP socket for Art-Net with broadcast enabled.

    Returns the socket bound to (udp_ip, artnet_port).
    """
    if debug_level >= 3:
        print("RUNNING ART-NET SOCKET SETUP...")
    artnet_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # Set up socket
    artnet_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)  # Broadcast for ArtNet-Broadcast sending
    artnet_sock.setblocking(False)
    try:
        # Reuse address if already taken by another application
        artnet_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except OSError:
        # Bug fix: narrowed from a bare `except:` — setsockopt raises OSError
        # on failure; a bare except would also swallow KeyboardInterrupt etc.
        if debug_level >= 1:
            print(f"Address can't be reused! Please close all applications that are assigned to Port \
{artnet_port}")
    artnet_sock.bind((udp_ip, artnet_port))
    if debug_level >= 3:
        print(f"UDP target IP: {udp_ip}")
        print(f"UDP target Port: {artnet_port}")
    return artnet_sock
def artnet_socket_unicast_setup(udp_ip="127.0.0.1", artnet_port=ArtNetParams.UDP_PORT):
    """Create a non-blocking UDP socket for unicast Art-Net (no broadcast flag).

    Returns the socket bound to (udp_ip, artnet_port).
    """
    if debug_level >= 3:
        print("RUNNING ART-NET SOCKET SETUP...")
    artnet_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # Set up socket
    artnet_sock.setblocking(False)
    try:
        # Reuse address if already taken by another application
        artnet_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except OSError:
        # Bug fix: narrowed from a bare `except:` — setsockopt raises OSError
        # on failure; a bare except would also swallow KeyboardInterrupt etc.
        if debug_level >= 1:
            print(f"Address can't be reused! Please close all applications that are assigned to Port \
{artnet_port}")
    artnet_sock.bind((udp_ip, artnet_port))
    if debug_level >= 3:
        print(f"UDP target IP: {udp_ip}")
        print(f"UDP target Port: {artnet_port}")
    return artnet_sock
```
|
{
"source": "JCoopzKnoxRox/thatsTheNews",
"score": 2
}
|
#### File: thatsTheNews/server/autoArtUpdate.py
```python
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import json
import sys
import newspaper
import requests
import datetime
from mongoengine.errors import ValidationError
from newspaper import Article, Config, news_pool
from schema import And, Schema
from apscheduler.schedulers.background import BackgroundScheduler
from time import strftime
from flask import jsonify, request
from app import app
from authorization import login_required
from models import NewsArticle
import utils
def auto_article_go_getter():
print("starting builds ", file=sys.stderr)
cnn_paper = newspaper.build("https://www.cnn.com", memorize_articles=True, language = 'en')
print("cnn_paper built", file=sys.stderr)
nbc_paper = newspaper.build("https://www.nbcnews.com", memorize_articles=True, language = 'en')
#print("nbc_paper built", file=sys.stderr)
#nyt_paper = newspaper.build("https://www.nytimes.com/", memorize_articles=True, language = 'en')
#print("nyt_paper built", file=sys.stderr)
apn_paper = newspaper.build("https://apnews.com/", memorize_articles=True, language = 'en')
print("apn_paper built", file=sys.stderr)
abc_paper = newspaper.build("https://abcnews.go.com/", memorize_articles=True, language = 'en')
print("abc_paper built", file=sys.stderr)
papers = [cnn_paper, nbc_paper, apn_paper, abc_paper]
verge_paper = newspaper.build("https://www.theverge.com/", memorize_articles=True, language = 'en')
print("verge_paper built", file=sys.stderr)
techP = [verge_paper]
espn_paper = newspaper.build("https://www.espn.com/", memorize_articles=True, language = 'en')
print("espn_paper built", file=sys.stderr)
sportP = [espn_paper]
et_paper = newspaper.build("https://ew.com/", memorize_articles=True, language = 'en')
print("ew_paper built", file=sys.stderr)
entertainmentP = [et_paper]
crypto_paper = newspaper.build("https://cryptonews.com/", memorize_articles=True, language = 'en')
print("crypto_paper built", file=sys.stderr)
cryptoP = [crypto_paper]
climate_paper = newspaper.build("https://www.climatechangenews.com/", memorize_articles=True, language = 'en')
print("climate_paper built", file=sys.stderr)
climateP = [climate_paper]
print("all papers built", file=sys.stderr)
count = 0
article_list = []
print("Starting pool threading", file=sys.stderr)
print("Starting pool for papers", file=sys.stderr)
news_pool.set(papers, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for papers", file=sys.stderr)
print("Starting pool for techp", file=sys.stderr)
news_pool.set(techP, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for techp", file=sys.stderr)
print("Starting pool for sportp", file=sys.stderr)
news_pool.set(sportP, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for sportp", file=sys.stderr)
print("Starting pool for entertainmentp", file=sys.stderr)
news_pool.set(entertainmentP, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for entertainmentp", file=sys.stderr)
print("Starting pool for cryptop", file=sys.stderr)
news_pool.set(cryptoP, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for cryptop", file=sys.stderr)
print("Starting pool for climatep", file=sys.stderr)
news_pool.set(climateP, threads_per_source=1000)
news_pool.join()
print("Finished pool threading for climatep", file=sys.stderr)
print("Saving articles to mongodb", file=sys.stderr)
# --- Parse, summarise and persist each article, categorised by URL/keywords.
# Each branch builds a NewsArticle document and calls .save() (mongoengine).
for build in papers:
for news in (build.articles):
if "politics" in news.url and "cnnespanol" not in news.url:
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
# AP stories are stored whole instead of summarised.
if "apnews.com" in news.url:
textSum = news.text
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "political",
#text = news.text,
text = textSum,
title = news.title
).save()
#email_services = ["hotmail", "gmail", "yahoo"]
#email_contains_service = any(email_service in user_email for email_service in email_services)
# NOTE(review): this condition is ALWAYS False -- `in` asks whether the
# *list object itself* is one of the tokens.  The intent is presumably
# any(kw in word_tokenize(news.text) for kw in [...]).  Also,
# news.parse() has not run yet on this branch, so news.text is likely
# still empty when tokenized -- confirm and restructure.
elif ["stock", "net", "loss", "Q1", "Q2", "Q3", "Q4", "Gain"] in word_tokenize(news.text):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
if "apnews.com" in news.url:
textSum = news.text
article = NewsArticle(
link = news.url,
image = news.top_image,
# NOTE(review): "buisness" is misspelled but is a persisted category
# value; renaming it would orphan existing documents.
wing = "buisness",
text = textSum,
title = news.title
).save()
elif "covid" in news.url or "corona" in news.url:
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
if "apnews.com" in news.url:
textSum = news.text
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "covid",
text = textSum,
title = news.title
).save()
count += 1
# The remaining loops are near-identical save pipelines per category;
# consider extracting a helper: save_article(news, wing).
for build in techP:
for news in (build.articles):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
if "apnews.com" in news.url:
textSum = news.text
# Skip comment-page permalinks.
if "#comments" not in news.url:
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "tech",
text = textSum,
title = news.title
).save()
for build in sportP:
for news in (build.articles):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "sports",
text = textSum,
title = news.title
).save()
for build in entertainmentP:
for news in (build.articles):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "entertainment",
text = textSum,
title = news.title
).save()
for build in cryptoP:
for news in (build.articles):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "crypto",
text = textSum,
title = news.title
).save()
for build in climateP:
for news in (build.articles):
news.parse()
#call on text summarizer with text of article
textSum = text_summarizer(news.text)
article = NewsArticle(
link = news.url,
image = news.top_image,
wing = "climate",
text = textSum,
title = news.title
).save()
print("Articles saved in mongodb", file=sys.stderr)
#Schedule the above function to run every hour to look for new news articles
print("instantiating scheduler", file=sys.stderr)
scheduler = BackgroundScheduler()
print("adding job to scheduler", file=sys.stderr)
# Runs auto_article_go_getter immediately, then hourly thereafter.
scheduler.add_job(auto_article_go_getter, 'interval', next_run_time=datetime.datetime.now(), hours=1)
print("starting scheduler", file=sys.stderr)
scheduler.start()
print("finished schedule", file=sys.stderr)
def text_summarizer(text):
    """Frequency-based extractive summarizer.

    Scores each sentence by the summed frequency of the non-stopword
    tokens it contains and keeps sentences scoring above 1.2x the
    average sentence score.

    Args:
        text: article body to summarize.

    Returns:
        A string of the selected sentences (each prefixed by a space),
        the input unchanged when it is empty, or "" when no sentence
        could be scored.
    """
    print("Starting Text Summarizer", file=sys.stderr)
    if len(text) <= 0:
        return text
    # Tokenizing the text and counting non-stopword frequencies.
    stopWords = set(stopwords.words("english"))
    words = word_tokenize(text)
    freqTable = dict()
    for word in words:
        word = word.lower()
        if word in stopWords:
            continue
        freqTable[word] = freqTable.get(word, 0) + 1
    # Score each sentence by the frequencies of the words it contains.
    sentences = sent_tokenize(text)
    sentenceValue = dict()
    for sentence in sentences:
        lowered = sentence.lower()  # hoisted out of the inner loop
        for word, freq in freqTable.items():
            if word in lowered:
                sentenceValue[sentence] = sentenceValue.get(sentence, 0) + freq
    # Bug fix: when no sentence scored (e.g. whitespace-only input, or
    # text made entirely of stopwords) the average below divided by zero.
    if not sentenceValue:
        return ""
    # Average value of a sentence from the original text.
    average = int(sum(sentenceValue.values()) / len(sentenceValue))
    # Keep only sentences scoring well above average.
    # (The noisy full-text debug prints were removed.)
    summary = ''
    for sentence in sentences:
        if (sentence in sentenceValue) and (sentenceValue[sentence] > (1.2 * average)):
            summary += " " + sentence
    return summary
# Entry point: run one scrape immediately; the BackgroundScheduler
# configured above repeats it hourly.
if __name__ == "__main__":
auto_article_go_getter()
```
|
{
"source": "jcooter/awsprocesscreds",
"score": 2
}
|
#### File: awsprocesscreds/awsprocesscreds/__init__.py
```python
import logging
__version__ = '0.0.2'
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
```
|
{
"source": "jcopi/py_test",
"score": 3
}
|
#### File: jcopi/py_test/test.py
```python
import time
class Test(object):
    """Base class describing the lifecycle of a schedulable test.

    Subclasses override the hooks below.  Every default implementation
    is a no-op, and is_finished() defaults to "never done".
    """

    def __init__(self):
        pass

    def initialize(self):
        """Called once before the first execute()."""
        pass

    def execute(self, delta_t):
        """Run one step; delta_t is the time elapsed since the last call."""
        pass

    def is_finished(self):
        """Return True once the test should stop being scheduled."""
        return False

    def end(self):
        """Called once after the loop finishes."""
        pass

    def command(self, name):
        """Handle a named command."""
        pass

    def accept_data(self, jobj):
        """Consume a data object received over the control pipe."""
        pass
def schedule(test, pipe):
    """Instantiate *test* and drive its execute loop until it reports done.

    Args:
        test: a Test subclass (the class itself, not an instance).
        pipe: a connection with poll()/recv(); any incoming object is
            forwarded to the test via accept_data().
    """
    test_inst = test()
    last_t = time.time()
    test_inst.initialize()
    while not test_inst.is_finished():
        # Bug fix: the elapsed time was computed from the undefined name
        # `last_time`, which raised NameError on the first iteration.
        now = time.time()
        test_inst.execute(now - last_t)
        last_t = now
        if pipe.poll():
            test_inst.accept_data(pipe.recv())
        time.sleep(0.02)
    test_inst.end()
```
|
{
"source": "jcoppin/home-assistant",
"score": 2
}
|
#### File: components/rainwise/sensor.py
```python
import asyncio
import logging
import async_timeout
import aiohttp
import voluptuous as vol
from bs4 import BeautifulSoup
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.const import (
CONF_NAME,
CONF_URL,
ATTR_ATTRIBUTION,
CONF_MONITORED_CONDITIONS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
REQUIREMENTS = ["beautifulsoup4==4.8.0"]
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Weather from a Rainwise IP-100"
# Map sensor key -> [friendly name, unit, CSS selector on the IP-100
# status page, MDI icon, parser applied to the selected tag's .contents].
SENSOR_TYPES = {
    "precipitation": [
        "Precipitation",
        "in",
        "#rfd",
        "mdi:weather-pouring",
        lambda text: float(text[0][:-1]),
    ],
    "temperature": [
        "Temperature",
        "°F",
        "#tic",
        "mdi:thermometer",
        lambda text: float(text[0][:-1]),
    ],
    "humidity": [
        "Humidity",
        "%",
        "#ric",
        "mdi:water-percent",
        lambda text: int(text[0][:-1]),
    ],
    # Wind speed and direction share the "#wic" cell: "speed mph, degrees".
    "windDirection": [
        "Wind direction",
        "",
        "#wic",
        "mdi:compass-outline",
        lambda text: _get_wind_direction(float(text[0][:-1].split(", ")[1])),
    ],
    "pressure": [
        "Pressure",
        "inHg",
        "#bic",
        "mdi:gauge",
        lambda text: float(text[0][:-1]),
    ],
    "windSpeed": [
        "Wind speed",
        "mph",
        "#wic",
        "mdi:weather-windy",
        lambda text: float(text[0].split(", ")[0][:-4]),
    ],
    "windGust": [
        "Wind gust",
        "mph",
        "#wdh",
        "mdi:weather-windy",
        lambda text: float(text[0].split(", ")[0][:-4]),
    ],
    "battery": [
        # NOTE(review): "Sation" is a typo for "Station", but this is the
        # user-visible entity name -- renaming may change entity IDs.
        "Sation Battery",
        "Volts",
        "#batt",
        "mdi:gauge",
        lambda text: float(text[0][:-6]),
    ],
}
DEFAULT_NAME = "Rainwise"
# Platform config: requires the IP-100 status-page URL; monitored
# conditions must come from SENSOR_TYPES.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_URL): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=["symbol"]): vol.All(
            cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def _get_wind_direction(wind_direction_degree: float) -> str:
"""Convert wind direction degree to named direction."""
if 11.25 <= wind_direction_degree < 33.75:
return "NNE"
if 33.75 <= wind_direction_degree < 56.25:
return "NE"
if 56.25 <= wind_direction_degree < 78.75:
return "ENE"
if 78.75 <= wind_direction_degree < 101.25:
return "E"
if 101.25 <= wind_direction_degree < 123.75:
return "ESE"
if 123.75 <= wind_direction_degree < 146.25:
return "SE"
if 146.25 <= wind_direction_degree < 168.75:
return "SSE"
if 168.75 <= wind_direction_degree < 191.25:
return "S"
if 191.25 <= wind_direction_degree < 213.75:
return "SSW"
if 213.75 <= wind_direction_degree < 236.25:
return "SW"
if 236.25 <= wind_direction_degree < 258.75:
return "WSW"
if 258.75 <= wind_direction_degree < 281.25:
return "W"
if 281.25 <= wind_direction_degree < 303.75:
return "WNW"
if 303.75 <= wind_direction_degree < 326.25:
return "NW"
if 326.25 <= wind_direction_degree < 348.75:
return "NNW"
return "N"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rainwise sensor."""
name = config.get(CONF_NAME)
url = config.get(CONF_URL)
# One RainwiseSensor entity per monitored condition.
dev = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
dev.append(RainwiseSensor(name, sensor_type))
async_add_entities(dev)
# Shared poller: the first fetch runs now, and fetching_data() keeps
# re-scheduling itself every 60 seconds.
weather = RainwiseData(hass, url, dev)
await weather.fetching_data()
class RainwiseSensor(Entity):
"""Representation of an Rainwise sensor."""
def __init__(self, name, sensor_type):
"""Initialize the sensor.

name is the user-facing prefix (CONF_NAME); sensor_type is a key
into SENSOR_TYPES, from which the unit, CSS selector, icon and
value-parser callable are unpacked.
"""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self._selector = SENSOR_TYPES[self.type][2]
self._icon = SENSOR_TYPES[self.type][3]
self._parser = SENSOR_TYPES[self.type][4]
def update_changes(self, new_state):
"""Update the state if it has changed, return true if it changed"""
if new_state != self._state:
_LOGGER.info("Changing state of %s to: %s", self._name, new_state)
self._state = new_state
return True
else:
return False
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def parser(self):
"""Return the parser method."""
return self._parser
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
# RainwiseData pushes updates via update_changes()/async_update_ha_state().
return False
@property
def selector(self):
"""Return the CSS selector"""
return self._selector
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def entity_picture(self):
# No picture; the MDI icon above is used instead.
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
class RainwiseData:
"""Get the latest data and update the sensors."""
def __init__(self, hass, url, devices):
"""Initialize the data object.

url is the IP-100 status page; devices are the RainwiseSensor
entities to push parsed values into.
"""
self._url = url
self.devices = devices
self.data = {}
self.hass = hass
async def fetching_data(self, *_):
"""Ensure this is called every 60 seconds"""
def try_again(err: str):
# Schedules a retry but does NOT abort the current call -- see note below.
async_call_later(self.hass, 60, self.fetching_data)
websession = async_get_clientsession(self.hass)
data = b""
try:
with async_timeout.timeout(10):
request = await websession.get(self._url)
if request.status != 200:
_LOGGER.error(
"Error %d on load URL %s", request.status, request.url
)
try_again("HTTP Error")
data = await request.text()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling IP-100")
try_again("Timeout Error")
# NOTE(review): after try_again() the code falls through and parses
# whatever `data` holds (possibly empty); select(...)[0] below would
# then raise IndexError.  Consider returning after scheduling the retry.
raw_data = BeautifulSoup(data, "html.parser")
tasks = []
for dev in self.devices:
new_state = None
new_state = dev.parser(raw_data.select(dev.selector)[0].contents)
# Only push to HA when the value actually changed.
if dev.update_changes(new_state):
tasks.append(dev.async_update_ha_state())
if tasks:
# NOTE(review): asyncio.wait() on bare coroutines is deprecated
# since Python 3.8 -- confirm target runtime.
await asyncio.wait(tasks)
async_call_later(self.hass, 60, self.fetching_data)
```
|
{
"source": "jcopps/BhavCopyPRParser",
"score": 3
}
|
#### File: bhavpr/collection/download_helper.py
```python
from datetime import datetime, timedelta
from bhavpr.collection.constants import (
PR_URL,
PERIOD,
PR_DIR_FORMAT,
DATE_FORMAT_STR
)
def date_range_iter(start_date, end_date):
    """Yield one date per day from start_date (inclusive) to end_date (exclusive)."""
    current = start_date
    for _ in range(int((end_date - start_date).days)):
        yield current
        current = current + timedelta(1)
def preprocess_date(input_date):
    """Coerce a DATE_FORMAT_STR-formatted string to datetime; pass datetimes through."""
    if not isinstance(input_date, str):
        return input_date
    return datetime.strptime(input_date, DATE_FORMAT_STR)
class PrProperties(object):
    """A bhavcopy PR report identified by a calendar date.

    Derives the various archive/member file names (PRddmmyy.zip,
    anddmmyy.txt, bcddmmyy.csv, ...) and the download URL for that date.
    """

    def __init__(self, day, month, year) -> None:
        self.day = day
        self.month = month
        self.year = year

    @staticmethod
    def pad_zero(element, final_len=2):
        """Left-pad single-digit values with zeros; return others untouched."""
        if element >= 10:
            return element
        return str(element).rjust(final_len, "0")

    @staticmethod
    def format_year(year):
        """Reduce a year to its final two digits, as a string."""
        text = str(year)
        return text[-2:] if len(text) > 2 else text

    @staticmethod
    def create_instance_from_directory(directory_name) -> object:
        """Build a PrProperties from a directory named per PR_DIR_FORMAT."""
        parsed = datetime.strptime(directory_name, PR_DIR_FORMAT)
        return PrProperties(parsed.day, parsed.month, parsed.year)

    def get_file_name(self, directory=False) -> str:
        """Return 'PRddmmyy' when directory=True, else 'PRddmmyy.zip'."""
        stem = "PR{0}{1}{2}".format(
            PrProperties.pad_zero(self.day),
            PrProperties.pad_zero(self.month),
            PrProperties.format_year(self.year),
        )
        return stem if directory else stem + ".zip"

    def get_anddmmyy_file_name(self) -> str:
        """Return the 'anddmmyy.txt' member name for this date."""
        return self.get_file_name(directory=True).replace("PR", "an") + ".txt"

    def get_bcddmmyy_file_name(self) -> str:
        """Return the 'bcddmmyy.csv' member name for this date."""
        return self.get_file_name(directory=True).replace("PR", "bc") + ".csv"

    def get_specific_file_name(self, prefix, extension) -> str:
        """Return '<prefix>ddmmyy<PERIOD><extension>' for this date."""
        stem = self.get_file_name(directory=True).replace("PR", prefix)
        return "".join([stem, PERIOD, extension])

    def get_download_url(self) -> str:
        """Return PR_URL formatted with this date's zip file name."""
        return PR_URL.format(self.get_file_name())
```
|
{
"source": "Jcorb08/programming-support-skill",
"score": 3
}
|
#### File: Jcorb08/programming-support-skill/duckduckgo.py
```python
import requests
from lxml import html
import time
from mycroft.util.log import LOG
# https://github.com/thibauts/duckduckgo
# prints out list of urls
# edited to add headers and css selecting and now fully understand code
def search(keywords, max_results=None):
"""Generator over DuckDuckGo HTML-endpoint result URLs for *keywords*.

Posts to the no-JS html.duckduckgo.com endpoint and follows the
"more results" form between pages; yields at most max_results URLs
(unlimited when None).  Sleeps 0.1s between yields to stay polite.
"""
url = 'https://html.duckduckgo.com/html/'
params = {'q': keywords}
# A browser-like User-Agent is required or the endpoint blocks the request.
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0'}
yielded = 0
while True:
res = requests.post(url, data=params, headers=headers)
doc = html.fromstring(res.text)
results = [a.get('href') for a in doc.cssselect('#links .links_main a')]
for result in results:
yield result
time.sleep(0.1)
yielded += 1
if max_results and yielded >= max_results:
return
# Next page: the last "more results" form carries the pagination state.
try:
form = doc.cssselect('.results_links_more form')[-1]
except IndexError:
# No further pages.
return
# Re-post with the hidden form fields as the next request's params.
params = dict(form.fields)
```
#### File: Jcorb08/programming-support-skill/output.py
```python
import webbrowser
# create a new object Output
class Output:
    """Presents a resource link to the user by opening it in the browser."""

    def __init__(self, link):
        """Store the resource URL and immediately display it."""
        # set url to the link url from resource class
        self.url = link
        # output url to user
        self.output_url()

    def output_url(self):
        """Open self.url in a new browser tab, raising the window."""
        webbrowser.open(self.url, new=2, autoraise=True)
```
|
{
"source": "jcorbette/ice-cream-poll",
"score": 3
}
|
#### File: ice-cream-poll/ice_cream_poll_app/models.py
```python
from django.db import models
from django.db.models import Sum
# Create your models here.
class Questions(models.Model):
"""A poll question; `question` holds the prompt text shown to voters."""
question = models.CharField(max_length=350)
def __str__(self):
# Shown in the admin and any template rendering the model directly.
return self.question
class Choices(models.Model):
    """One selectable answer for a Questions row, with its vote tally."""

    question = models.ForeignKey(Questions, on_delete=models.CASCADE, related_name='choices')
    option = models.CharField(max_length=100)
    vote = models.IntegerField(default=0)

    def get_percentage(self):
        """Return this choice's share of all recorded votes, as a percentage.

        NOTE(review): the total aggregates votes across *every* Choices
        row, not just this question's; if the app ever holds more than
        one question this should probably sum self.question.choices.
        """
        total_votes = Choices.objects.all().aggregate(vote_sum=Sum("vote"))["vote_sum"]
        # Bug fix: with no Choices rows the aggregate returns None, and with
        # all-zero tallies it returns 0 -- both previously crashed the
        # division below (TypeError / ZeroDivisionError).
        if not total_votes:
            return 0
        return (self.vote / total_votes) * 100
```
|
{
"source": "jcorbettfrank/find_pisne",
"score": 3
}
|
#### File: find_pisne/archive/OpenSNAPI_v2.py
```python
import requests
import os
import pandas as pd
import numpy as np
from io import StringIO
import errno
import pdb
import string
# In[56]:
# --- Per-(directory, SN type) setup: output dirs, filters, photometry limit.
# NOTE(review): indentation was lost in this extract; everything from here
# through the final to_csv appears to live inside the double loop below.
OverArchDirs = ['Data','Data_BVRI']
SN_types = ['SLSN','1a','IIn','IIp','IIb']
for OverArchDir in OverArchDirs:
for SN_type in SN_types:
catalog = 'astrocats'
fn = 'The Open Supernova Catalog '+ SN_type+ '.csv'
data_dir = os.path.join(OverArchDir,SN_type)
Phot_lim = 20 #if doesn't have Phot lim photometry datapoints then we won't look at it
# 'Data' uses SDSS-like griz filters; 'Data_BVRI' uses Johnson BVRI.
if OverArchDir =='Data':
Filters = ['g','r','i','z'] #if it doesnt have photometry in these filters we won't look at it
else:
Filters = ['B','V','R','I']
try:
os.makedirs(data_dir)
except OSError as e:
# Ignore "already exists"; re-raise anything else.
if e.errno != errno.EEXIST:
raise
# In[57]:
api_str_base = 'https://api.'+catalog+'.space/'
df = pd.read_csv(fn)
# Rank by photometry count and drop sparse supernovae.
sort_df = df.sort_values(by=['Phot.'],axis=0,ascending=False)
exp_df = sort_df.drop(['Radio','X-ray'],axis = 1, inplace=False)
exp_df['Fit?'] = np.zeros(len(exp_df))
exp_df = exp_df[exp_df['Phot.'] > Phot_lim]
print(exp_df)
#need to delete entries that do not have redshift
# In[58]:
#add column to dataframe containing absolute magnitudes - assumes apparent magnitudes are in AB system
def appAB2Abs(df, DL):
    """Append an 'AbsMag' column converting apparent AB magnitudes to absolute.

    DL is the luminosity distance in Mpc; the distance modulus term uses
    DL * 10**6 parsecs.  Returns the same DataFrame, mutated in place.
    """
    distance_pc = DL * 10**6
    absolute = df['magnitude'].values - 5 * np.log10(distance_pc) + 5
    df['AbsMag'] = pd.Series(absolute, index=df.index)
    return df
# In[60]:
# --- Per-SN download: luminosity distance, redshift, photometry; save each
# usable SN into data_dir/<name>/ and collect its row in arr_holder.
arr_holder = np.zeros((1,len(exp_df.values[0])))
goodSNs = 0
badSNs = 0
for i in range(len(exp_df)):
rowDf = pd.DataFrame(exp_df.iloc[[i]])
SN_name = rowDf['Name'].values[0]
#get luminosity distance
dLum = requests.get(api_str_base+SN_name+'/lumdist+redshift?complete&format=csv')
api_response = dLum.text
#check if data is available
# The API returns a JSON error payload whose text starts {"message": ...}.
if api_response[2:9] == 'message':
#remove from SNinfodf
badSNs+=1
else:
dLum_df = pd.read_csv(StringIO(api_response))
lumdist = dLum_df['lumdist'].values[0]
redshift = dLum_df['redshift'].values[0]
try:
redshift = redshift[0] #think the first is most believed
except IndexError:
pass
redshift = float(redshift)
#make sure redshift makes sense
if redshift >= 0:
rowDf['z'] = redshift
#get SN photometry
data = requests.get(api_str_base+SN_name+'/photometry/time+magnitude+e_magnitude+upperlimit+band+zeropoint+system+kcorrected+scorrected+mcorrected?format=csv')
api_response2 = data.text
if api_response2[2:9] == 'message':
#remove from SNinfodf
badSNs+=1
else:
SNdf = pd.read_csv(StringIO(api_response2))
#add Absolute Magnitudes
newDf = appAB2Abs(SNdf,lumdist)
#make sure the filters we want are in this file
bands = newDf['band'].values
lower_bands = []
for b in bands:
try:
# NOTE(review): the name suggests b.lower() was intended, but the
# value is appended unchanged; the AttributeError guard implies
# some bands are non-strings (NaN).  Confirm intent.
lower_bands.append(b)
except AttributeError:
pass
FilterTroll = 0
for filt in Filters:
# Accept both plain and primed (e.g. g') filter names.
if filt in lower_bands or filt+"'" in lower_bands:
FilterTroll += 1
if FilterTroll != len(Filters): #not all the filters are in this file
badSNs+=1
else:
try:
# Sanitise names so they are safe as directory names.
invalidChars = set(string.punctuation.replace(":",""))
if any(char in invalidChars for char in SN_name):
SN_name = SN_name.replace(":","-")
# NOTE(review): .at is label-based while i is a positional loop
# index; after the Phot_lim filtering above the labels may not
# equal positions -- verify this writes the intended row.
exp_df.at[i,'Name'] = SN_name
os.makedirs(os.path.join(data_dir,SN_name)) #try to make a directory, will return OSerror if already exist
except OSError as e:
if e.errno != errno.EEXIST:
raise
#we made it
rowDf.to_csv(os.path.join(data_dir,SN_name,'info.csv'))
newDf.to_csv(os.path.join(data_dir,SN_name,'Photometry.csv'))
arr_holder = np.concatenate((arr_holder,np.array([exp_df.iloc[i].values])),axis=0)
# Drop the zero-filled seed row before writing the summary CSV.
arr_holder = arr_holder[1:,:]#get rid of first row
master_holderDF = pd.DataFrame(arr_holder,columns = exp_df.columns.values)
print(master_holderDF)
master_holderDF.to_csv(os.path.join(data_dir,'OverallSNinfo.csv'))
# In[55]:
# NOTE(review): duplicate of the write above.
master_holderDF.to_csv(os.path.join(data_dir,'OverallSNinfo.csv'))
# In[49]:
'''
SN_name = 'SN2015bn'
dLum = requests.get(api_str_base+SN_name+'/lumdist+redshift?complete&format=csv')
print(dLum.text)
data = requests.get(api_str_base+SN_name+'/photometry/time+magnitude+e_magnitude+upperlimit+band+zeropoint+system+kcorrected+scorrected+mcorrected?format=csv')
api_response2 = data.text
SNdf = pd.read_csv(StringIO(api_response2))
display(SNdf)
'''
# In[ ]:
"'"
```
#### File: jcorbettfrank/find_pisne/lightCurve.py
```python
import numpy as np
import pandas as pd
class lightCurve(object):
    """Minimal light-curve placeholder identified only by a name."""

    def __init__(self, name):
        """Record the curve's name."""
        self.name = name


l = lightCurve("red")
```
|
{
"source": "jcordatauclair/ari-tfidf",
"score": 3
}
|
#### File: part2/Prog/remove_common_words.py
```python
import nltk
nltk.download('punkt')
from nltk import word_tokenize
from nltk.stem.porter import *
# Input: tokenised CACM files; output: same files with stop-words removed
# (and optionally stemmed); common_words is one stop-word per line.
inpathname = "../../TP1/Collection/Token/"
outpathname = "../Collection/NoCommonWords/"
commonwordsfile = "../../TP1/Data/common_words"
# stembool is a boolean indicating whether Porter stemming should be
# applied as part of the processing.
def RemoveCommonWords(inpath, outpath, stembool):
"""Strip stop-words (and optionally stem) every tokenised CACM file.

Reads CACM-1.flt .. CACM-3204.flt from inpath and writes the filtered
words, space-separated, to CACM-i.sttr under outpath.
"""
if (stembool == True):
stemmer = PorterStemmer()
# Number of documents available in the collection.
nDocs = 3204;
# Dictionary mapping each common word to a sentinel value (1),
# used purely for O(1) membership tests.
stopList = {}
# Build the stop-list from the common-words file (one word per line).
with open(commonwordsfile) as CwFile:
for line in CwFile:
commonWord = nltk.word_tokenize(line)[0]
stopList[commonWord] = 1
# Process each filtered file in turn.
for i in range(1, nDocs + 1):
file = "CACM-" + str(i)
infile = inpath + file + ".flt"
outfile = outpath + file + ".sttr"
# Open the filtered file CACM-i (one token per line).
with open(infile, "r") as TokenFile:
print("processing file " + file)
# Create the stop-word-free output file CACM-i.
with open(outfile, "w+") as NoCwFile:
for line in TokenFile:
word = nltk.word_tokenize(line)[0]
if word not in stopList:
if (stembool == True):
NoCwFile.write(stemmer.stem(word) + " ")
else:
NoCwFile.write(word + " ")
else:
print(" removing " + word)
RemoveCommonWords(inpathname, outpathname, True)
```
|
{
"source": "jcordell/fantasycalc-utils",
"score": 2
}
|
#### File: core/factory/DatabaseFactory.py
```python
from services.MongoDB import MongoDB
from config import LocalHostConfig as config
# Registry of database backends, keyed by the config's DATABASE_TYPE.
# NOTE(review): instances are created eagerly at import time.
_db_instances = {
    'mongodb': MongoDB(),
}
def get_database():
    """Return the shared database instance selected by config.DATABASE_TYPE."""
    return _db_instances[config.DATABASE_TYPE]
```
#### File: services/fantasysites/ESPNService.py
```python
import sys
sys.path.append('/Users/jkc023/Documents/homeprojects/fantasycalc-utils/')
from services.fantasysites.FantasySiteService import FantasySiteService
from services.apis.mfl_api import mfl_api
from datatypes.trade import trade_dtype
from datatypes.fantasy_league import fantasy_league_dtype
from bs4 import BeautifulSoup
import re
import requests
import json
import time
from services.MongoDB import MongoDB
class ESPNService(FantasySiteService):
"""Scrapes trades and league settings from ESPN fantasy football pages.

NOTE(review): basic_settings/advanced_settings/league_settings are
*class* attributes; league_settings is a mutable cache shared by every
ESPNService instance.
"""
name = 'ESPN'
basic_settings = {'Number of Teams': 'num_teams',
'League Name': 'name', 'Quarterback (QB)': 'num_qbs'}
# dictionary used to rename scoring setting
advanced_settings = {'Each reception (REC)': 'ppr'}
league_settings = {}
def __init__(self, year):
self.year = year
self._db = MongoDB()
self._db.connect()
def make_trade(self, trade_columns):
"""Split one transaction table's columns into the two trade sides.

Raises ValueError unless exactly two distinct teams are involved.
"""
team1 = None
team2 = None
# get team name of each trade index
teams = [team.split(' ')[0]
for team in trade_columns[2] if 'traded' in team]
# make sure only 2 teams (error if team names are the same)
# NOTE(review): `is not 2` is an identity test against an int literal;
# it only works because CPython caches small ints and it raises a
# SyntaxWarning on Python 3.8+.  This should be `!= 2`.
if len(set(teams)) is not 2:
raise ValueError
trade_side1 = []
trade_side2 = []
for player, team in zip(trade_columns[2].findAll('b'), teams):
# if player on espn is injured, they will show as example: <NAME>*
if player is not None:
player = player.text.replace('*', '')
# if first trade item, set team1, add to trade
if team1 is None and team2 is None:
team1 = team
trade_side1.append(player)
elif team2 is None and team1 == team:
trade_side1.append(player)
else:
team2 = team
trade_side2.append(player)
return trade_side1, trade_side2
def get_trades(self, league_id):
"""Scrape processed trades for one league's recent-activity page."""
url = "http://games.espn.com/ffl/recentactivity?leagueId=" + str(league_id) + "&seasonId=" \
+ str(self.year) + \
"&activityType=2&startDate=20180911&endDate=20181129&teamId=-1&tranType=4"
try:
# headers = {
#     'User-Agent': 'Mozilla/5.0 (iPad; U; CPU OS 3_2_1 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Mobile/7B405'}
page = requests.get(url)
except Exception as e:
print(e)
print(
'Warning: Unable to download, couldn\'t request trades from:', league_id)
# NOTE(review): with the return commented out, execution falls
# through to BeautifulSoup(page.text, ...) with `page` unbound and
# raises NameError -- the `return []` should likely be restored.
# return []
soup = BeautifulSoup(page.text, 'lxml')
trade_data = []
tables = soup.findAll('table')
for transaction in soup.findAll(text="Transaction"):
trade_table = transaction.parent.parent.parent
# trades can be accepted and processed, don't want duplicates
if 'Processed' not in trade_table.text:
continue
col = trade_table.find_all('td')
try:
trade_side1, trade_side2 = self.make_trade(col)
except ValueError:
# print('Warning: Unable to make trade in ESPNServices.get_trades in league', league_id)
continue
# Convert date to epoch (example preprocessed format: Wed, Nov 22 7:30 PM)
date = str(col[0].renderContents()).replace(
'<br/>', ' ')[2:-4].split(',')[1].lstrip() + " " + str(self.year)
date_epoch = int(time.mktime(
time.strptime(date, '%b %d %H:%M %Y')))
single_trade = trade_dtype()
single_trade.league_id = league_id
single_trade.side1 = trade_side1
single_trade.side2 = trade_side2
single_trade.timestamp = date_epoch
single_trade.fantasy_site = self.name
single_trade.all_players = [*trade_side1, *trade_side2]
# NOTE(review): dead check -- single_trade was just constructed and
# can never be None here.
if single_trade is None:
continue
single_trade.league_settings = self.get_settings(
league_id).to_json()
trade_data.append(single_trade)
return trade_data
def get_settings(self, league_id):
"""Scrape (and memoise) a league's settings page into a fantasy_league_dtype."""
if league_id in self.league_settings:
return self.league_settings[league_id]
league_settings = fantasy_league_dtype()
url = 'http://games.espn.com/ffl/leaguesetup/settings?leagueId=' + \
str(league_id)
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
tables = soup.find_all('table', attrs={'class': 'leagueSettingsTable'})
settings = {}
league_settings.league_id = league_id
# PPR scoring defaults to 0, since ESPN does not display this setting if it is 0
settings['ppr'] = 0
for table in tables:
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
# add to settings dictionary if setting is needed
if cols[0] in self.basic_settings:
settings[self.basic_settings[cols[0]]] = cols[1]
# check if any advanced settings are in string
for setting in self.advanced_settings:
if setting in cols:
# The value cell immediately follows the label cell.
settings[self.advanced_settings[setting]
] = cols[cols.index(setting) + 1]
settings['league_id'] = league_id
league_settings.name = settings['name']
league_settings.num_teams = settings['num_teams']
league_settings.num_qbs = settings['num_qbs']
league_settings.ppr = settings['ppr']
league_settings.site = 'ESPN'
# Cache in the (class-level) memo for subsequent get_trades calls.
self.league_settings[league_id] = league_settings
return league_settings
def get_valid_leagues(self, fast_search=False):
"""Return the league ids to crawl; fast_search is not yet implemented."""
if fast_search:
# return leagues which have already been found to have a trade in them
raise NotImplementedError
else:
# return [841145]
# return range(841145, 1726141)
# return range(930430, 1726141)
# updated this after college lib
# return range(1264529 + 35533 + 16262 + 8772, 1726141)
league_ids = self._db.get_league_ids('ESPN')
print(league_ids)
print(len(league_ids))
return league_ids
# espn = ESPNService()
# # print(espn.get_settings(842623))
# print(espn.get_trades(841145))
# print(espn.get_settings(841145))
# service = MFLService(2018)
# service.get_settings(35465)
# service.get_settings(10431)
```
|
{
"source": "jcordell/keras-optimization",
"score": 3
}
|
#### File: keras-optimization/GA/train.py
```python
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
import data_parser
import numpy as np
from keras.optimizers import Adadelta, Adam, rmsprop
from sklearn.metrics import mean_squared_error
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data.

    Images are flattened to 3072-element float vectors scaled to [0, 1];
    labels become one-hot matrices over the 10 classes.
    """
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)
    # Get the data and flatten each 32x32x3 image into one vector.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072).astype('float32') / 255
    x_test = x_test.reshape(10000, 3072).astype('float32') / 255
    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)
    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist():
    """Retrieve the MNIST dataset and process the data.

    Images are flattened to 784-element float vectors scaled to [0, 1];
    labels become one-hot matrices over the 10 classes.
    """
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)
    # Get the data and flatten each 28x28 image into one vector.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784).astype('float32') / 255
    x_test = x_test.reshape(10000, 784).astype('float32') / 255
    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)
    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_dbtt():
    """Load the DBTT regression dataset: train on DBTT_Data22, test on CD LWR.

    Returns the same 7-tuple shape as the classification loaders;
    nb_classes is -1 to signal regression to compile_model().
    """
    data = data_parser.parse("DBTT_Data22.csv")
    data_lwr = data_parser.parse("CD_LWR_clean8.csv")
    # 13 normalized composition/fluence/temperature features.
    X = ["N_log(eff fl p =.05)", "N_log(eff fl p =.4)", "N_log(eff fl p =.5)", "N(Cu)", "N(Ni)", "N(Mn)", "N(P)",
         "N(Si)", "N( C )", "N_log(eff fl p =.1)", "N_log(eff fl p =.2)", "N_log(eff fl p =.3)", "N(Temp)"]
    Y = "CD delta sigma"
    data.set_x_features(X)
    data.set_y_feature(Y)
    data_lwr.set_y_feature(Y)
    data_lwr.set_x_features(X)
    # Exclude specific alloys from training (held out / known outliers --
    # TODO confirm rationale with dataset owner).
    data.add_exclusive_filter("Alloy", '=', 29)
    data.add_exclusive_filter("Alloy", '=', 8)
    data.add_exclusive_filter("Alloy", '=', 1)
    data.add_exclusive_filter("Alloy", '=', 2)
    data.add_exclusive_filter("Alloy", '=', 14)
    data_lwr.add_exclusive_filter("Alloy", '=', 29)
    data_lwr.add_exclusive_filter("Alloy", '=', 14)
    x_test = np.array(data_lwr.get_x_data())
    y_test = np.array(data_lwr.get_y_data())
    x_train = np.array(data.get_x_data())
    y_train = np.array(data.get_y_data())
    #print("Training with", np.shape(y_train)[0], "data points")
    # nb_classes == -1 marks this as a regression problem.
    nb_classes = -1
    # Full-batch training: batch size equals the training-set size.
    batch_size = np.shape(y_train)[0]
    input_shape = (13,)
    # normalize y columns
    # NOTE(review): 758.92 looks like the target's max; train_and_score
    # multiplies predictions by the same constant -- keep them in sync.
    y_train = y_train/758.92
    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
    """Compile a sequential model.

    Args:
        network (dict): the parameters of the network
        (nb_layers, nb_neurons, activation, optimizer, learning_rate).
        nb_classes: number of classes, or -1 for regression.
        input_shape: shape of one input sample.

    Returns:
        a compiled network.
    """
    # Get our network parameters.
    nb_layers = network['nb_layers']
    nb_neurons = network['nb_neurons']
    activation = network['activation']
    optimizer = network['optimizer']
    learning_rate = network['learning_rate']
    model = Sequential()
    # Add each layer.
    for i in range(nb_layers):
        # Need input shape for first layer.
        if i == 0:
            # NOTE(review): debug print; consider removing for quiet runs.
            print(nb_neurons)
            model.add(Dense(units=nb_neurons, activation=activation, input_shape=input_shape))
        else:
            print(nb_neurons)
            model.add(Dense(nb_neurons, activation=activation))
        model.add(Dropout(0.2))  # hard-coded dropout
    # Output layer.
    if(nb_classes == -1):
        # Regression head: single linear unit, Adam with the evolved lr.
        model.add(Dense(1, activation='linear'))
        ADAM = Adam(lr=learning_rate)
        model.compile(loss='mean_squared_error', metrics=['accuracy'], optimizer=ADAM)
    else:
        # Classification head.
        # NOTE(review): `learning_rate` is ignored here -- the optimizer
        # string is used with its default lr; confirm that is intended.
        model.add(Dense(nb_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                      metrics=['accuracy'])
    return model
def train_and_score(network, dataset):
    """Train the model, return test loss.

    Args:
        network (dict): the parameters of the network
        dataset (str): Dataset to use for training/evaluating
            ('cifar10', 'mnist' or 'dbtt').

    Returns:
        RMSE on the LWR test set for 'dbtt', otherwise test accuracy.
    """
    if dataset == 'cifar10':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_cifar10()
    elif dataset == 'mnist':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_mnist()
    elif dataset == 'dbtt':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_dbtt()
    model = compile_model(network, nb_classes, input_shape)
    if dataset == 'dbtt':
        # NOTE(review): batch_size=1406 is hard-coded and duplicates the
        # batch_size get_dbtt() already computed -- keep them in sync.
        model.fit(x_train, y_train, epochs=10, batch_size=1406, verbose=0)
        # Undo the y/758.92 normalization applied in get_dbtt().
        y_predict = model.predict(x_test) * 758.92  # todo way to not hardcode this?
        rms = np.sqrt(mean_squared_error(y_test, y_predict))
        print(rms)
        return rms
    else:
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=10000,  # using early stopping, so no real limit
                  verbose=0,
                  validation_data=(x_test, y_test),
                  callbacks=[early_stopper])
        score = model.evaluate(x_test, y_test, verbose=0)
        return score[1]  # 1 is accuracy. 0 is loss.
```
#### File: jcordell/keras-optimization/StochasticityTest.py
```python
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import sys
import numpy as np
from sklearn.metrics import mean_squared_error
import data_parser
def getData():
    """Load the DBTT training set and the CD-LWR test set.

    Returns:
        tuple: (x_train, y_train, x_test, y_test) as numpy arrays.
        Training data comes from DBTT_Data22.csv with five alloys
        excluded; testing data comes from CD_LWR_clean8.csv with two
        alloys excluded.
    """
    data = data_parser.parse("DBTT_Data22.csv")
    data_lwr = data_parser.parse("CD_LWR_clean8.csv")
    # Normalized feature columns shared by both datasets.
    X = ["N_log(eff fl p =.05)", "N_log(eff fl p =.4)", "N_log(eff fl p =.5)", "N(Cu)", "N(Ni)", "N(Mn)", "N(P)", "N(Si)","N( C )", "N_log(eff fl p =.1)", "N_log(eff fl p =.2)", "N_log(eff fl p =.3)", "N(Temp)"]
    Y = "CD delta sigma"
    data.set_x_features(X)
    data.set_y_feature(Y)
    data_lwr.set_y_feature(Y)
    data_lwr.set_x_features(X)
    # Hold out specific alloys from training (presumably outliers or
    # reserved for evaluation -- TODO confirm rationale).
    data.add_exclusive_filter("Alloy",'=', 29)
    data.add_exclusive_filter("Alloy",'=', 8)
    data.add_exclusive_filter("Alloy", '=', 1)
    data.add_exclusive_filter("Alloy", '=', 2)
    data.add_exclusive_filter("Alloy", '=', 14)
    data_lwr.add_exclusive_filter("Alloy",'=', 29)
    data_lwr.add_exclusive_filter("Alloy", '=', 14)
    x_test = np.array(data_lwr.get_x_data())
    y_test = np.array(data_lwr.get_y_data())
    x_train = np.array(data.get_x_data())
    y_train = np.array(data.get_y_data())
    print("Training with", np.shape(y_train)[0], "data points")
    return x_train, y_train, x_test, y_test
x_train, y_train, x_test, y_test = getData()
# Normalize the y column; 758.92 is the max target value used as the scale.
y_train = y_train / 758.92
# Gridsearch parameters.
learning_rate = [.005, .05]
hidden1 = [5]
hidden2 = [5, 40]
num_ensembles = 2
# Initialize rms_list: one row per independent repeat ("column"), one entry
# per hyperparameter configuration.
columns = 3
rows = len(learning_rate) * len(hidden1) * len(hidden2)
rms_list = [[0 for i in range(rows)] for j in range(columns)]
# Hoisted out of the grid-search loops: re-importing on every iteration is
# wasteful and obscures the dependencies.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import Adadelta, Adam, rmsprop
for i in range(0, columns):
    j = 0
    for lr in learning_rate:
        for h1 in hidden1:
            for h2 in hidden2:
                for n in range(num_ensembles):
                    model = Sequential()
                    model.add(Dense(units=h1, input_dim=np.shape(x_train)[1]))
                    model.add(Activation('sigmoid'))
                    model.add(Dense(h2))
                    model.add(Activation('sigmoid'))
                    model.add(Dense(1))
                    model.add(Activation('linear'))
                    ADAM = Adam(lr=lr)
                    model.compile(loss='mean_squared_error', metrics=['accuracy'], optimizer=ADAM)
                    model.fit(x_train, y_train, nb_epoch=2000, batch_size=1400, verbose=0)
                    y_predict = model.predict(x_test) * 758.92  # todo way to not hardcode this?
                    if n == 0:
                        y_predict_all = y_predict
                    else:
                        # BUG FIX: np.column_stack returns a new array; the
                        # original discarded the result, so the ensemble was
                        # never actually accumulated.
                        y_predict_all = np.column_stack((y_predict_all, y_predict))
                # Average the ensemble members' predictions.
                y_predict_avg = np.mean(y_predict_all, axis=1)
                # BUG FIX: score the ensemble average; the original scored
                # only the last ensemble member (y_predict).
                rms = np.sqrt(mean_squared_error(y_test, y_predict_avg.reshape(-1, 1)))
                print('RMS:', rms)
                rms_list[i][j] = rms
                j += 1
print(rms_list)
print(np.shape(rms_list)[0])
# Rank the configurations within each repeat, then report mean/stdev of
# ranks across repeats to gauge how stochastic the ordering is.
rank_final = []
for i in range(np.shape(rms_list)[0]):
    temp = np.array(rms_list[i]).argsort()
    ranks = np.empty(len(rms_list[i]), int)
    ranks[temp] = np.arange(len(rms_list[i]))
    rank_final.append(ranks)
print("Average rank:")
print(np.mean(rank_final, axis=0))
print(" ")
print("StDev:")
print(np.std(rank_final, axis=0))
print(" ")
print("Average StDev:")
print(np.mean(np.std(rank_final, axis=0)))
```
|
{
"source": "jcoreyes/erl",
"score": 2
}
|
#### File: rlkit/core/batch_rl_algorithm_modenv.py
```python
from collections import OrderedDict
from rlkit.core.timer import timer
from rlkit.core import logger
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.misc import eval_util
from rlkit.samplers.data_collector.path_collector import PathCollector
from rlkit.core.rl_algorithm import BaseRLAlgorithm
import numpy as np
def linear_schedule(start, end, current):
    # Fraction of the way through the [start, end] window.
    # NOTE(review): `start` is never subtracted from the numerator; for
    # start != 0 this is NOT the usual interpolation fraction
    # (current - start) / (end - start). Confirm intent before changing.
    return float(current) / (end - start)
class BatchRLAlgorithmModEnv(BaseRLAlgorithm):
    """Batch RL algorithm that rebuilds its exploration env every train loop
    with parameters annealed over training (a simple curriculum / domain
    randomization on env physics).
    """
    def __init__(
            self,
            batch_size,
            max_path_length,
            num_eval_steps_per_epoch,
            num_expl_steps_per_train_loop,
            num_trains_per_train_loop,
            mod_env_epoch_schedule,
            env_class,
            env_mod_params,
            env_mod_dist,
            lifelong,
            num_train_loops_per_epoch=1,
            min_num_steps_before_training=0,
            *args,
            **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
        self.min_num_steps_before_training = min_num_steps_before_training
        # lifelong=True interleaves single env steps with training updates;
        # False collects full batches of paths between training phases.
        self.lifelong = lifelong
        # Fraction of total epochs over which env params are annealed.
        self.mod_env_epoch_schedule = mod_env_epoch_schedule
        self.env_class = env_class
        # env_mod_params: dict of env parameter -> target value (or
        # (low, high) bounds when env_mod_dist is True).
        self.env_mod_params = env_mod_params
        self.env_mod_dist = env_mod_dist
    def _train(self):
        # Dispatch to the lifelong or batch training loop.
        if self.lifelong:
            return self._train_lifelong()
        else:
            return self._train_batch()
    def _train_lifelong(self):
        """One epoch of interleaved exploration/training. Returns (stats, done)."""
        done = (self.epoch == self.num_epochs)
        if done:
            return OrderedDict(), done
        self.training_mode(False)
        # Warm-up: seed the replay buffer before any gradient updates.
        if self.min_num_steps_before_training > 0 and self.epoch == 0:
            self.expl_data_collector.collect_new_steps(
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
            )
            init_expl_paths = self.expl_data_collector.get_epoch_paths()
            self.replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)
        # Spread training updates evenly across exploration steps.
        num_trains_per_expl_step = self.num_trains_per_train_loop // self.num_expl_steps_per_train_loop
        timer.start_timer('evaluation sampling')
        if self.epoch % self._eval_epoch_freq == 0:
            self.eval_data_collector.collect_new_paths(
                self.max_path_length,
                self.num_eval_steps_per_epoch,
                discard_incomplete_paths=True,
            )
        timer.stop_timer('evaluation sampling')
        if not self._eval_only:
            for _ in range(self.num_train_loops_per_epoch):
                for _ in range(self.num_expl_steps_per_train_loop):
                    timer.start_timer('exploration sampling', unique=False)
                    self.expl_data_collector.collect_new_steps(
                        self.max_path_length,
                        1,  # num steps
                        discard_incomplete_paths=False,
                    )
                    timer.stop_timer('exploration sampling')
                    timer.start_timer('training', unique=False)
                    self.training_mode(True)
                    for _ in range(num_trains_per_expl_step):
                        train_data = self.replay_buffer.random_batch(
                            self.batch_size)
                        self.trainer.train(train_data)
                    timer.stop_timer('training')
                    self.training_mode(False)
        # Newly collected steps only enter the buffer at the end of the epoch.
        timer.start_timer('replay buffer data storing', unique=False)
        new_expl_paths = self.expl_data_collector.get_epoch_paths()
        self.replay_buffer.add_paths(new_expl_paths)
        timer.stop_timer('replay buffer data storing')
        log_stats = self._get_diagnostics()
        return log_stats, False
    def _train_batch(self):
        """One epoch of batch collect-then-train. Returns (stats, done)."""
        done = (self.epoch == self.num_epochs)
        if done:
            return OrderedDict(), done
        if self.epoch == 0 and self.min_num_steps_before_training > 0:
            init_expl_paths = self.expl_data_collector.collect_new_paths(
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
            )
            self.replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)
        timer.start_timer('evaluation sampling')
        if self.epoch % self._eval_epoch_freq == 0:
            self.eval_data_collector.collect_new_paths(
                self.max_path_length,
                self.num_eval_steps_per_epoch,
                discard_incomplete_paths=True,
            )
        timer.stop_timer('evaluation sampling')
        if not self._eval_only:
            for _ in range(self.num_train_loops_per_epoch):
                timer.start_timer('exploration sampling', unique=False)
                # Rebuild the exploration env with annealed parameters.
                # NOTE(review): max(1, ...) means `current` >= 1 from the very
                # first epoch, which collapses the interpolation; a 0->1 ramp
                # would use min(1, ...). Confirm intent before changing.
                new_env_mod_parms = dict()
                if self.env_mod_dist:
                    current = max(1, self.epoch / (self.mod_env_epoch_schedule * self.num_epochs))
                    for k, v in self.env_mod_params.items():
                        lbound, ubound = v
                        low = current + (1.0 - current) * lbound
                        high = current + (1.0 - current) * ubound
                        new_env_mod_parms[k] = np.random.uniform(low, high)
                else:
                    current = max(1, self.epoch / (self.mod_env_epoch_schedule * self.num_epochs))
                    for k, v in self.env_mod_params.items():
                        new_env_mod_parms[k] = 1.0 * current + (1.0 - current) * v
                self.expl_data_collector._env = self.env_class(new_env_mod_parms)
                new_expl_paths = self.expl_data_collector.collect_new_paths(
                    self.max_path_length,
                    self.num_expl_steps_per_train_loop,
                    discard_incomplete_paths=False,
                )
                timer.stop_timer('exploration sampling')
                timer.start_timer('replay buffer data storing', unique=False)
                self.replay_buffer.add_paths(new_expl_paths)
                timer.stop_timer('replay buffer data storing')
                timer.start_timer('training', unique=False)
                for _ in range(self.num_trains_per_train_loop):
                    train_data = self.replay_buffer.random_batch(self.batch_size)
                    self.trainer.train(train_data)
                timer.stop_timer('training')
        log_stats = self._get_diagnostics()
        return log_stats, False
```
#### File: envs/contextual/goal_conditioned.py
```python
import warnings
from typing import Any, Callable, Dict, List
import random
import numpy as np
import gym
from gym.spaces import Box, Dict
from multiworld.core.multitask_env import MultitaskEnv
from rlkit.misc.asset_loader import load_local_or_remote_file
from gym.spaces import Box, Dict
from rlkit import pythonplusplus as ppp
from rlkit.core.distribution import DictDistribution
from rlkit.envs.contextual import ContextualRewardFn
from rlkit.envs.contextual.contextual_env import (
ContextualDiagnosticsFn,
Path,
Context,
Diagnostics,
)
from rlkit.envs.images import EnvRenderer
# Type aliases used in the signatures below.
# NOTE(review): `Dict` here resolves to gym.spaces.Dict -- the later
# `from gym.spaces import Box, Dict` imports shadow typing.Dict. Confirm
# that is intended before using these aliases as typing generics.
Observation = Dict
Goal = Any
# Maps rollout paths plus their corresponding goals to a diagnostics dict.
GoalConditionedDiagnosticsFn = Callable[
    [List[Path], List[Goal]],
    Diagnostics,
]
class GoalDictDistributionFromMultitaskEnv(DictDistribution):
    """Goal distribution that defers sampling to a ``MultitaskEnv``.

    Exposes the subset of the env's goal dict named by
    ``desired_goal_keys``, with spaces taken from the env's observation
    space.
    """
    def __init__(
            self,
            env: MultitaskEnv,
            desired_goal_keys=('desired_goal',),
    ):
        self._env = env
        self._desired_goal_keys = desired_goal_keys
        env_spaces = self._env.observation_space.spaces
        self._spaces = {
            k: env_spaces[k]
            for k in self._desired_goal_keys
        }

    def sample(self, batch_size: int):
        # BUG FIX: sample once and slice per key. The original called
        # env.sample_goals() once *per key*, which both duplicated work and,
        # for stochastic goal sampling, produced mutually inconsistent
        # values across the returned keys.
        goals = self._env.sample_goals(batch_size)
        return {k: goals[k] for k in self._desired_goal_keys}

    @property
    def spaces(self):
        return self._spaces
class AddImageDistribution(DictDistribution):
    """Wraps a state-goal distribution and adds a rendered image per goal.

    Sampling sets the env to each sampled goal, renders it, and restores
    the env state afterwards, so it is slow for large batches.
    """
    def __init__(
            self,
            env: MultitaskEnv,
            base_distribution: DictDistribution,
            renderer: EnvRenderer,
            image_goal_key='image_desired_goal',
            _suppress_warning=False,
    ):
        self._env = env
        self._base_distribution = base_distribution
        # Images are float32 in [0, 1] with the renderer's shape.
        img_space = Box(0, 1, renderer.image_shape, dtype=np.float32)
        # NOTE(review): this aliases (and then mutates) the base
        # distribution's spaces dict rather than copying it -- the base's
        # spaces gain image_goal_key as a side effect. Confirm before
        # changing, since callers may rely on it.
        self._spaces = base_distribution.spaces
        self._spaces[image_goal_key] = img_space
        self._image_goal_key = image_goal_key
        self._renderer = renderer
        self._suppress_warning = _suppress_warning

    def sample(self, batch_size: int):
        if batch_size > 1 and not self._suppress_warning:
            warnings.warn(
                "Sampling many goals is slow. Consider using "
                "PresampledImageAndStateDistribution"
            )
        contexts = self._base_distribution.sample(batch_size)
        images = []
        for i in range(batch_size):
            # Extract the i-th goal from the batched context dict.
            goal = ppp.treemap(lambda x: x[i], contexts, atomic_type=np.ndarray)
            # Save/restore env state around the render so sampling has no
            # lasting side effect on the env.
            env_state = self._env.get_env_state()
            self._env.set_to_goal(goal)
            img_goal = self._renderer(self._env)
            self._env.set_env_state(env_state)
            images.append(img_goal)
        contexts[self._image_goal_key] = np.array(images)
        return contexts

    @property
    def spaces(self):
        return self._spaces
class PresampledDistribution(DictDistribution):
    """Speeds up a slow sampler by drawing a fixed pool of goals up front
    and then serving uniformly random rows from that pool.
    """
    def __init__(
            self,
            slow_sampler: DictDistribution,
            num_presampled_goals,
    ):
        self._sampler = slow_sampler
        self._num_presampled_goals = num_presampled_goals
        # Pay the slow sampling cost exactly once, at construction time.
        self._presampled_goals = slow_sampler.sample(num_presampled_goals)

    def sample(self, batch_size: int):
        rows = np.random.randint(0, self._num_presampled_goals, batch_size)
        return {
            key: values[rows]
            for key, values in self._presampled_goals.items()
        }

    @property
    def spaces(self):
        return self._sampler.spaces
class PresampledPathDistribution(DictDistribution):
    """Distribution over goals loaded from a presampled file on disk.

    The file must contain a dict of equal-length arrays; sampling returns
    uniformly random rows.
    """
    def __init__(
            self,
            datapath,
    ):
        self._presampled_goals = load_local_or_remote_file(datapath)
        # Number of rows, taken from an arbitrary key (all keys are assumed
        # to have the same leading dimension).
        self._num_presampled_goals = self._presampled_goals[list(self._presampled_goals)[0]].shape[0]
        self._set_spaces()

    def sample(self, batch_size: int):
        idx = np.random.randint(0, self._num_presampled_goals, batch_size)
        sampled_goals = {
            k: v[idx] for k, v in self._presampled_goals.items()
        }
        return sampled_goals

    def _set_spaces(self):
        # Build a Box per key from the first row's dimensionality.
        # NOTE(review): assumes all values lie in [-1, 1] -- confirm the
        # presampled data is actually normalized to that range.
        pairs = []
        for key in self._presampled_goals:
            dim = self._presampled_goals[key][0].shape[0]
            box = gym.spaces.Box(-np.ones(dim), np.ones(dim))
            pairs.append((key, box))
        self.observation_space = Dict(pairs)

    @property
    def spaces(self):
        return self.observation_space.spaces
class ContextualRewardFnFromMultitaskEnv(ContextualRewardFn):
    """Adapter exposing ``MultitaskEnv.compute_rewards`` as a contextual
    reward function.

    Builds the observation dict the env expects out of the transition's
    next state plus the sampled context, then delegates to the env.
    """
    def __init__(
            self,
            env: MultitaskEnv,
            achieved_goal_from_observation: Callable[[Observation], Goal],
            desired_goal_key='desired_goal',
            achieved_goal_key='achieved_goal',
            additional_obs_keys=None,
            additional_context_keys=None,
    ):
        self._env = env
        self._desired_goal_key = desired_goal_key
        self._achieved_goal_key = achieved_goal_key
        self._achieved_goal_from_observation = achieved_goal_from_observation
        self._additional_obs_keys = additional_obs_keys
        self._additional_context_keys = additional_context_keys

    def __call__(self, states, actions, next_states, contexts):
        del states  # rewards depend only on the resulting state
        obs = {
            self._achieved_goal_key:
                self._achieved_goal_from_observation(next_states),
            self._desired_goal_key: contexts[self._desired_goal_key],
        }
        for key in (self._additional_obs_keys or ()):
            obs[key] = next_states[key]
        for key in (self._additional_context_keys or ()):
            obs[key] = contexts[key]
        return self._env.compute_rewards(actions, obs)
class IndexIntoAchievedGoal(object):
    """Callable that looks up the achieved-goal entry of an observation
    dict by a fixed key."""

    def __init__(self, key):
        self._observation_key = key

    def __call__(self, observations):
        return observations[self._observation_key]
class L2Distance(ContextualRewardFn):
    """Contextual "reward" returning the (optionally per-dimension
    weighted) L2 distance between the achieved and desired goals."""

    def __init__(
            self,
            achieved_goal_from_observation: Callable[[Observation], Goal],
            desired_goal_key='desired_goal',
            dimension_weights=None,
    ):
        self._desired_goal_key = desired_goal_key
        self._achieved_goal_from_observation = achieved_goal_from_observation
        self._dimension_weights = dimension_weights

    def __call__(self, states, actions, next_states, contexts):
        del states
        delta = (
            self._achieved_goal_from_observation(next_states)
            - contexts[self._desired_goal_key]
        )
        if self._dimension_weights is not None:
            # Broadcast the per-dimension weights over the batch axis.
            delta = self._dimension_weights[None, :] * delta
        return np.linalg.norm(delta, axis=-1)
class NegativeL2Distance(ContextualRewardFn):
    """Negated :class:`L2Distance`, so being closer to the goal yields a
    higher (less negative) reward."""

    def __init__(
            self,
            achieved_goal_from_observation: Callable[[Observation], Goal],
            desired_goal_key='desired_goal',
            dimension_weights=None,
    ):
        self.distance_fn = L2Distance(
            achieved_goal_from_observation,
            desired_goal_key=desired_goal_key,
            dimension_weights=dimension_weights,
        )

    def __call__(self, states, actions, next_states, contexts):
        return -self.distance_fn(states, actions, next_states, contexts)
class ThresholdDistanceReward(ContextualRewardFn):
    """Sparse reward: 0 when within the distance threshold, -1 otherwise."""

    def __init__(self, distance_fn: ContextualRewardFn, threshold):
        self._distance_fn = distance_fn
        self._distance_threshold = threshold

    def __call__(self, states, actions, next_states, contexts):
        distances = self._distance_fn(states, actions, next_states, contexts)
        too_far = distances > self._distance_threshold
        return -too_far.astype(np.float32)
class GoalConditionedDiagnosticsToContextualDiagnostics(ContextualDiagnosticsFn):
    """Adapter from a goal-conditioned diagnostics function to the
    contextual-diagnostics interface.

    Implemented as a class rather than a closure so it can be serialized.
    """
    def __init__(
            self,
            goal_conditioned_diagnostics: GoalConditionedDiagnosticsFn,
            desired_goal_key: str,
            observation_key: str,
    ):
        self._goal_conditioned_diagnostics = goal_conditioned_diagnostics
        self._desired_goal_key = desired_goal_key
        self._observation_key = observation_key

    def __call__(self, paths: List[Path],
                 contexts: List[Context]) -> Diagnostics:
        goals = [context[self._desired_goal_key] for context in contexts]
        stripped_paths = [self._remove_context(path) for path in paths]
        return self._goal_conditioned_diagnostics(stripped_paths, goals)

    def _remove_context(self, path):
        """Return a copy of `path` with dict observations flattened to the
        observation key and the full-observation fields dropped."""
        new_path = path.copy()
        for field in ('observations', 'next_observations'):
            new_path[field] = np.array(
                [obs[self._observation_key] for obs in path[field]]
            )
        for field in ('full_observations', 'full_next_observations'):
            new_path.pop(field, None)
        return new_path
```
#### File: samplers/data_collector/path_collector.py
```python
from collections import deque, OrderedDict
from functools import partial
import numpy as np
#from rlkit.envs.vae_wrappers import VAEWrappedEnv
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.samplers.rollout_functions import rollout
class MdpPathCollector(PathCollector):
    """Collects rollouts ("paths") from an env with a policy and keeps the
    most recent paths of the current epoch for diagnostics.
    """
    def __init__(
            self,
            env,
            policy,
            max_num_epoch_paths_saved=None,
            render=False,
            render_kwargs=None,
            rollout_fn=rollout,
            save_env_in_snapshot=True,
    ):
        if render_kwargs is None:
            render_kwargs = {}
        self._env = env
        self._policy = policy
        self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
        # deque(maxlen=None) keeps every path; otherwise only the newest.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
        self._render = render
        self._render_kwargs = render_kwargs
        self._rollout_fn = rollout_fn
        self._num_steps_total = 0
        self._num_paths_total = 0
        self._save_env_in_snapshot = save_env_in_snapshot
    def collect_new_paths(
            self,
            max_path_length,
            num_steps,
            discard_incomplete_paths,
    ):
        """Roll out until at least `num_steps` env steps were collected.

        The final rollout is capped so the total never exceeds `num_steps`;
        if that cap truncates a path mid-episode and
        `discard_incomplete_paths` is True, the truncated path is dropped
        and collection stops early.
        """
        paths = []
        num_steps_collected = 0
        while num_steps_collected < num_steps:
            max_path_length_this_loop = min(  # Do not go over num_steps
                max_path_length,
                num_steps - num_steps_collected,
            )
            path = self._rollout_fn(
                self._env,
                self._policy,
                max_path_length=max_path_length_this_loop,
                render=self._render,
                render_kwargs=self._render_kwargs,
            )
            path_len = len(path['actions'])
            # A path is "incomplete" if it was cut short by the step budget
            # (not by reaching max_path_length or a terminal state).
            if (
                    path_len != max_path_length
                    and not path['terminals'][-1]
                    and discard_incomplete_paths
            ):
                break
            num_steps_collected += path_len
            paths.append(path)
        self._num_paths_total += len(paths)
        self._num_steps_total += num_steps_collected
        self._epoch_paths.extend(paths)
        return paths
    def get_epoch_paths(self):
        return self._epoch_paths
    def end_epoch(self, epoch):
        # Reset the per-epoch path buffer; lifetime counters are kept.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
    def get_diagnostics(self):
        """Return lifetime counters plus path-length statistics for the epoch."""
        path_lens = [len(path['actions']) for path in self._epoch_paths]
        stats = OrderedDict([
            ('num steps total', self._num_steps_total),
            ('num paths total', self._num_paths_total),
        ])
        stats.update(create_stats_ordered_dict(
            "path length",
            path_lens,
            always_show_all_stats=True,
        ))
        return stats
    def get_snapshot(self):
        snapshot_dict = dict(
            policy=self._policy,
        )
        if self._save_env_in_snapshot:
            snapshot_dict['env'] = self._env
        return snapshot_dict
class GoalConditionedPathCollector(MdpPathCollector):
    """MdpPathCollector whose policy sees the observation concatenated with
    the desired goal, for goal-conditioned policies.
    """
    def __init__(
            self,
            *args,
            observation_key='observation',
            desired_goal_key='desired_goal',
            goal_sampling_mode=None,
            **kwargs
    ):
        # Closure captures the two keys; hstack flattens obs+goal into the
        # single vector the policy expects.
        def obs_processor(o):
            return np.hstack((o[observation_key], o[desired_goal_key]))
        rollout_fn = partial(
            rollout,
            preprocess_obs_for_policy_fn=obs_processor,
        )
        super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
        self._observation_key = observation_key
        self._desired_goal_key = desired_goal_key
        self._goal_sampling_mode = goal_sampling_mode
    def collect_new_paths(self, *args, **kwargs):
        # Push the configured goal-sampling mode onto the env before every
        # collection run (the env may be shared with other collectors).
        self._env.goal_sampling_mode = self._goal_sampling_mode
        return super().collect_new_paths(*args, **kwargs)
    def get_snapshot(self):
        snapshot = super().get_snapshot()
        snapshot.update(
            observation_key=self._observation_key,
            desired_goal_key=self._desired_goal_key,
        )
        return snapshot
class ObsDictPathCollector(MdpPathCollector):
    """MdpPathCollector whose policy sees a single entry of the env's dict
    observations, selected by `observation_key`."""

    def __init__(
            self,
            *args,
            observation_key='observation',
            **kwargs
    ):
        super().__init__(
            *args,
            rollout_fn=partial(
                rollout,
                preprocess_obs_for_policy_fn=lambda obs: obs[observation_key],
            ),
            **kwargs
        )
        self._observation_key = observation_key

    def get_snapshot(self):
        snapshot = super().get_snapshot()
        snapshot['observation_key'] = self._observation_key
        return snapshot
class VAEWrappedEnvPathCollector(GoalConditionedPathCollector):
    """Goal-conditioned collector that toggles goal decoding on a
    VAE-wrapped env before each collection run."""

    def __init__(
            self,
            env,
            policy,
            decode_goals=False,
            **kwargs
    ):
        super().__init__(env, policy, **kwargs)
        self._decode_goals = decode_goals

    def collect_new_paths(self, *args, **kwargs):
        # Propagate the flag every time, in case the env is shared.
        self._env.decode_goals = self._decode_goals
        return super().collect_new_paths(*args, **kwargs)
```
#### File: torch/sets/models.py
```python
import numpy as np
from torch import nn
from rlkit.launchers.experiments.disentanglement import (
contextual_encoder_distance_launcher as cedl,
)
from rlkit.torch.core import PyTorchModule
from rlkit.torch.distributions import MultivariateDiagonalNormal
from rlkit.torch.networks import (
BasicCNN,
Flatten,
Mlp,
ConcatMultiHeadedMlp,
Reshape,
)
from rlkit.torch.networks import basic
from rlkit.torch.networks.dcnn import BasicDCNN
from rlkit.torch.networks.mlp import MultiHeadedMlp
from rlkit.torch.networks.stochastic.distribution_generator import (
BernoulliGenerator,
Gaussian,
IndependentGenerator,
)
from rlkit.torch.vae.vae_torch_trainer import VAE
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.sets.fancy_vae_architecture import (
get_fancy_vae,
)
class DummyNetwork(PyTorchModule):
    """Stand-in network that ignores its input values and returns zeros.

    Given output shapes at construction, `forward` returns a zero tensor
    (or a tuple of zero tensors) batched to match the input's leading
    dimension. Useful for ablations and pipeline debugging.
    """
    def __init__(self, *output_shapes):
        super().__init__()
        self._output_shapes = output_shapes

    def forward(self, input):
        # Only the batch size of the input matters; its values are ignored.
        batch_size = input.shape[0]
        if len(self._output_shapes) == 1:
            return ptu.zeros((batch_size, *self._output_shapes[0]))
        return tuple(
            ptu.zeros((batch_size, *shape))
            for shape in self._output_shapes
        )
def create_dummy_image_vae(
        img_chw,
        latent_dim,
        *args,
        **kwargs
) -> VAE:
    """Build a VAE whose encoder and decoder are zero-returning dummy
    networks, with a standard-normal prior. Extra args are ignored."""
    encoder = Gaussian(DummyNetwork((latent_dim,), (latent_dim,)))
    decoder = Gaussian(
        DummyNetwork(img_chw),
        std=1,
        reinterpreted_batch_ndims=3,
    )
    prior = MultivariateDiagonalNormal(
        loc=ptu.zeros(1, latent_dim),
        scale_diag=ptu.ones(1, latent_dim),
    )
    return VAE(encoder, decoder, prior)
def create_image_vae(
        img_chw,
        latent_dim,
        encoder_cnn_kwargs,
        encoder_mlp_kwargs,
        decoder_mlp_kwargs=None,
        decoder_dcnn_kwargs=None,
        use_mlp_decoder=False,
        decoder_distribution="bernoulli",
        use_fancy_architecture=False,
) -> VAE:
    """Build an image VAE with a configurable decoder architecture and
    output distribution.

    Args:
        img_chw: (channels, height, width) of the images.
        latent_dim: latent dimensionality.
        encoder_cnn_kwargs / encoder_mlp_kwargs: encoder hyperparameters.
        decoder_mlp_kwargs / decoder_dcnn_kwargs: decoder hyperparameters;
            if None they are derived by inverting the encoder's.
        use_mlp_decoder: use a fully-connected decoder instead of a DCNN.
        decoder_distribution: one of the gaussian variants or "bernoulli".
        use_fancy_architecture: use the preset architecture from
            fancy_vae_architecture instead of building from kwargs.
    """
    img_num_channels, img_height, img_width = img_chw
    if use_fancy_architecture:
        decoder_network, encoder_network = get_fancy_vae(img_height,
                                                         img_num_channels,
                                                         img_width, latent_dim)
    else:
        encoder_network = create_image_encoder(
            img_chw, latent_dim, encoder_cnn_kwargs, encoder_mlp_kwargs,
        )
        if decoder_mlp_kwargs is None:
            # Mirror the encoder MLP to get the decoder MLP.
            decoder_mlp_kwargs = cedl.invert_encoder_mlp_params(
                encoder_mlp_kwargs
            )
        if use_mlp_decoder:
            decoder_network = create_mlp_image_decoder(
                img_chw,
                latent_dim,
                decoder_mlp_kwargs,
                two_headed=decoder_distribution == 'gaussian_learned_variance',
            )
        else:
            # Per-pixel learned variance is only supported by the MLP decoder.
            if decoder_distribution == "gaussian_learned_variance":
                raise NotImplementedError()
            # Shape entering the DCNN is the encoder CNN's output shape.
            pre_dcnn_chw = encoder_network._modules["0"].output_shape
            if decoder_dcnn_kwargs is None:
                decoder_dcnn_kwargs = cedl.invert_encoder_params(
                    encoder_cnn_kwargs, img_num_channels,
                )
            decoder_network = create_image_decoder(
                pre_dcnn_chw,
                latent_dim,
                decoder_dcnn_kwargs,
                decoder_mlp_kwargs,
            )
    encoder = Gaussian(encoder_network)
    encoder.input_size = encoder_network.input_size
    if decoder_distribution in {
        "gaussian_learned_global_scalar_variance",
        "gaussian_learned_global_image_variance",
        "gaussian_learned_variance",
    }:
        # Attach a learned (global) log-std head alongside the mean decoder.
        if decoder_distribution == "gaussian_learned_global_image_variance":
            log_std = basic.LearnedPositiveConstant(
                ptu.zeros((img_num_channels, img_height, img_width))
            )
            decoder_network = basic.ApplyMany(decoder_network, log_std)
        elif decoder_distribution == "gaussian_learned_global_scalar_variance":
            log_std = basic.LearnedPositiveConstant(ptu.zeros(1))
            decoder_network = basic.ApplyMany(decoder_network, log_std)
        decoder = Gaussian(decoder_network, reinterpreted_batch_ndims=3)
    elif decoder_distribution == "gaussian_fixed_unit_variance":
        decoder = Gaussian(decoder_network, std=1, reinterpreted_batch_ndims=3)
    elif decoder_distribution == "bernoulli":
        decoder = IndependentGenerator(
            BernoulliGenerator(decoder_network), reinterpreted_batch_ndims=3
        )
    else:
        raise NotImplementedError(decoder_distribution)
    # Standard-normal prior over the latent.
    prior = MultivariateDiagonalNormal(
        loc=ptu.zeros(1, latent_dim), scale_diag=ptu.ones(1, latent_dim),
    )
    return VAE(encoder, decoder, prior)
def create_image_encoder(
        img_chw, latent_dim, encoder_cnn_kwargs, encoder_kwargs,
):
    """Build a CNN + MLP encoder with two output heads of size
    `latent_dim` (mean and log-std)."""
    num_channels, height, width = img_chw
    cnn = BasicCNN(
        input_width=width,
        input_height=height,
        input_channels=num_channels,
        **encoder_cnn_kwargs
    )
    head = MultiHeadedMlp(
        input_size=np.prod(cnn.output_shape),
        output_sizes=[latent_dim, latent_dim],
        **encoder_kwargs
    )
    encoder = nn.Sequential(cnn, Flatten(), head)
    # Record the flattened input/output sizes for downstream consumers.
    encoder.input_size = width * height * num_channels
    encoder.output_size = latent_dim
    return encoder
def create_image_decoder(
        pre_dcnn_chw,
        latent_dim,
        decoder_dcnn_kwargs,
        decoder_kwargs,
):
    """Build an MLP + transposed-CNN decoder mapping a latent vector back
    to an image. `pre_dcnn_chw` is the feature-map shape fed to the DCNN."""
    channels, height, width = pre_dcnn_chw
    dcnn = BasicDCNN(
        input_width=width,
        input_height=height,
        input_channels=channels,
        **decoder_dcnn_kwargs
    )
    mlp = Mlp(
        input_size=latent_dim,
        output_size=channels * height * width,
        **decoder_kwargs
    )
    decoder = nn.Sequential(mlp, dcnn)
    decoder.input_size = latent_dim
    return decoder
def create_mlp_image_decoder(
        img_chw, latent_dim, decoder_kwargs, two_headed,
):
    """Build a fully-connected image decoder; with `two_headed` it emits
    two flat images (e.g. mean and log-std), each reshaped to CHW."""
    num_channels, height, width = img_chw
    flat_size = num_channels * height * width
    reshape = Reshape(num_channels, height, width)
    if two_headed:
        core = MultiHeadedMlp(
            input_size=latent_dim,
            output_sizes=[flat_size, flat_size],
            **decoder_kwargs
        )
        # Map applies the reshape to every head's output.
        dec = nn.Sequential(core, basic.Map(reshape))
    else:
        core = Mlp(
            input_size=latent_dim,
            output_size=flat_size,
            **decoder_kwargs
        )
        dec = nn.Sequential(core, reshape)
    dec.input_size = latent_dim
    dec.output_size = flat_size
    return dec
def create_vector_vae(data_dim, latent_dim, encoder_kwargs):
    """Build a VAE over flat vectors; decoder hyperparameters are derived
    by inverting the encoder's, and the prior is standard normal."""
    enc = create_vector_encoder(data_dim, latent_dim, encoder_kwargs)
    dec = create_vector_decoder(
        data_dim, latent_dim, cedl.invert_encoder_mlp_params(encoder_kwargs),
    )
    prior = MultivariateDiagonalNormal(
        loc=ptu.zeros(1, latent_dim),
        scale_diag=ptu.ones(1, latent_dim),
    )
    return VAE(enc, dec, prior)
def create_vector_encoder(data_dim, latent_dim, encoder_kwargs):
    """Build a two-headed MLP encoder (mean and log-std) for flat vectors."""
    network = ConcatMultiHeadedMlp(
        input_size=data_dim,
        output_sizes=[latent_dim, latent_dim],
        **encoder_kwargs
    )
    # Expose sizes for downstream consumers.
    network.input_size = data_dim
    network.output_size = latent_dim
    return network
def create_vector_decoder(data_dim, latent_dim, decoder_kwargs):
    """Build an MLP decoder mapping latent vectors back to data vectors."""
    return Mlp(input_size=latent_dim, output_size=data_dim, **decoder_kwargs)
```
#### File: torch/sets/set_creation.py
```python
import pickle
from os import path as osp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.envs.pygame import pnp_util
from rlkit.launchers.contextual.util import get_gym_env
from rlkit.misc import asset_loader
from rlkit.torch.sets.set_projection import Set
from rlkit.launchers.config import LOCAL_LOG_DIR
def create_sets(
        env_id,
        env_class,
        env_kwargs,
        renderer,
        saved_filename=None,
        save_to_filename=None,
        **kwargs
):
    """Load sets from `saved_filename` if given, otherwise sample them
    from a freshly built env (and optionally persist them).

    Extra kwargs are forwarded to the sampler. Note: `save_to_filename`
    only takes effect when sets are freshly sampled, not when loaded.
    """
    if saved_filename is not None:
        sets = asset_loader.load_local_or_remote_file(saved_filename)
    else:
        env = get_gym_env(env_id, env_class=env_class, env_kwargs=env_kwargs)
        # Only pick-and-place envs support set sampling so far.
        if isinstance(env, PickAndPlaceEnv):
            sets = sample_pnp_sets(env, renderer, **kwargs)
        else:
            raise NotImplementedError()
        if save_to_filename:
            save(sets, save_to_filename)
    return sets
def sample_pnp_sets(
        env,
        renderer,
        num_sets=1,
        num_samples_per_set=128,
        set_configs=None,
        example_state_key="example_state",
        example_image_key="example_image",
):
    """Sample ``Set``s of example states (and rendered images) from a
    pick-and-place env.

    Args:
        env: the pick-and-place env to sample from.
        renderer: renderer used to produce the example images.
        num_sets: number of random set projections when `set_configs` is
            not given (deprecated path).
        num_samples_per_set: examples drawn per set.
        set_configs: explicit configs, one per set, passed to
            `pnp_util.create_set_projection`.
        example_state_key / example_image_key: dict keys for states/images.

    Returns:
        list[Set]: one Set per projection, each holding its example dict.
    """
    if set_configs is None:
        print(__file__, "WARNING: will deprecate soon")
        set_projections = pnp_util.sample_set_projections(env, num_sets)
    else:
        set_projections = [
            pnp_util.create_set_projection(**set_config)
            for set_config in set_configs
        ]
    sets = []
    for set_projection in set_projections:
        example_dict = pnp_util.sample_examples_with_images(
            env,
            renderer,
            set_projection,
            num_samples_per_set,
            state_key=example_state_key,
            image_key=example_image_key,
        )
        sets.append(Set(example_dict, set_projection))
    return sets
def get_absolute_path(relative_path):
    """Resolve *relative_path* against the configured LOCAL_LOG_DIR."""
    return osp.join(LOCAL_LOG_DIR, relative_path)
def load(relative_path):
    """Unpickle and return the object stored at *relative_path*
    (resolved against LOCAL_LOG_DIR)."""
    path = get_absolute_path(relative_path)
    print("loading data from", path)
    # BUG FIX: use a context manager so the file handle is closed even if
    # unpickling raises; the original leaked the open handle.
    with open(path, "rb") as f:
        return pickle.load(f)
def save(data, relative_path):
    """Pickle *data* to *relative_path* (resolved against LOCAL_LOG_DIR)."""
    path = get_absolute_path(relative_path)
    # BUG FIX: use a context manager so the file is flushed and closed even
    # if pickling raises; the original leaked the open handle.
    with open(path, "wb") as f:
        pickle.dump(data, f)
```
|
{
"source": "jcorm714/PokemonUtils",
"score": 4
}
|
#### File: PokemonUtils/Objects/Nature.py
```python
from enum import Enum
import typing
from Objects.Stats import Stats
class Nature(Enum):
    """An Enum representing the list of pokemon natures.

    Each member's value is the nature's display name. Five natures
    (Hardy, Docile, Serious, Bashful, Quirky) are neutral and modify no
    stats.
    """
    HARDY = "Hardy"
    LONELY = "Lonely"
    BRAVE = "Brave"
    ADAMANT = "Adamant"
    NAUGHTY = "Naughty"
    BOLD = "Bold"
    DOCILE = "Docile"
    RELAXED = "Relaxed"
    IMPISH = "Impish"
    LAX = "Lax"
    TIMID = "Timid"
    HASTY = "Hasty"
    SERIOUS = "Serious"
    JOLLY = "Jolly"
    NAIVE = "Naive"
    MODEST = "Modest"
    MILD = "Mild"
    QUIET = "Quiet"
    BASHFUL = "Bashful"
    RASH = "Rash"
    CALM = "Calm"
    GENTLE = "Gentle"
    SASSY = "Sassy"
    CAREFUL = "Careful"
    QUIRKY = "Quirky"
# (raised stat, lowered stat) for every non-neutral nature. Neutral
# natures (Hardy, Docile, Serious, Bashful, Quirky) are intentionally
# absent.
_NATURE_MODIFIERS = {
    Nature.LONELY: (Stats.ATTACK, Stats.DEFENSE),
    Nature.BRAVE: (Stats.ATTACK, Stats.SPEED),
    Nature.ADAMANT: (Stats.ATTACK, Stats.SP_ATTACK),
    # BUG FIX: Naughty lowers Special Defense, not Defense.
    Nature.NAUGHTY: (Stats.ATTACK, Stats.SP_DEFENSE),
    Nature.BOLD: (Stats.DEFENSE, Stats.ATTACK),
    Nature.RELAXED: (Stats.DEFENSE, Stats.SPEED),
    Nature.IMPISH: (Stats.DEFENSE, Stats.SP_ATTACK),
    Nature.LAX: (Stats.DEFENSE, Stats.SP_DEFENSE),
    Nature.TIMID: (Stats.SPEED, Stats.ATTACK),
    Nature.HASTY: (Stats.SPEED, Stats.DEFENSE),
    Nature.JOLLY: (Stats.SPEED, Stats.SP_ATTACK),
    Nature.NAIVE: (Stats.SPEED, Stats.SP_DEFENSE),
    Nature.MODEST: (Stats.SP_ATTACK, Stats.ATTACK),
    Nature.MILD: (Stats.SP_ATTACK, Stats.DEFENSE),
    Nature.QUIET: (Stats.SP_ATTACK, Stats.SPEED),
    Nature.RASH: (Stats.SP_ATTACK, Stats.SP_DEFENSE),
    Nature.CALM: (Stats.SP_DEFENSE, Stats.ATTACK),
    Nature.GENTLE: (Stats.SP_DEFENSE, Stats.DEFENSE),
    Nature.SASSY: (Stats.SP_DEFENSE, Stats.SPEED),
    Nature.CAREFUL: (Stats.SP_DEFENSE, Stats.SP_ATTACK),
}


def get_affected_stats(nature: Nature):
    """Returns the positive and negative stat affected by a nature.

    Parameters
    ----------
    nature: Nature
        An enumeration of the pokemon natures

    Returns
    -------
    dict
        Keys "POS" and "NEG" mapping to the raised/lowered Stats member.
        For a neutral nature both values are the empty string, preserving
        the original interface.
    """
    modifiers = {"POS": "", "NEG": ""}
    if nature in _NATURE_MODIFIERS:
        modifiers["POS"], modifiers["NEG"] = _NATURE_MODIFIERS[nature]
    return modifiers
```
|
{
"source": "jcormier/harvesters",
"score": 2
}
|
#### File: harvesters/util/pfnc.py
```python
from harvesters.util._pfnc import symbolics as _symbolics
#
# PFNC pixel-format tables: value -> name, plus a reverse name -> value map.
symbolics = _symbolics
dict_by_ints = symbolics
dict_by_names = {n: i for i, n in symbolics.items()}
# 32-bit value layout
# |31 24|23 16|15 08|07 00|
# | C| Comp. Layout| Effective Size | Pixel ID |
# Custom flag (bit 31)
pfnc_custom = 0x80000000
# Component layout field (bits 24-25)
pfnc_single_component = 0x01000000
pfnc_multiple_component = 0x02000000
# BUG FIX: the mask must cover BOTH layout bits (24 and 25). The previous
# value 0x02000000 only kept bit 25, so
# (value & mask) == pfnc_single_component could never be true and
# is_single_component() always returned False.
pfnc_component_mask = 0x03000000
# Effective size field (bits 16-23)
pfnc_pixel_size_mask = 0x00ff0000
pfnc_pixel_size_shift = 16
def get_effective_pixel_size(pixel_format_value):
    """
    Return the effective pixel size in bits (what a pixel occupies in
    memory). For many formats this includes padding, so the number of
    actually used bits can be smaller.
    """
    size_field = pixel_format_value & pfnc_pixel_size_mask
    return size_field >> pfnc_pixel_size_shift
def is_custom(pixel_format_value):
return (pixel_format_value & pfnc_custom) == pfnc_custom
def is_single_component(pixel_format_value):
return (pixel_format_value & pfnc_component_mask) == pfnc_single_component
def is_multiple_component(pixel_format_value):
return (pixel_format_value & pfnc_component_mask) == pfnc_multiple_component
def get_bits_per_pixel(data_format):
    """
    Returns the number of (used) bits per pixel.
    So without padding.
    Returns None if format is not known.
    """
    # Walk the known bit-depth groups in ascending order and report the
    # first group that contains the requested format name.
    depth_groups = (
        (8, component_8bit_formats),
        (10, component_10bit_formats),
        (12, component_12bit_formats),
        (14, component_14bit_formats),
        (16, component_16bit_formats),
    )
    for bits, known_formats in depth_groups:
        if data_format in known_formats:
            return bits
    # format not known
    return None
# Formats that carry a single value per pixel location (monochrome,
# single colour planes, single 3D coordinates, confidence maps).
mono_location_formats = [
    #
    'Mono8',
    'Mono8s',
    'Mono10',
    'Mono12',
    'Mono14',
    'Mono16',
    #
    'R8',
    'R10',
    'R12',
    'R16',
    'G8',
    'G10',
    'G12',
    'G16',
    'B8',
    'B10',
    'B12',
    'B16',
    #
    'Coord3D_A8',
    'Coord3D_B8',
    'Coord3D_C8',
    'Coord3D_A16',
    'Coord3D_B16',
    'Coord3D_C16',
    'Coord3D_A32f',
    'Coord3D_B32f',
    'Coord3D_C32f',
    #
    'Confidence1',
    'Confidence8',
    'Confidence16',
    'Confidence32f',
]
# Single-component formats whose samples are bit-packed in memory.
mono_packed_location_formats = [
    'Mono1p',
    'Mono2p',
    'Mono4p',
    'Mono10Packed',
    'Mono10p',
    'Mono12Packed',
    'Mono12p',
    'Coord3D_A10p',
    'Coord3D_B10p',
    'Coord3D_C10p',
    'Coord3D_A12p',
    'Coord3D_B12p',
    'Coord3D_C12p',
]
# Three-component formats with full resolution for every component (4:4:4).
lmn_444_location_formats = [
    #
    'RGB8',
    'RGB10',
    'RGB12',
    'RGB14',
    'RGB16',
    #
    'BGR8',
    'BGR10',
    'BGR12',
    'BGR14',
    'BGR16',
    #
    'Coord3D_ABC8',
    'Coord3D_ABC8_Planar',
    'Coord3D_ABC16',
    'Coord3D_ABC16_Planar',
    'Coord3D_ABC32f',
    'Coord3D_ABC32f_Planar',
]
# Bit-packed variants of the 4:4:4 three-component formats.
lmn_444_packed_location_formats = [
    #
    'RGB8Packed',
    #
    'Coord3D_ABC10p',
    'Coord3D_ABC10p_Planar',
    'Coord3D_ABC12p',
    'Coord3D_ABC12p_Planar',
]
# Three-component formats with chroma subsampled horizontally by two (4:2:2).
lmn_422_location_formats = [
    'YUV422_8_UYVY',
    'YUV422_8',
    'YCbCr422_8',
    'YCbCr601_422_8',
    'YCbCr709_422_8',
    'YCbCr422_8_CbYCrY',
    'YCbCr601_422_8_CbYCrY',
    'YCbCr709_422_8_CbYCrY',
    'YCbCr422_10',
    'YCbCr422_12',
    'YCbCr601_422_10',
    'YCbCr601_422_12',
    'YCbCr709_422_10',
    'YCbCr709_422_12',
    'YCbCr422_10_CbYCrY',
    'YCbCr422_12_CbYCrY',
    'YCbCr601_422_10_CbYCrY',
    'YCbCr601_422_12_CbYCrY',
    'YCbCr709_422_10_CbYCrY',
    'YCbCr709_422_12_CbYCrY',
    'YCbCr2020_422_8',
    'YCbCr2020_422_8_CbYCrY',
    'YCbCr2020_422_10',
    'YCbCr2020_422_10_CbYCrY',
    'YCbCr2020_422_12',
    'YCbCr2020_422_12_CbYCrY',
]
# Bit-packed variants of the 4:2:2 three-component formats.
lmn_422_packed_location_formats = [
    'YCbCr422_10p',
    'YCbCr422_12p',
    'YCbCr601_422_10p',
    'YCbCr601_422_12p',
    'YCbCr709_422_10p',
    'YCbCr709_422_12p',
    'YCbCr422_10p_CbYCrY',
    'YCbCr422_12p_CbYCrY',
    'YCbCr601_422_10p_CbYCrY',
    'YCbCr601_422_12p_CbYCrY',
    'YCbCr709_422_10p_CbYCrY',
    'YCbCr709_422_12p_CbYCrY',
    'YCbCr2020_422_10p',
    'YCbCr2020_422_10p_CbYCrY',
    'YCbCr2020_422_12p',
    'YCbCr2020_422_12p_CbYCrY',
]
# Three-component formats with chroma subsampled by four (4:1:1).
lmn_411_location_formats = [
    'YUV411_8_UYYVYY',
    'YCbCr411_8_CbYYCrYY',
    'YCbCr601_411_8_CbYYCrYY',
    'YCbCr709_411_8_CbYYCrYY',
    'YCbCr411_8',
    'YCbCr2020_411_8_CbYYCrYY',
]
# Four-component formats (colour plus alpha), full resolution (4:4:4:4).
lmno_4444_location_formats = [
    'RGBa8',
    'RGBa10',
    'RGBa12',
    'RGBa14',
    'RGBa16',
    'BGRa8',
    'BGRa10',
    'BGRa12',
    'BGRa14',
    'BGRa16',
]
# Bit-packed variants of the four-component formats.
lmno_4444_packed_location_formats = [
    'RGBa10p',
    'RGBa12p',
    'BGRa10p',
    'BGRa12p',
]
# Two-component formats (two 3D coordinate axes per location).
lm_44_location_formats = [
    'Coord3D_AC8',
    'Coord3D_AC8_Planar',
    'Coord3D_AC16',
    'Coord3D_AC16_Planar',
    'Coord3D_AC32f',
    'Coord3D_AC32f_Planar',
]
# Bit-packed variants of the two-component formats.
lm_44_packed_location_formats = [
    'Coord3D_AC10p',
    'Coord3D_AC10p_Planar',
    'Coord3D_AC12p',
    'Coord3D_AC12p_Planar',
]
# Bayer colour-filter-array formats (one colour sample per location).
bayer_location_formats = [
    'BayerGR8',
    'BayerRG8',
    'BayerGB8',
    'BayerBG8',
    'BayerGR10',
    'BayerRG10',
    'BayerGB10',
    'BayerBG10',
    'BayerGR12',
    'BayerRG12',
    'BayerGB12',
    'BayerBG12',
    'BayerGR16',
    'BayerRG16',
    'BayerGB16',
    'BayerBG16',
]
# Bit-packed variants of the Bayer formats.
bayer_packed_location_formats = [
    'BayerGR10Packed',
    'BayerRG10Packed',
    'BayerGB10Packed',
    'BayerBG10Packed',
    'BayerGR12Packed',
    'BayerRG12Packed',
    'BayerGB12Packed',
    'BayerBG12Packed',
    'BayerBG10p',
    'BayerBG12p',
    'BayerGB10p',
    'BayerGB12p',
    'BayerGR10p',
    'BayerGR12p',
    'BayerRG10p',
    'BayerRG12p',
]
# Formats whose samples fit an unsigned 8-bit container.
uint8_formats = [
    #
    'Mono8',
    #
    'RGB8',
    'RGB8Packed',
    'RGBa8',
    #
    'BGR8',
    'BGRa8',
    #
    'BayerGR8',
    'BayerGB8',
    'BayerRG8',
    'BayerBG8',
    #
    'Coord3D_A8',
    'Coord3D_B8',
    'Coord3D_C8',
    'Coord3D_ABC8',
    'Coord3D_ABC8_Planar',
    'Coord3D_AC8',
    'Coord3D_AC8_Planar',
    #
    'Confidence1',
    'Confidence8',
]
# Formats whose samples need an unsigned 16-bit container (10/12/14/16 bit).
uint16_formats = [
    #
    'Mono10',
    'Mono12',
    'Mono14',
    'Mono16',
    #
    'RGB10',
    'RGB12',
    'RGB14',
    'RGB16',
    #
    'BGR10',
    'BGR12',
    'BGR14',
    'BGR16',
    #
    'RGBa10',
    'RGBa12',
    'RGBa14',
    'RGBa16',
    #
    'BGRa10',
    'BGRa12',
    'BGRa14',
    'BGRa16',
    #
    'BayerGR10',
    'BayerGB10',
    'BayerRG10',
    'BayerBG10',
    #
    'BayerGR12',
    'BayerGB12',
    'BayerRG12',
    'BayerBG12',
    #
    'BayerGR16',
    'BayerRG16',
    'BayerGB16',
    'BayerBG16',
    #
    'Coord3D_A16',
    'Coord3D_B16',
    'Coord3D_C16',
    #
    'Coord3D_ABC16',
    'Coord3D_ABC16_Planar',
    #
    'Coord3D_AC16',
    'Coord3D_AC16_Planar',
    #
    'Coord3D_A10p',
    'Coord3D_B10p',
    'Coord3D_C10p',
    #
    'Coord3D_A12p',
    'Coord3D_B12p',
    'Coord3D_C12p',
    #
    'Coord3D_ABC10p',
    'Coord3D_ABC10p_Planar',
    #
    'Coord3D_ABC12p',
    'Coord3D_ABC12p_Planar',
    #
    'Coord3D_AC10p',
    'Coord3D_AC10p_Planar',
    #
    'Coord3D_AC12p',
    'Coord3D_AC12p_Planar',
    #
    'Confidence16',
]
# Formats whose samples need an unsigned 32-bit container.
uint32_formats = [
    'Mono32',
]
# Formats whose samples are 32-bit IEEE floats.
float32_formats = [
    #
    'Coord3D_A32f',
    'Coord3D_B32f',
    'Coord3D_C32f',
    #
    'Coord3D_ABC32f',
    'Coord3D_ABC32f_Planar',
    #
    'Coord3D_AC32f',
    'Coord3D_AC32f_Planar',
    #
    'Confidence32f',
]
# Per-component bit-depth groups; consumed by get_bits_per_pixel().
component_8bit_formats = [
    #
    'Mono8',
    #
    'RGB8',
    'RGBa8',
    #
    'BGR8',
    'BGRa8',
    #
    'BayerGR8',
    'BayerGB8',
    'BayerRG8',
    'BayerBG8',
    #
    'Confidence8',
]
component_10bit_formats = [
    #
    'Mono10',
    #
    'RGB10',
    'RGBa10',
    #
    'BGR10',
    'BGRa10',
    #
    'BayerGR10',
    'BayerGB10',
    'BayerRG10',
    'BayerBG10',
]
component_12bit_formats = [
    #
    'Mono12',
    #
    'RGB12',
    'RGBa12',
    #
    'BGR12',
    'BGRa12',
    #
    'BayerGR12',
    'BayerGB12',
    'BayerRG12',
    'BayerBG12',
]
component_14bit_formats = [
    #
    'Mono14',
    #
    'RGB14',
    'RGBa14',
    #
    'BGR14',
    'BGRa14',
]
component_16bit_formats = [
    #
    'Mono16',
    #
    'RGB16',
    'RGBa16',
    #
    'BayerGR16',
    'BayerRG16',
    'BayerGB16',
    'BayerBG16',
    #
    'Coord3D_A16',
    'Coord3D_B16',
    'Coord3D_C16',
    #
    'Coord3D_ABC16',
    'Coord3D_ABC16_Planar',
    #
    'Coord3D_AC16',
    'Coord3D_AC16_Planar',
    #
    'Confidence16',
]
component_32bit_formats = [
    'Confidence32f',
]
# Formats whose data represents a 2-D image component (as opposed to
# line-scan or other layouts).
component_2d_formats = [
    #
    'Mono8',
    'Mono10',
    'Mono12',
    'Mono14',
    'Mono16',
    #
    'RGB8',
    'RGB10',
    'RGB12',
    'RGB14',
    'RGB16',
    #
    'BGR8',
    'BGR10',
    'BGR12',
    'BGR14',
    'BGR16',
    #
    'RGBa8',
    'RGBa10',
    'RGBa12',
    'RGBa14',
    'RGBa16',
    #
    'BGRa8',
    'BGRa10',
    'BGRa12',
    'BGRa14',
    'BGRa16',
    #
    'BayerGR8',
    'BayerGB8',
    'BayerRG8',
    'BayerBG8',
    #
    'BayerGR10',
    'BayerGB10',
    'BayerRG10',
    'BayerBG10',
    #
    'BayerGR12',
    'BayerGB12',
    'BayerRG12',
    'BayerBG12',
    #
    'BayerGR16',
    'BayerRG16',
    'BayerGB16',
    'BayerBG16',
    #
    'Coord3D_A8',
    'Coord3D_B8',
    'Coord3D_C8',
    'Coord3D_ABC8',
    'Coord3D_ABC8_Planar',
    'Coord3D_AC8',
    'Coord3D_AC8_Planar',
    'Coord3D_A16',
    'Coord3D_B16',
    'Coord3D_C16',
    'Coord3D_ABC16',
    'Coord3D_ABC16_Planar',
    'Coord3D_AC16',
    'Coord3D_AC16_Planar',
    'Coord3D_A32f',
    'Coord3D_B32f',
    'Coord3D_C32f',
    'Coord3D_ABC32f',
    'Coord3D_ABC32f_Planar',
    'Coord3D_AC32f',
    'Coord3D_AC32f_Planar',
    'Coord3D_A10p',
    'Coord3D_B10p',
    'Coord3D_C10p',
    'Coord3D_A12p',
    'Coord3D_B12p',
    'Coord3D_C12p',
    'Coord3D_ABC10p',
    'Coord3D_ABC10p_Planar',
    'Coord3D_ABC12p',
    'Coord3D_ABC12p_Planar',
    'Coord3D_AC10p',
    'Coord3D_AC10p_Planar',
    'Coord3D_AC12p',
    'Coord3D_AC12p_Planar',
    #
    'Confidence1',
    'Confidence1p',
    'Confidence8',
    'Confidence16',
    'Confidence32f',
]
# Channel-order family groups, used to decide component ordering.
rgb_formats = [
    #
    'RGB8',
    'RGB10',
    'RGB12',
    'RGB14',
    'RGB16',
]
rgba_formats = [
    #
    'RGBa8',
    'RGBa10',
    'RGBa12',
    'RGBa14',
    'RGBa16',
]
bgr_formats = [
    #
    'BGR8',
    'BGR10',
    'BGR12',
    'BGR14',
    'BGR16',
]
bgra_formats = [
    #
    'BGRa8',
    'BGRa10',
    'BGRa12',
    'BGRa14',
    'BGRa16',
]
```
|
{
"source": "JCorn64/QT-Study-App",
"score": 3
}
|
#### File: QT-Study-App/students/forms.py
```python
from django import forms
from .models import StudentSignup
class StudentSignupForm(forms.ModelForm):
    """Signup form for a StudentSignup record.

    Every field listed in ``Meta.required`` is forced to be mandatory,
    regardless of how the underlying model fields are declared.
    """

    class Meta:
        model = StudentSignup
        fields = ('name', 'phone_number', 'classes')
        # Not a built-in Meta option -- consumed by __init__ below.
        required = ('name', 'phone_number', 'classes')
        labels = {
            "phone_number": "Phone Number",
            "classes": "Classes (ex. CS 3240, CS 2150)",
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mark every field named in Meta.required as mandatory.
        for field_name in self.Meta.required:
            self.fields[field_name].required = True
```
#### File: QT-Study-App/students/tests.py
```python
from django.test import TestCase
from students.models import StudentSignup
from django.contrib.auth.models import User
#test
class BasicTestCase(TestCase):
    """Smoke test proving the test runner itself is wired up."""

    def test_1eq1(self):
        self.assertEqual(1, 1)
class StudentSignUpTestCase(TestCase):
    """Round-trip and edit checks for the StudentSignup model."""

    def setUp(self):
        # One fully populated signup and one with an empty class list.
        full_user = User.objects.create_user('john', '<EMAIL>', 'johnpassword')
        StudentSignup.objects.create(phone_number="123-456-7890", classes="CS 3240", user=full_user)
        empty_user = User.objects.create_user('blank', '<EMAIL>', 'password')
        StudentSignup.objects.create(phone_number="5", classes="", user=empty_user)

    def test_student_equivalence(self):
        # A stored signup round-trips its classes string.
        signup = StudentSignup.objects.get(phone_number="123-456-7890")
        self.assertEqual(signup.classes, "CS 3240")

    def test_student_boundary(self):
        # Boundary case: an empty classes string is preserved.
        signup = StudentSignup.objects.get(phone_number="5")
        self.assertEqual(signup.classes, "")

    def test_student_edit(self):
        # Editing and saving a signup keeps the new values on the instance.
        signup = StudentSignup.objects.get(phone_number="123-456-7890")
        signup.phone_number = "5"
        signup.classes = "CS 2150"
        signup.save()
        self.assertEqual(signup.phone_number, "5")
        self.assertEqual(signup.classes, "CS 2150")
```
|
{
"source": "jcornaz/hesso.cloud.plex",
"score": 2
}
|
#### File: webserver/controllers/file_delete_controller.py
```python
from os import remove
from os.path import isfile
from shutil import rmtree
from flask import jsonify
from services.PathService import get_absolute_storage_path
def file_delete_controller(path):
    """Delete the file or directory at *path* (relative to the storage root).

    Returns a Flask JSON response with an ``error`` flag and a
    human-readable ``message`` describing the outcome.
    """
    absolute_path = get_absolute_storage_path(path)
    try:
        if isfile(absolute_path):
            remove(absolute_path)
            json = {'error': False, 'message': "File %s successfully deleted." % path}
        else:
            rmtree(absolute_path)
            json = {'error': False, 'message': "Directory %s successfully deleted." % path}
    except Exception as e:
        # str(e): an exception object is not JSON-serializable, so passing it
        # straight to jsonify would itself raise and mask the original error.
        json = {'error': True, 'message': str(e)}
    return jsonify(json)
```
|
{
"source": "jcornford/ndf_reader",
"score": 2
}
|
#### File: jcornford/ndf_reader/ndfconverter.py
```python
import sys
import struct
import os
import time
import logging
import pandas as pd
import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
from scipy import signal, stats
#from line_profiler import LineProfiler
# Python 2 compatibility: make ``range`` behave like the lazy Py2 ``xrange``.
if sys.version_info < (3,):
    range = xrange
# Disabled line-profiler decorator, kept for reference; re-enable together
# with the ``line_profiler`` import above when profiling.
'''
# decorator needed when profiling
def lprofile():
    def inner(func):
        def profiled_func(*args, **kwargs):
            try:
                profiler = LineProfiler()
                profiler.add_function(func)
                profiler.enable_by_count()
                return func(*args, **kwargs)
            finally:
                profiler.print_stats()
        return profiled_func
    return inner
'''
class NdfFile:
    """
    TODO:
        - glitch detection is a little messy, relying on bindings.
        - bad messages doesnt delete if 2 messages in window for one (minor but would be better to delete both)
        - Find out if they ever start recording half way through - for auto fs detection
        - Code up printing the ndf object __repr__
        - Clean up unused __init__ attributes

    Class to load ndf binary files.

    The NDF file starts with a header of at least twelve bytes:
        - The first four bytes spell the NDF identifier " ndf". The identifier is then
          followed by three four-byte big-endian numbers.
        - The first number is the address of the meta-data string, i.e. the byte offset from
          the first byte of the file (indexed from 0). This is therefore the number of bytes
          to skip from the start of the file to begin reading the meta-data.
        - The second number is the address of the first data byte. The data extends to the end of the file.
        - The third number is the actual length of the meta-data string, as it was last written.
          If this number is zero, any routines dealing with the meta-data string must determine
          the length of the string themselves.

    The messages in the data recorder's message buffer are each four bytes long. The bytes of each message are listed in
    the table below. The Channel Number is used to identify the source of the message. Channel number zero is reserved for
    clock messages. Following the channel number, each message contains a sixteen-bit data word. In the case of transmitters
    , the sixteen-bit data word is a digitized voltage. The last byte of the message is a timestamp.

    Byte    Contents
    0       Channel Number
    1       Most Significant Data Byte
    2       Least Significant Data Byte
    3       Timestamp or Version Number

    All messages with channel number zero are clock messages. This channel acts as a reference clock that
    is subsequently used to align the data messages from the transmitter channels and do error correction.
    The messages in this channel are generated at a frequency of 128 Hz.

    Each ndf file typically encodes 1 hour of data at 512 Hz, although it can also encode data at other frequencies
    (e.g. 1024 Hz) it does so for up to 14 transmitters. Each channel sends a message roughly 4 times for every
    channel 0 message (because they are operating at 512 Hz, while the clock is at 128 Hz).
    """

    def __init__(self, file_path, verbose = False, fs = 'auto', amp_factor = 200):
        self.filepath = file_path
        # some unused
        self.tid_set = set()
        self.tid_to_fs_dict = {}
        self.tid_raw_data_time_dict = {}
        self.tid_data_time_dict = {}
        self.resampled = False
        # Fix: save() and correct_sampling_frequency() use the underscored
        # attribute; initialise it so save() works before any resampling.
        self._resampled = False
        self.file_label = file_path.split('/')[-1].split('.')[0]
        self.identifier = None
        self.data_address = None
        self.metadata = None
        self.t_stamps = None
        self.read_ids = None
        self.fs = fs
        self._n_possible_glitches = None
        self._glitch_count = None
        self._plot_each_glitch = None
        self.read_id = None
        self.verbose = verbose
        self.file_time_len_sec = 3600
        self.micro_volt_div = 0.4 # this is the dac units?
        # firmware dependent:
        self.clock_tick_cycle = 7.8125e-3 # the "big" clock messages are 128Hz, 1/128 = 7.8125e-3
        self.clock_division = self.clock_tick_cycle / 256.0 # diff values from one byte
        self._read_file_metadata()
        self.get_valid_tids_and_fs()

    def __getitem__(self, item):
        #assert type(item) == int
        assert item in self.tid_set, 'ERROR: Invalid tid for file'
        return self.tid_data_time_dict[item]

    def _read_file_metadata(self):
        # Parse the 12+ byte NDF header (see class docstring for the layout).
        with open(self.filepath, 'rb') as f:
            f.seek(0)
            self.identifier = f.read(4)
            assert (self.identifier == b' ndf')
            meta_data_string_address = struct.unpack('>I', f.read(4))[0]
            self.data_address = struct.unpack('>I', f.read(4))[0]
            meta_data_length = struct.unpack('>I', f.read(4))[0]
            if meta_data_length != 0:
                f.seek(meta_data_string_address)
                self.metadata = f.read(meta_data_length)
                # need to handle the fact it is in bytes?
                #print ('\n'.join(self.metadata.split('\n')[1:-2]))
                #print (self.metadata)
            else:
                print('meta data length unknown - not bothering to work it out...')

    def get_valid_tids_and_fs(self):
        """
        - Here work out which t_ids are in the file and their
          sampling frequency. Threshold of at least 5000 datapoints!
        """
        f = open(self.filepath, 'rb')
        f.seek(self.data_address)
        self._e_bit_reads = np.fromfile(f, dtype = 'u1')
        self.transmitter_id_bytes = self._e_bit_reads[::4]
        tid_message_counts = pd.Series(self.transmitter_id_bytes).value_counts()
        possible_freqs = [256,512,1024]
        # .items(): Series.iteritems() was removed in pandas 2.0.
        for tid, count in tid_message_counts.items():
            if count > 5000 and tid != 0: # arbitrary threshold to exclude glitches
                error = [abs(3600 - count/fs) for fs in possible_freqs]
                if self.fs == 'auto':
                    self.tid_to_fs_dict[tid] = possible_freqs[np.argmin(error)]
                else:
                    self.fs = float(self.fs)
                    self.tid_to_fs_dict[tid] = self.fs
                self.tid_set.add(tid)
                self.tid_raw_data_time_dict[tid] = {}
                self.tid_data_time_dict[tid] = {}
        logging.info(self.filepath +' valid ids and freq are: '+str(self.tid_to_fs_dict))

    #@lprofile()
    def glitch_removal(self, plot_glitches=False, print_output=False,
                       plot_sub_glitches = False, tactic = 'mad'):
        """
        Tactics can either be 'std', 'mad','roll_med', 'big_guns'
        """
        for tid in self.read_ids:
            # create binding between tid data and the data to deglitch
            self.data_to_deglitch = self.tid_data_time_dict[tid]['data']
            self.time_to_deglitch = self.tid_data_time_dict[tid]['time']
            #print (self.data_to_deglitch is self.tid_data_time_dict[tid]['data'])
            self._n_possible_glitches = 0
            self._glitch_count = 0
            self._plot_each_glitch = plot_sub_glitches
            if plot_glitches:
                plt.figure(figsize = (15, 4))
                plt.plot(self.time_to_deglitch, self.data_to_deglitch, 'k')
                plt.title('Full raw trace');plt.xlabel('Time (seconds)')
                plt.xlim(0,self.time_to_deglitch[-1])
                plt.show()
            if tactic == 'std':
                crossing_locations = self._stddev_based_outlier()
                self._check_glitch_candidates(crossing_locations)
            elif tactic == 'mad':
                crossing_locations = np.where(self._mad_based_outlier())[0]
                self._check_glitch_candidates(crossing_locations)
            elif tactic == 'roll_med':
                crossing_locations = np.where(self._rolling_median_based_outlier())[0]
                self._check_glitch_candidates(crossing_locations)
            elif tactic == 'big_guns':
                # Run all three detectors in sequence.
                crossing_locations = np.where(self._rolling_median_based_outlier())[0]
                self._check_glitch_candidates(crossing_locations)
                crossing_locations = np.where(self._mad_based_outlier())[0]
                self._check_glitch_candidates(crossing_locations)
                crossing_locations = self._stddev_based_outlier()
                self._check_glitch_candidates(crossing_locations)
            else:
                print ('Please specify detection tactic: ("mad","roll_med","big_guns", "std")')
                raise
            logging.debug('Tid '+str(tid)+': removed '+str(self._glitch_count)+' datapoints as glitches. There were '+str(self._n_possible_glitches)+' possible glitches.')
            if self.verbose:
                print('Tid '+str(tid)+': removed '+str(self._glitch_count)+' datapoints as glitches. There were '+str(self._n_possible_glitches)+' possible glitches.')
            if plot_glitches:
                plt.figure(figsize = (15, 4))
                plt.plot(self.time_to_deglitch, self.data_to_deglitch, 'k')
                plt.title('De-glitched trace');plt.xlabel('Time (seconds)')
                plt.xlim(0,self.time_to_deglitch[-1])
                plt.show()

    def _mad_based_outlier(self, thresh=3.5):
        """
        From stackoverflow?
        points : An numobservations by numdimensions array of observations
        thresh : The modified z-score to use as a threshold. Observations with
            a modified z-score (based on the median absolute deviation) greater
            than this value will be classified as outliers.
        """
        points = self.data_to_deglitch
        if len(points.shape) == 1:
            points = points[:,None]
        median = np.median(points, axis=0)
        diff = np.sum((points - median)**2, axis=-1)
        diff = np.sqrt(diff)
        med_abs_deviation = np.median(diff)
        modified_z_score = 0.6745 * diff / med_abs_deviation
        return modified_z_score > thresh

    def _rolling_median_based_outlier(self, threshold = 1):
        data = self.data_to_deglitch
        if len(data.shape) == 1:
            data = data[:, None]
        df = pd.DataFrame(data, columns=['raw'])
        df['rolling'] = df['raw'].rolling(window=10, center =
                                          True).median().fillna(method='bfill').fillna(method='ffill')
        difference = np.abs(df['raw'] - df['rolling'])
        #inlier_idx = difference < threshold
        outlier_idx = difference > threshold
        n_glitch = sum(abs(outlier_idx))
        if n_glitch > 200:
            logging.warning('Warning: more than 200 glitches detected! n_glitch = '+str(n_glitch))
        return outlier_idx

    def _stddev_based_outlier(self, x_std_threshold=10):
        std_dev = np.std(self.data_to_deglitch)
        mean_point = np.mean(self.data_to_deglitch)
        threshold = std_dev * x_std_threshold + mean_point
        crossing_locations = np.where(self.data_to_deglitch > threshold)[0]
        return crossing_locations

    def _check_glitch_candidates(self,crossing_locations, diff_threshold=10,):
        self._n_possible_glitches += len(crossing_locations)
        # check local difference is much bigger than the mean difference between points
        glitch_count = 0
        std_dev = np.std(self.data_to_deglitch)
        for location in crossing_locations:
            i = location - 1
            ii = location + 1
            try:
                if abs(self.data_to_deglitch[location] - self.data_to_deglitch[ii]) > diff_threshold * std_dev:
                    # plot glitches to be removed if plotting option is on
                    if self._plot_each_glitch:
                        plt.figure(figsize = (15, 4))
                        plt.plot(self.time_to_deglitch[location - 512:location + 512],
                                 self.data_to_deglitch[location - 512:location + 512], 'k')
                        plt.ylabel('Time (s)'); plt.title('Glitch '+str(glitch_count+1))
                        plt.show()
                    try:
                        # Fix: linear interpolation slope is d(data)/d(time);
                        # the previous denominator was the *data* delta, which
                        # cancelled the numerator and made the slope factor 1.
                        value = self.data_to_deglitch[i] + (self.time_to_deglitch[location] - self.time_to_deglitch[i]) * (
                            self.data_to_deglitch[ii] - self.data_to_deglitch[i]) / (self.time_to_deglitch[ii] - self.time_to_deglitch[i])
                        self.data_to_deglitch[location] = value
                    except IndexError:
                        pass
                    glitch_count += 1
            except IndexError:
                pass
        self._glitch_count += glitch_count

    def correct_sampling_frequency(self):
        '''
        Remember, this is acting on the modified data (bad message and glitch already)
        so self.tid_data_time_dict
        :return:
        '''
        # this occurs after bad messages, so working with data dict
        # first check that we are not interpolating datapoints for more than 1 second?
        for tid in self.read_ids:
            max_interp = max(np.diff(self.tid_data_time_dict[tid]['time']))
            try:
                assert max_interp < 2.0
            except AssertionError:
                logging.warning('WARNING: You interpolated for greater than two seconds! ('+ str('{first:.2f}'.format(first = max_interp))+' sec)')
                logging.warning('File was '+str(os.path.split(self.filepath)[1])+ ', transmitter id was '+ str(tid))
            # do linear interpolation between the points, where !nan
            # int(): np.linspace requires an integer num (fs may be a float).
            regularised_time = np.linspace(0, 3600.0, num=int(3600 * self.tid_to_fs_dict[tid]))
            not_nan = np.logical_not(np.isnan(self.tid_data_time_dict[tid]['data']))
            self.tid_data_time_dict[tid]['data'] = np.interp(regularised_time,
                                                             self.tid_data_time_dict[tid]['time'][not_nan],
                                                             self.tid_data_time_dict[tid]['data'][not_nan])
            self.tid_data_time_dict[tid]['time'] = regularised_time
            if self.verbose:
                print('Tid '+str(tid)+': regularised fs to '+str(self.tid_to_fs_dict[tid])+' Hz '+str(self.tid_data_time_dict[tid]['data'].shape[0]) +' datapoints')
        self._resampled = True

    def save(self, save_file_name = None):
        """
        Saves file in h5 format. Will only save the tid/tids that have loaded.

        Args:
            save_file_name: optional output path (without the '.h5' suffix)
        """
        if not save_file_name:
            # os.path.splitext instead of str.strip('.ndf'): strip() removes
            # any leading/trailing 'n', 'd', 'f' or '.' characters, which
            # mangles paths such as 'dir/fnd.ndf'.
            hdf5_filename = os.path.splitext(self.filepath)[0]+'_Tid_'+''.join(str([tid for tid in self.read_ids]))+ '.h5'
        else:
            hdf5_filename = save_file_name + '.h5'
        with h5py.File(hdf5_filename, 'w') as f:
            f.attrs['num_channels'] = len(self.read_ids)
            f.attrs['t_ids'] = list(self.read_ids)
            f.attrs['fs_dict'] = str(self.tid_to_fs_dict)
            file_group = f.create_group(os.path.split(self.filepath)[1][:-4])
            for tid in self.read_ids:
                transmitter_group = file_group.create_group(str(tid))
                transmitter_group.attrs['fs'] = self.tid_to_fs_dict[tid]
                transmitter_group.attrs['tid'] = tid
                transmitter_group.create_dataset('data',
                                                 data=self.tid_data_time_dict[tid]['data'],
                                                 compression = "gzip", dtype='f4',
                                                 chunks = self.tid_data_time_dict[tid]['data'].shape)
                transmitter_group.create_dataset('time',
                                                 data=self.tid_data_time_dict[tid]['time'],
                                                 compression = "gzip", dtype='f4',
                                                 chunks = self.tid_data_time_dict[tid]['time'].shape)
                transmitter_group.attrs["resampled"] = self._resampled
        #print f.attrs['fs_dict']
        if self.verbose:
            print('Saved data as:'+str(hdf5_filename)+ ' Resampled = ' + str(self._resampled))

    def _merge_coarse_and_fine_clocks(self):
        # convert timestamps into correct time using clock id
        t_clock_data = np.zeros(self.voltage_messages.shape)
        t_clock_data[self.transmitter_id_bytes == 0] = 1 # this is big ticks
        coarse_time_vector = np.cumsum(t_clock_data) * self.clock_tick_cycle
        fine_time_vector = self.t_stamps_256 * self.clock_division
        self.time_array = fine_time_vector + coarse_time_vector

    def load(self, read_ids = [],
             auto_glitch_removal = True,
             auto_resampling = True,
             auto_filter = True,
             scale_and_filter = False):
        '''
        N.B. Should run glitch removal before high pass filtering and auto resampling... If unhappy with glitches,
        turn off filtering and the resampling and then run their methods etc.

        Args:
            read_ids: ids to load, can be integer of list of integers
            auto_glitch_removal: to automatically detect glitches with default tactic median abs deviation
            auto_resampling: to resample fs to regular sampling frequency
            auto_filter : high pass filter traces at default 1 hz
            scale_and_filter: high pass filter (default 1 hz) and scale to mode std dev 5 second blocks of trace
                WARNING: This is more for visualisation of what the feature extractor is working on. TO keep things
                simple, when saving HDF5 files, save non-scaled.

        Returns:
            data and time is stored in self.tid_data_time_dict attribute. Access data via obj[tid]['data'].
        '''
        self.read_ids = read_ids
        logging.info('Loading '+ self.filepath +'read ids are: '+str(self.read_ids))
        if read_ids == [] or str(read_ids).lower() == 'all':
            self.read_ids = list(self.tid_set)
        if not hasattr(self.read_ids, '__iter__'):
            self.read_ids = [read_ids]
        f = open(self.filepath, 'rb')
        f.seek(self.data_address)
        # read everything in 8bits, grabs time stamps, then get_file props has already read these ids
        self.t_stamps_256 = self._e_bit_reads[3::4]
        # read again, but in 16 bit chunks, grab messages
        f.seek(self.data_address + 1)
        self.voltage_messages = np.fromfile(f, '>u2')[::2]
        self._merge_coarse_and_fine_clocks() # this assigns self.time_array
        for read_id in self.read_ids:
            assert read_id in self.tid_set, "Transmitter %i is not a valid transmitter id" % read_id
            self.tid_raw_data_time_dict[read_id]['data'] = self.voltage_messages[self.transmitter_id_bytes == read_id] * self.micro_volt_div
            self.tid_raw_data_time_dict[read_id]['time'] = self.time_array[self.transmitter_id_bytes == read_id]
        # remove bad messages
        self._correct_bad_messages()
        if auto_glitch_removal:
            self.glitch_removal(tactic='mad')
        if auto_resampling:
            self.correct_sampling_frequency()
        # there should now be no nans surviving here!
        if auto_filter and not scale_and_filter:
            self.highpass_filter()
        if scale_and_filter:
            self.highpass_filter()
            self.standardise_to_mode_stddev()

    def highpass_filter(self, cutoff_hz = 1):
        '''
        Implements high pass digital butterworth filter, order 2.

        Args:
            cutoff_hz: default is 1hz
        '''
        for read_id in self.read_ids:
            fs = self.tid_to_fs_dict[read_id]
            nyq = 0.5 * fs
            cutoff_decimal = cutoff_hz/nyq
            logging.debug('Highpassfiltering, tid = '+str(read_id)+' fs: ' + str(fs) + ' at '+ str(cutoff_hz)+ ' Hz')
            data = self.tid_data_time_dict[read_id]['data']
            data = data - np.mean(data) # remove mean to try and reduce any filtering artifacts
            b, a = signal.butter(2, cutoff_decimal, 'highpass', analog=False)
            filtered_data = signal.filtfilt(b, a, data)
            self.tid_data_time_dict[read_id]['data'] = filtered_data

    def standardise_to_mode_stddev(self, stdtw = 5, std_sigfigs = 2):
        '''
        Calculates mode std dev and divides by it.

        Args:
            stdtw: time period over which to calculate std deviation
            std_sigfigs: n signfigs to round to
        '''
        for read_id in self.read_ids:
            fs = self.tid_to_fs_dict[read_id]
            data = self.tid_data_time_dict[read_id]['data']
            logging.debug('Standardising to mode std dev, tid = '+str(read_id))
            reshaped = np.reshape(data, (int(3600/stdtw), int(stdtw*fs)))
            #std_vector = self.round_to_sigfigs(np.std(reshaped, axis = 1), sigfigs=std_sigfigs)
            # Fix: the std must be taken per chunk over the reshaped
            # (n_chunks, chunk_len) array; np.std(data, axis=1) on the 1-D
            # trace raises an axis error.
            std_vector = np.round(np.std(reshaped, axis = 1), 0)
            std_vector = std_vector[std_vector != 0]
            if std_vector.shape[0] > 0:
                mode_std = stats.mode(std_vector)[0] # can be zero if there is big signal loss
                scaled = np.divide(data, mode_std)
                self.tid_data_time_dict[read_id]['data'] = scaled
                logging.debug(str(mode_std)+' is mode std of trace split into '+ str(stdtw)+ ' second chunks')
            elif std_vector.shape[0] == 0:
                self.tid_data_time_dict[read_id]['data'] = None
                logging.error(' File std is all 0, changed data to be None')

    @staticmethod
    def round_to_sigfigs(x, sigfigs):
        """
        N.B Stolen from stack overflow:
        http://stackoverflow.com/questions/18915378/rounding-to-significant-figures-in-numpy

        Rounds the value(s) in x to the number of significant figures in sigfigs.

        Restrictions:
        sigfigs must be an integer type and store a positive value.
        x must be a real value or an array like object containing only real values.
        """
        #The following constant was computed in maxima 5.35.1 using 64 bigfloat digits of precision
        __logBase10of2 = 3.010299956639811952137388947244930267681898814621085413104274611e-1
        if not ( type(sigfigs) is int or np.issubdtype(sigfigs, np.integer)):
            raise TypeError( "RoundToSigFigs: sigfigs must be an integer." )
        if not np.all(np.isreal( x )):
            raise TypeError( "RoundToSigFigs: all x must be real." )
        if sigfigs <= 0:
            raise ValueError( "RoundtoSigFigs: sigfigs must be positive." )
        mantissas, binaryExponents = np.frexp(x)
        decimalExponents = __logBase10of2 * binaryExponents
        intParts = np.floor(decimalExponents)
        mantissas *= 10.0**(decimalExponents - intParts)
        return np.around(mantissas, decimals=sigfigs - 1 ) * 10.0**intParts

    def _correct_bad_messages(self):
        '''
        - okay so we have 128hz as the clock...
        - fs / clock_rate is the n_messages between clocks
        - 256 / n_messages is the thing we are diving to get the residuals
        '''
        for tid in self.read_ids:
            transmitter_timestamps = self.t_stamps_256[self.transmitter_id_bytes == tid]
            fs = self.tid_to_fs_dict[tid]
            n_messages = fs/128 # 128 is clock
            expected_interval = 256/n_messages # 256 is bits (if 512hz fs this is 64)
            timestamp_moduli = transmitter_timestamps % expected_interval
            # now get params for reshaping...
            n_rows = int(fs*4)
            #n_rows = 2000
            n_fullcols = int(timestamp_moduli.size//n_rows)
            n_extra_stamps = timestamp_moduli.shape[0] - (n_rows*n_fullcols)
            end_moduli = timestamp_moduli[-n_extra_stamps:]
            if n_extra_stamps:
                reshaped_moduli = np.reshape(timestamp_moduli[:-n_extra_stamps], (n_rows, n_fullcols), order = 'F')
                # order F reshaped in a "fortran manner, first axis changing fastest", calculating down the columns here
                end_mean= ss.circmean(end_moduli, high = expected_interval)
                end_moduli_corrected = (end_moduli - end_mean)
                mean_vector = ss.circmean(reshaped_moduli, high=expected_interval, axis=0)
                moduli_array_corrected = (reshaped_moduli - mean_vector)
                drift_corrected_timestamp_moduli = np.concatenate([np.ravel(moduli_array_corrected, order = 'F'), end_moduli_corrected])
            elif n_extra_stamps == 0: # can be reshaped exactly
                reshaped_moduli = np.reshape(timestamp_moduli, (n_rows, n_fullcols), order = 'F')
                mean_vector = ss.circmean(reshaped_moduli, high=expected_interval, axis=0)
                moduli_array_corrected = (reshaped_moduli - mean_vector)
                drift_corrected_timestamp_moduli = np.ravel(moduli_array_corrected, order = 'F')
            drift_corrected_timestamp_moduli = np.absolute(drift_corrected_timestamp_moduli)
            self.drift_corrected_timestamp_moduli = drift_corrected_timestamp_moduli
            bad_message_locs = np.where(np.logical_and(drift_corrected_timestamp_moduli > 9,
                                                       drift_corrected_timestamp_moduli < (expected_interval-9)))[0]
            self.tid_data_time_dict[tid]['data'] = np.delete(self.tid_raw_data_time_dict[tid]['data'], bad_message_locs)
            self.tid_data_time_dict[tid]['time'] = np.delete(self.tid_raw_data_time_dict[tid]['time'], bad_message_locs)
            logging.debug('Tid ' +str(tid)+ ': Detected '+ str(len(bad_message_locs)) + ' bad messages out of '+ str(self.tid_raw_data_time_dict[tid]['data'].shape[0])
                          + ' Remaining : '+str(self.tid_data_time_dict[tid]['data'].shape[0]))
            if len(bad_message_locs) > 0.5*self.tid_raw_data_time_dict[tid]['data'].shape[0]:
                logging.error(' >half messages detected as bad messages. Probably change fs from auto to the correct frequency')
            if self.verbose:
                print ('Tid ' +str(tid)+ ': Detected '+ str(len(bad_message_locs)) + ' bad messages out of '+ str(self.tid_raw_data_time_dict[tid]['data'].shape[0])
                       + ' Remaining : '+str(self.tid_data_time_dict[tid]['data'].shape[0]))
            #if len(bad_message_locs) > 0.5*self.tid_raw_data_time_dict[tid]['data'].shape[0]:
            #    print('WARNING: >half messages detected as bad messages. Probably change fs from auto to the correct frequency')
```
|
{
"source": "jcorredorc/udemy_ros_beginners",
"score": 3
}
|
#### File: src/scan_laser_example/scan_subscriber.py
```python
import rospy
from sensor_msgs.msg import LaserScan
import math
def scan_callback(scan_data):
    """Print summary statistics for one incoming LaserScan message."""
    # Closest return in the scan.
    min_value, min_index = min_range_index(scan_data.ranges)
    print("\nthe minimum range value is: ", min_value)
    print("the minimum range index is: ", min_index)
    # Farthest return in the scan.
    max_value, max_index = max_range_index(scan_data.ranges)
    print("\nthe maximum range value is: ", max_value)
    print("the maximum range index is: ", max_index)
    # Overall mean range.
    mean_range = average_range(scan_data.ranges)
    print("\nthe average range value is: ", mean_range)
    # Mean over a small fixed window of beams.
    window_mean = average_between_indices(scan_data.ranges, 2, 7)
    print("\nthe average between 2 indices is: ", window_mean)
    print("the field of view: ", field_of_view(scan_data))
def field_of_view(scan_data):
    """Return the scan's angular field of view in degrees.

    Fix: the original multiplied by 180.0/3.14, losing accuracy; use
    math.degrees (exact pi) for the radians-to-degrees conversion.
    """
    return math.degrees(scan_data.angle_max - scan_data.angle_min)
# find the min range and its index
def min_range_index(ranges):
    """Return (smallest non-NaN range, its index in the filtered list)."""
    finite = [r for r in ranges if not math.isnan(r)]
    smallest = min(finite)
    return (smallest, finite.index(smallest))
# find the max range and its index
def max_range_index(ranges):
    """Return (largest non-NaN range, its index in the filtered list)."""
    finite = [r for r in ranges if not math.isnan(r)]
    largest = max(finite)
    return (largest, finite.index(largest))
# find the average range
def average_range(ranges):
    """Mean of all non-NaN readings."""
    finite = [r for r in ranges if not math.isnan(r)]
    return sum(finite) / float(len(finite))
def average_between_indices(ranges, i, j):
    """Mean of the non-NaN readings between indices i and j (inclusive),
    where the indices refer to the NaN-filtered list."""
    finite = [r for r in ranges if not math.isnan(r)]
    window = finite[i:j + 1]
    return sum(window) / float(len(window))
if __name__ == '__main__':
    # Initialise a new (anonymised) ROS node for this subscriber.
    rospy.init_node('scan_node', anonymous=True)
    # Subscribe to the "scan" topic; scan_callback runs per LaserScan message.
    rospy.Subscriber("scan", LaserScan, scan_callback)
    # spin() keeps Python from exiting until this node is stopped.
    rospy.spin()
```
|
{
"source": "jcortejoso/cloud-builders-community",
"score": 3
}
|
#### File: examples/tests/dag_integrity_test.py
```python
import unittest
from airflow.models import DagBag
class TestDags(unittest.TestCase):
LOAD_THRESHOLD_SECONDS = 2
def setUp(self):
self.dagbag = DagBag()
def test_dags_syntax(self):
for key in self.dagbag.dags.keys():
print(key)
self.assertFalse(
len(self.dagbag.import_errors),
f"DAG import errors. Errors: {self.dagbag.import_errors}"
)
if __name__ == '__main__':
    # Run the DAG integrity suite with verbose output when executed directly.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestDags)
    unittest.TextTestRunner(verbosity=2).run(suite)
```
|
{
"source": "jcostacurta11/panda-env",
"score": 2
}
|
#### File: jcostacurta11/panda-env/objects.py
```python
import os
import pybullet as p
import numpy as np
class Object(object):
    """Lazy-loading wrapper around a single pybullet body.

    Subclasses implement _load() and return a pybullet body id; this base
    class caches that id and exposes base-pose getters/setters.
    """

    def __init__(self):
        self.body_id = None   # pybullet body id, set on first load()
        self.loaded = False   # guards against loading twice

    def load(self):
        """Load the body on first call; later calls return the cached id."""
        if not self.loaded:
            self.body_id = self._load()
            self.loaded = True
        return self.body_id

    def get_position(self):
        """World-frame position of the base."""
        position, _ = p.getBasePositionAndOrientation(self.body_id)
        return position

    def get_orientation(self):
        """World-frame orientation of the base (as returned by pybullet)."""
        _, orientation = p.getBasePositionAndOrientation(self.body_id)
        return orientation

    def set_position(self, pos):
        """Move the base to pos, keeping its current orientation."""
        _, orientation = p.getBasePositionAndOrientation(self.body_id)
        p.resetBasePositionAndOrientation(self.body_id, pos, orientation)

    def set_orientation(self, orn):
        """Re-orient the base in place (position unchanged)."""
        position, _ = p.getBasePositionAndOrientation(self.body_id)
        p.resetBasePositionAndOrientation(self.body_id, position, orn)

    def set_position_orientation(self, pos, orn):
        """Set base position and orientation in one call."""
        p.resetBasePositionAndOrientation(self.body_id, pos, orn)
class YCBObject(Object):
    """A YCB-dataset object built from its visual and collision meshes."""

    def __init__(self, name, scale=1):
        super(YCBObject, self).__init__()
        base = os.path.join('assets', 'ycb', name)
        self.visual_filename = os.path.join(base, 'textured_simple.obj')
        self.collision_filename = os.path.join(base, 'textured_simple_vhacd.obj')
        self.scale = scale

    def _load(self):
        """Create collision + visual shapes and assemble a small dynamic body."""
        collision_id = p.createCollisionShape(p.GEOM_MESH,
                                              fileName=self.collision_filename,
                                              meshScale=self.scale)
        visual_id = p.createVisualShape(p.GEOM_MESH,
                                        fileName=self.visual_filename,
                                        meshScale=self.scale)
        # NOTE(review): spawn pose and mass are hard-coded; confirm callers
        # reposition the body after loading.
        return p.createMultiBody(baseCollisionShapeIndex=collision_id,
                                 baseVisualShapeIndex=visual_id,
                                 basePosition=[0.2, 0.2, 0.1],
                                 baseMass=0.1)
class InteractiveObj(Object):
    """An articulated object described by a URDF file."""

    def __init__(self, filename, scale=1):
        super(InteractiveObj, self).__init__()
        self.filename = filename
        self.scale = scale

    def _load(self):
        """Load the URDF and cache the base-link mass."""
        uid = p.loadURDF(self.filename, globalScaling=self.scale,
                         flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
        # getDynamicsInfo(uid, -1)[0] is the mass of the base link.
        self.mass = p.getDynamicsInfo(uid, -1)[0]
        return uid
class RBOObject(InteractiveObj):
    """An RBO-dataset object; resolves its URDF path from the object name."""

    def __init__(self, name, scale=1):
        urdf_path = os.path.join('assets', 'rbo', name, 'configuration',
                                 '{}.urdf'.format(name))
        super(RBOObject, self).__init__(urdf_path, scale)
```
#### File: jcostacurta11/panda-env/traj_mix_mc.py
```python
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from copy import deepcopy
# --- experiment configuration -------------------------------------------
num_agents = 1
# Cost weights [formation, collision, path length, goal distance];
# consumed by OptimizeMultiTraj.get_traj_cost via Navigation.theta.
theta = np.array([0, 6, 6, 8])
num_correction = 4  # NOTE(review): not referenced in this script -- confirm
T = 5  # action horizon: steps per agent per axis
gamma = 0.  # human effort weight
beta = 1.  # procedure trajectory cost weight
num_samples = 1000  # NOTE(review): not referenced in this script -- confirm
seq_decay = 0.9
seq_smooth = False
# distance function for two points
def dist(u, v):
    """Euclidean distance between two points given as numpy arrays."""
    diff = u - v
    return np.sqrt(np.dot(diff, diff))
#distance function for point and segment
# def dist_point_segment(u, v, p):
# l2 = np.sum((u - v) * (u - v))
# if (l2 == 0.0):
# return dist(p, v)
# t = max(0, min(1, np.sum((p-v) * (u-v)) / l2))
# v_proj = v + t * (u-v)
# return dist(v_proj, p)
class TrajEnv(object):
    """Kinematic trajectory environment for 2-D point agents.

    Maps flat per-agent action vectors to velocity and position rollouts.
    In this version the actions ARE the velocities (the acceleration
    integration was removed), and positions come from Euler integration
    with step `delta_t`.
    """
    def __init__(self, init_obs, delta_t, num_agents=1):
        # init_obs holds one 4-element row per agent: [x0, y0, vx0, vy0].
        super(TrajEnv, self).__init__()
        self.init_obs = init_obs
        self.delta_t = delta_t
        self.num_agents = num_agents
    def _v(self, actions):
        # actions is an n*2*T array flattened as
        #   [ax^0_0..ax^0_T, ay^0_0..ay^0_T, ax^1_0.., ...]
        # Returns velocities shaped [num_agents, 2, T].
        if len(actions.shape) > 1:
            actions = self.flat_action(actions)  # work with the flat vector
        act_len = len(actions) // (2 * self.num_agents)  # T: steps per agent/axis
        vx = []
        vy = []
        for i in range(self.num_agents):
            # Slice out this agent's x- and y-action subvectors.
            ax, ay = actions[i * 2 * act_len: (i * 2 + 1) * act_len], actions[(
                i * 2 + 1) * act_len:(i * 2 + 2) * act_len]
            # Actions are used directly as velocities here.
            tmp_vx = ax
            tmp_vy = ay
            vx.append(tmp_vx)
            vy.append(tmp_vy)
        vx = np.array(vx)
        vy = np.array(vy)
        # Stack to [2, n, T] then transpose to [n, 2, T].
        return np.stack([vx, vy]).transpose(1, 0, 2)
    def _pos(self, actions):
        # Returns positions shaped [num_agents, 2, T+1]; column 0 is the
        # initial position from init_obs.
        if len(actions.shape) > 1:
            actions = self.flat_action(actions)
        act_len = len(actions) // (2 * self.num_agents)
        x = []
        y = []
        v = self._v(actions)  # NOTE(review): computed but unused here
        for i in range(self.num_agents):
            ax, ay = actions[i * 2 * act_len: (i * 2 + 1) * act_len], actions[(
                i * 2 + 1) * act_len:(i * 2 + 2) * act_len]
            # Euler-integrate the velocity-valued actions from the start pose.
            tmp_x = np.hstack([self.init_obs[i][0], self.init_obs[i][0] + np.cumsum(
                ax) * self.delta_t])
            tmp_y = np.hstack([self.init_obs[i][1], self.init_obs[i][1] + np.cumsum(
                ay) * self.delta_t])
            x.append(tmp_x)
            y.append(tmp_y)
        x = np.array(x)
        y = np.array(y)
        return np.stack([x, y]).transpose(1, 0, 2)
    def flat_action(self, action):
        """Flatten an action array from [num_agents, 2, T] to 1-D."""
        assert len(action.shape) == 3
        assert action.shape[1] == 2
        return action.reshape([-1])
    def unflat_action(self, action, shape):
        """Inverse of flat_action: reshape back to [num_agents, 2, T]."""
        assert shape[1] == 2
        return action.reshape([shape[0], shape[1], shape[2]])
    def vis_traj(self, actions, task_obj, fig=None, ax=None):
        """Plot each agent's x/y trajectory; returns (fig, ax)."""
        pos = self._pos(actions)
        x = pos[:, 0, :]
        y = pos[:, 1, :]
        if ax is None:
            fig, ax = plt.subplots()
        for i in range(task_obj.num_agents):
            ax.plot(x[i], y[i], marker="o", color=task_obj.colors[i])
        ax.set_aspect('equal', adjustable='datalim')
        return fig, ax
class Navigation:
    """Static description of the multi-agent navigation task: start/goal
    states, actuation limits and the cost-weight vector theta."""

    def __init__(self, theta):
        self.num_agents = num_agents  # module-level experiment setting

        # Actuation and velocity limits.
        self.u_max = 5.0
        self.v_max = 5.0

        # Initial states for up to three agents.
        self.mass = np.array([1, 1, 1])
        self.x_init = np.array([0, -1, 1])
        self.y_init = np.array([2, 0, 0])
        self.vx_init = np.array([0, 0, 0])
        self.vy_init = np.array([0, 0, 0])
        self.colors = ['b', 'g', 'y']

        # Desired formation: each active agent's offset from the centroid
        # of the initial positions.
        active_x = self.x_init[:self.num_agents]
        active_y = self.y_init[:self.num_agents]
        self.x_formation = active_x - active_x.mean()
        self.y_formation = active_y - active_y.mean()

        # Goal states.
        self.x_end = np.array([0, -1, 1])
        self.y_end = np.array([12, 10, 10])
        self.vx_end = np.array([0, 0, 0])
        self.vy_end = np.array([0, 0, 0])

        # Cost weights [formation, collision, length, goal].
        self.theta = theta
class OptimizeMultiTraj(object):
    """Optimises the joint flat action vector of all agents with scipy's
    constrained `minimize`, under actuation and velocity bounds.

    T is the per-agent action horizon.  NOTE(review): gamma, beta,
    seq_decay and seq_smooth are stored but not used in the visible
    objective -- confirm before relying on them.
    """
    # T: time horizon of action for a single agent
    def __init__(self, task_env, task_obj, T, gamma=1., beta=1., seed=0, seq_decay=0, seq_smooth=False):
        self.actions = np.zeros(2*T*task_obj.num_agents)  # flat decision vector
        self.task_env = task_env
        self.num_agents = task_env.num_agents
        self.task_obj = task_obj
        self.T = T
        self.gamma = gamma
        self.beta = beta
        self.seed(seed)
        self.epsilon = 1e-6
        self.seq_decay = seq_decay
        self.seq_smooth = seq_smooth
        # Inequality constraints for scipy.optimize.minimize: each `fun`
        # must return a non-negative array at feasible points.
        self.constraints = [{'type': 'ineq', 'fun': self.a_con},
                            {'type': 'ineq', 'fun': self.v_con}]
        self.options = {'maxiter': 150000, 'disp': True}
    def seed(self, n):
        # Dedicated RNG so the warm-start perturbation is reproducible.
        self.np_random = np.random.RandomState(n)
    def init_actions(self):
        """Warm start: solve a simplified problem (length + goal costs only),
        then perturb the solution with Gaussian noise."""
        self.actions = np.zeros_like(self.actions)
        theta = deepcopy(self.task_obj.theta)  # save the real weights
        # Temporarily re-weight: zero formation/collision, weight length 1
        # and goal 2 (theta layout: [formation, collision, length, goal]).
        self.task_obj.theta[:-2] = 0
        self.task_obj.theta[-2] = 1
        self.task_obj.theta[-1] = 2
        res = minimize(self.objective, self.actions,
                       constraints=self.constraints, options=self.options)
        self.actions = res.x + self.np_random.randn(len(res.x))
        self.task_obj.theta = theta  # restore the real weights
    def get_traj_cost(self, x, y, task_obj):
        """Weighted trajectory cost over positions x, y (per-agent arrays).

        Terms: formation keeping, collision (currently disabled), total
        path length, and final distance to goal -- weighted by theta.
        """
        # Group centroid at every timestep.
        center_x = np.zeros_like(x[0])
        center_y = np.zeros_like(y[0])
        for i in range(task_obj.num_agents):
            center_x = center_x + x[i]
            center_y = center_y + y[i]
        center_x = center_x / task_obj.num_agents
        center_y = center_y / task_obj.num_agents
        # Mean squared deviation from the desired formation offsets.
        cost_formation = np.zeros_like(center_x)
        for i in range(task_obj.num_agents):
            cost_formation += np.square(x[i] - center_x - task_obj.x_formation[i]) + \
                np.square(y[i] - center_y - task_obj.y_formation[i])
        cost_formation = np.mean(cost_formation)
        # Obstacle-collision term is disabled (obstacles were removed).
        cost_collision = 0
        # Total path length: sum of segment lengths per agent.
        cost_length = 0
        for i in range(task_obj.num_agents):
            dx = x[i][:-1] - x[i][1:]
            dy = y[i][:-1] - y[i][1:]
            tmp_l = np.sum(np.sqrt(dx * dx + dy * dy))
            cost_length += tmp_l
        # Distance from each agent's final position to its goal.
        cost_goal = 0
        for i in range(task_obj.num_agents):
            pos = np.array([x[i][-1], y[i][-1]])
            goal = np.array([task_obj.x_end[i], task_obj.y_end[i]])
            cost_goal += dist(pos, goal)
        # Weighted sum with theta = [formation, collision, length, goal].
        cost = cost_formation * \
            task_obj.theta[0] + cost_collision * \
            task_obj.theta[1] + cost_length * \
            task_obj.theta[2] + cost_goal * task_obj.theta[3]
        return cost
    def objective(self, actions, task_obj=None):
        # actions is the full decision vector, shape (n*T*2,).
        if len(actions.shape) > 1:
            actions = self.task_env.flat_action(actions)
        if task_obj is None:
            task_obj = self.task_obj
        pos = self.task_env._pos(actions)
        x = pos[:, 0, :]
        y = pos[:, 1, :]
        return self.get_traj_cost(x, y, task_obj)
    def a_con(self, actions, epsilon=0):
        """Actuation bound: non-negative iff |a| <= u_max (+ epsilon)."""
        return self.task_obj.u_max + epsilon - np.abs(actions)
    def v_con(self, actions, epsilon=0):
        """Velocity bound: non-negative iff |v| <= v_max (+ epsilon)."""
        v = self.task_env._v(actions)
        return self.task_obj.v_max + epsilon - np.abs(v.reshape([-1]))
    def optimize(self):
        """Warm-start then solve; returns (actions [n, 2, T], final cost)."""
        self.init_actions()
        res = minimize(self.objective, self.actions,
                       constraints=self.constraints, options=self.options)
        self.actions = res.x
        self.actions = self.task_env.unflat_action(
            self.actions, [self.num_agents, 2, self.T])
        return self.actions, self.objective(res.x)
if __name__ == "__main__":
task_obj = Navigation(theta=theta)
init_obs = [[task_obj.x_init[i], task_obj.y_init[i],
task_obj.vx_init[i], task_obj.vy_init[i]] for i in range(num_agents)]
task_env = TrajEnv(init_obs=init_obs, delta_t=1, num_agents=num_agents)
optimizer = OptimizeMultiTraj(
task_env=task_env, task_obj=task_obj, T=T, gamma=gamma, beta=beta, seq_decay=seq_decay, seq_smooth=seq_smooth)
actions, _ = optimizer.optimize()
print(task_env._pos(actions))
print(task_env._v(actions))
fig_0, ax_0 = task_env.vis_traj(actions, task_obj)
plt.show()
```
|
{
"source": "jcostaroberts/mouseloaf",
"score": 3
}
|
#### File: mouseloaf/mouseloaf/actor.py
```python
class Actor(object):
    """Base class for pub/sub participants routed through a coordinator.

    Subclasses talk to the rest of the system only through the protected
    helpers below; the coordinator performs the actual routing.
    """

    def __init__(self, coordinator):
        # Actors are identified by their concrete class name.
        self.name = type(self).__name__
        self.coordinator = coordinator

    # --- API exposed to subclasses ---------------------------------------

    def _subscribe(self, publisher, handler):
        """Route messages emitted by `publisher` to `handler`."""
        self.coordinator.subscribe(self.name, publisher, handler)

    def _publish(self, message):
        """Broadcast `message` under this actor's name."""
        self.coordinator.publish(self.name, message)

    def _register_activity(self, activity_name, activity):
        """Register a named activity owned by this actor."""
        self.coordinator.register_activity(activity_name, self.name, activity)

    def _register_auxiliary_data(self, name, data):
        """Expose a named auxiliary data object to other actors."""
        self.coordinator.register_auxiliary_data(name, data)

    def _mount_auxiliary_data(self, name, ready_callback):
        """Attach to another actor's auxiliary data; returns the mount."""
        return self.coordinator.mount_auxiliary_data(self.name, name,
                                                     ready_callback)
```
#### File: plugin/feed/EverythingIsFree.py
```python
from datetime import datetime
from feed import FeedBase
class EverythingIsFree(FeedBase):
    """A quote feed that quotes every subscribed symbol at one cent."""

    def feed_init(self):
        self.symbols = []
        # Re-publish quotes for all subscribed symbols once per second.
        self._get_quote_on_timer(self.name, self._get_quote, 1)

    def _get_quote(self):
        for s in self.symbols:
            self._publish_data(s, ask_price=0.01)

    def handle_symbol_request(self, symbol):
        """Start quoting `symbol` on the next timer tick."""
        self.symbols.append(symbol)
        # Fix: was a Python 2 print statement; this call form works on
        # both Python 2 and 3 and produces identical output.
        print("[%s] Feed %s received request for ticker %s" %
              (datetime.now(), self.name, symbol))
```
|
{
"source": "jcostateixeira/fhir-roadmap",
"score": 3
}
|
#### File: jcostateixeira/fhir-roadmap/extract_metadata2.py
```python
import os, json
import pandas as pd
import glob
from csv_diff import load_csv, compare
import datetime
# File names/substrings skipped when scanning a package for conformance
# resources (package manifests, index files, validation output, examples).
EXCLUSION_LIST=['package-list.json',".index.json",'package.json',"validation-summary","example"]
def create_current_df(path):
    """Read the semicolon-separated CSV at `path`; return None when it is
    missing so callers can distinguish 'first run' from 'empty data'."""
    if not os.path.exists(path):
        return None
    return pd.read_csv(path, sep=";", header=0)
def get_target_id(row, resources_df):
    """Return the `id` of the resource whose `url` equals row["target_url"],
    or None when no resource matches.

    Used (via DataFrame.apply) to resolve relation targets back to local
    resource ids after a package has been parsed.
    """
    # Vectorised lookup instead of the original per-row iterrows() scan
    # (one boolean mask instead of O(n) Python-level iteration per call).
    matches = resources_df.loc[resources_df["url"] == row["target_url"], "id"]
    if matches.empty:
        return None
    # Mirror the original behaviour: the first matching resource wins.
    return matches.iloc[0]
def extract_relation(res, resource_type):
    """Derive relation.csv entries from one conformance resource.

    Mapping by resource type:
      Profile / Data type (snapshot AND differential elements):
        Bound_Req/Ext/Pref/Exam <- element.binding[strength].valueSet
        extension               <- element.type[code=Extension].profile
        references              <- element.type[].targetProfile
      ValueSet:
        valuesFrom <- compose.include.system
        includes   <- compose.include.valueSet

    Returns a list of dicts {source, target_url, relation}.
    """
    dict_relat = []
    # Map FHIR binding strength to the relation label used in the CSV.
    relation_type_data = {"required": "Bound_Req", "extensible": "Bound_Ext", "preferred": "Bound_Pref", "example": "Bound_Exam"}
    if resource_type in ["Profile", "Data type"]:
        elements = res.get('snapshot', {}).get('element', [])
        for element in elements:
            binding = element.get("binding", {}).get("strength")
            value = element.get("binding", {}).get("valueSet")
            if binding:
                stripped = value.split("|", 1)[0]  # drop any |version suffix
                dict_relat.append({"source": res.get("id"), "target_url": stripped, "relation": relation_type_data[binding]})
            for l in element.get("type", []):
                if l.get("code", {}) == "Extension":
                    if l.get("profile"):
                        dict_relat.append({"source": res.get("id"), "target_url": l.get("profile")[0], "relation": "extension"})
                # NOTE(review): targetProfile handling reconstructed at this
                # level (applies to every type entry) -- confirm indentation
                # against the original source.
                for target_profile in l.get("targetProfile", []):
                    dict_relat.append({"source": res.get("id"), "target_url": target_profile, "relation": "references"})
        # Repeat the same extraction over the differential view.
        elements = res.get('differential', {}).get('element', [])
        for element in elements:
            binding = element.get("binding", {}).get("strength")
            value = element.get("binding", {}).get("valueSet")
            if binding:
                stripped = value.split("|", 1)[0]  # drop any |version suffix
                dict_relat.append({"source": res.get("id"), "target_url": stripped, "relation": relation_type_data[binding]})
            for l in element.get("type", []):
                if l.get("code", {}) == "Extension":
                    if l.get("profile"):
                        dict_relat.append({"source": res.get("id"), "target_url": l.get("profile")[0], "relation": "extension"})
                for target_profile in l.get("targetProfile", []):
                    dict_relat.append({"source": res.get("id"), "target_url": target_profile, "relation": "references"})
    elif resource_type == "ValueSet":
        for s in res.get("compose", {}).get("include", []):
            if s.get("system"):
                dict_relat.append({"source": res.get("id"), "target_url": s.get("system"), "relation": "valuesFrom"})
            if s.get("valueSet"):
                dict_relat.append({"source": res.get("id"), "target_url": s.get("valueSet")[0], "relation": "includes"})
    return dict_relat
def read_package(folder):
    """Walk `folder` recursively, parse every JSON resource, and build two
    DataFrames: (resources_df, relation_df).

    Package-level metadata from packages/package.json is copied onto every
    resource row; relation targets are resolved to local resource ids at
    the end via get_target_id.
    """
    new_files = []
    # r=root, d=directories, f=files
    for r, d, f in os.walk(folder):
        for file in f:
            if file.endswith(".json"):
                new_files.append(os.path.join(r, file))
    result = []
    relations = []
    record_upper = {}
    # Pass 1: pull package-wide metadata out of packages/package.json.
    for index, js in enumerate(new_files):
        if (js == 'packages/package.json'):
            with open(js, encoding='utf-8') as json_file:
                json_text = json.load(json_file)
                date = '1900'  # old sentinel date, overwritten when present
                if('date' in json_text):
                    record_upper["pack_last_review_date"] = json_text['date']
                if('author' in json_text):
                    record_upper["pack_author"] = json_text['author']
                if('fhirVersion' in json_text) and (len(json_text['fhirVersion']) == 1):
                    record_upper["pack_fhir_version"] = json_text['fhirVersion']
                if('maintainers' in json_text):
                    for m in json_text['maintainers']:
                        if ('url' in m):
                            record_upper["pack_wg_url"] = m['url']
    # Pass 2: one record per conformance resource (exclusions skipped).
    for index, js in enumerate(new_files):
        if not any(ext in js for ext in EXCLUSION_LIST):
            with open(js, encoding='utf-8') as json_file:
                record = record_upper.copy()
                json_text = json.load(json_file)
                rtype = json_text['resourceType']
                record["id"] = json_text.get('id')
                if (rtype == "StructureDefinition"):
                    # Refine StructureDefinitions into a detailed type.
                    if (json_text['kind'] == 'logical'):  # logical model
                        record["type"] = "Logical Model"
                    if (json_text['type'].lower() == 'extension'):  # extension
                        record["type"] = "Extension"
                    if (json_text['kind'] == 'resource'):  # profile
                        record["type"] = "Profile"
                    if (json_text['kind'] == 'complex-type') and (json_text['type'].lower() != 'extension'):  # data type
                        record["type"] = "Data type"
                else:
                    record["type"] = rtype  # other resources keep resourceType
                if (rtype == "NamingSystem"):
                    # NamingSystems carry their canonical in uniqueId, not url:
                    # take the preferred uri-typed entry.
                    if ("uniqueId" in json_text):
                        uris = [x for x in json_text["uniqueId"] if (x["type"] == "uri")]
                        record["url"] = [x for x in uris if x["preferred"] == True][0]["value"]
                else:
                    record["url"] = json_text.get('url')
                record["name"] = json_text.get('name')
                record["version"] = json_text.get('version')
                record["date"] = json_text.get('date')
                record["topic"] = json_text.get('topic')
                record["subtopic"] = json_text.get('subtopic')
                record["owner"] = json_text.get('owner')
                record["maturity"] = json_text.get('maturity')
                record["status"] = json_text.get('status')
                # NOTE(review): the next three overwrite the package-level
                # values with None whenever the resource json lacks the key
                # (record_upper already provided them) -- confirm intended.
                record["pack_wg_url"] = json_text.get('pack_wg_url')
                record["pack_author"] = json_text.get('pack_author')
                record["pack_last_review_date"] = json_text.get('pack_last_review_date')
                record["legal"] = json_text.get('legal')
                # Collect relation rows for this resource.
                relations.extend(extract_relation(json_text, record["type"]))
                result.append(record)
    df_relation = pd.DataFrame(relations).drop_duplicates()
    # Resolve relation targets to local resource ids now that every
    # resource of this package is known.
    df_relation["target_id"] = df_relation.apply(get_target_id, resources_df=pd.DataFrame(result), axis=1)
    return pd.DataFrame(result), df_relation
# CSV structure:
# idx (autoincrement index)
# Topic
# Subtopic
# Type
# Name
# Status
# URL
# FhirVersion
# Owner
# Date proposed
# Published date
# last revision date
# Maturity
# Legal Status
# Version.
def update_resource_csv(old, new):
    """Merge freshly-extracted resource rows (`new`) into the existing
    resources DataFrame (`old`), keyed on the canonical `url`.

    Rows whose url already exists are updated in place; unknown urls are
    appended.  The merged frame is written back to resources.csv and a
    change-log dict {"updated", "created", "other"} is returned.
    """
    # Columns that are overwritten wholesale on every sync (same order as
    # the original per-column assignments).
    _SYNCED_COLUMNS = ("date", "version", "status", "type", "id",
                       "pack_wg_url", "pack_author", "pack_last_review_date",
                       "date_started", "date_published", "date_reviewed",
                       "maturity", "legal")
    list_of_changes = {"updated": [], "created": [], "other": []}
    for idx, row in new.iterrows():
        if row["url"] in old["url"].values:
            list_of_changes["updated"].append(row["url"])
            mask = old["url"] == row["url"]
            for col in _SYNCED_COLUMNS:
                old.loc[mask, col] = row.get(col)
        elif row["url"] is not None:  # new resources must carry a url
            list_of_changes["created"].append(row["url"])
            # Fix: DataFrame.append was removed in pandas 2.0 -- concat the
            # single-row frame instead.
            old = pd.concat([old, row.to_frame().T], ignore_index=True)
        else:
            list_of_changes["other"].append("something weird on row " + str(idx))
    old.to_csv("resources.csv", sep=";", index=False)
    return list_of_changes
def update_relation_csv(old, new):
    """Merge freshly-extracted relation rows (`new`) into the existing
    relations DataFrame (`old`), keyed on (source, target_url).

    Writes the merged frame back to relation.csv and returns a change-log
    dict {"updated", "created", "other"}.  NOTE: like the original, this
    overwrites any manual edits to matching rows.
    """
    list_of_changes = {"updated": [], "created": [], "other": []}
    # Synthetic primary key on both frames: source + target_url.
    old["pk"] = old["source"] + old["target_url"]
    new["pk"] = new["source"] + new["target_url"]
    for idx, row in new.iterrows():
        if row["pk"] in old["pk"].values:
            list_of_changes["updated"].append(row["source"])
            mask = old["pk"] == row["pk"]
            for col in ("target_id", "relation", "target_url"):
                old.loc[mask, col] = row.get(col)
        elif row["source"] is not None:  # new relations must carry a source
            list_of_changes["created"].append(row["source"])
            # Fix: DataFrame.append was removed in pandas 2.0 -- concat the
            # single-row frame instead.
            old = pd.concat([old, row.to_frame().T], ignore_index=True)
        else:
            list_of_changes["other"].append("something weird on row " + str(idx))
    old.drop(columns=["pk"], inplace=True)
    old.to_csv("relation.csv", sep=";", index=False)
    return list_of_changes
def create_csv_and_update(current_resource, current_relation, package_folder):
    """Parse one package folder and fold its resources/relations into the
    CSVs, creating them when they do not exist yet.

    Logic: if a url already exists in the CSV, its row is updated;
    otherwise a new row is appended.  Returns a status dict describing
    what happened to each CSV ("changed" / "new" / "error").
    """
    outcome = {"resource_status": "error", "resource_outcome": "N/A", "relation_outcome": "N/A", "relation_status": "error"}
    resource_df, relation_df = read_package(package_folder)
    # Resources: merge into the existing CSV, or bootstrap a new one.
    if type(current_resource) == pd.DataFrame and len(resource_df) > 0:
        print("has a resource csv which was updated")
        changes = update_resource_csv(current_resource, resource_df)
        outcome["resource_status"] = "changed"
        outcome["resource_outcome"] = changes
    elif type(current_resource) != pd.DataFrame and len(resource_df) > 0:
        print("no resource csv and new written")
        resource_df.to_csv("resources.csv", sep=";", index=False)
        outcome["resource_status"] = "new"
    else:
        print("no resource csv and not able to create new")
    # Relations: same merge-or-bootstrap treatment.
    if type(current_relation) == pd.DataFrame and len(relation_df) > 0:
        print("has a relation csv which was updated")
        changes = update_relation_csv(current_relation, relation_df)
        outcome["relation_status"] = "changed"
        outcome["relation_outcome"] = changes
    elif type(current_relation) != pd.DataFrame and len(relation_df) > 0:
        print("no relation csv and new written")
        relation_df.to_csv("relation.csv", sep=";", index=False)
        outcome["relation_status"] = "new"
    else:
        print("no relation csv and not able to create new")
    return outcome
def getPackageFolders(path):
    """Recursively collect (folder, package_date) tuples for every
    directory under `path` that contains a package.json, sorted by date
    then path.  Raises ValueError when a package.json carries no `date`.
    """
    # A file cannot contain packages.
    if os.path.isfile(path):
        return []
    found = []
    entries = os.listdir(path)
    if 'package.json' in entries:
        with open(path + "/package.json") as fh:
            pkg_json = json.load(fh)
        package_date = pkg_json.get("date")  # not every package has one
        if not package_date:
            raise ValueError("Package without date: " + path)
        found.append((path,
                      datetime.datetime.strptime(package_date, '%Y%m%d%H%M%S')))
    # Recurse into subdirectories.
    for entry in entries:
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            found.extend(getPackageFolders(child))
    # Oldest packages first; ties broken by path.
    return sorted(found, key=lambda item: (item[1], item[0]))
def main(package_folder):
    """Sync resources.csv / relation.csv from every package under
    `package_folder` (oldest package first), back the CSVs up beforehand,
    and append a timestamped row describing the run to diff.csv.
    """
    folders = getPackageFolders(package_folder)
    print("Folders---", folders)
    current_df = create_current_df("resources.csv")
    current_relation = create_current_df("relation.csv")
    # Back up the existing CSVs before mutating them.
    if type(current_df) == pd.DataFrame:
        current_df.to_csv("resources_backup.csv", sep=";", index=False)
    if type(current_relation) == pd.DataFrame:
        current_relation.to_csv("relation_backup.csv", sep=";", index=False)
    for pack in folders:
        # Re-read on every iteration: earlier packages may have written rows.
        current_df = create_current_df("resources.csv")
        current_relation = create_current_df("relation.csv")
        create_csv_and_update(current_df, current_relation, pack[0])
    if os.path.exists("resources_backup.csv"):
        diff = compare(
            load_csv(open("resources_backup.csv"), key="url"),
            load_csv(open("resources.csv"), key="url")
        )
        print(diff)
        diff_df = pd.DataFrame.from_dict(diff, orient='index').T
        diff_df["timestamp"] = datetime.datetime.now().strftime("%Y%m%d%H")
        if not os.path.exists("diff.csv"):
            diff_df.to_csv("diff.csv", sep=";", index=False)
        else:
            current_diff = create_current_df("diff.csv")
            # BUG FIX: the original called current_diff.append(...) and
            # discarded the returned frame, so diff.csv never accumulated
            # history.  Assign the concatenation (DataFrame.append is also
            # removed in pandas 2.x, so use pd.concat).
            current_diff = pd.concat([current_diff, diff_df], ignore_index=True)
            current_diff.to_csv("diff.csv", sep=";", index=False)
    return "ok"
main("packages")
#print(dict_relat)
```
#### File: jcostateixeira/fhir-roadmap/extract_metadata.py
```python
import os, json
import pandas as pd
import glob
import datetime
def create_current_df(path):
    """Load the semicolon-separated CSV at `path` as a list of row dicts;
    return None when the file does not exist."""
    if not os.path.exists(path):
        return None
    return pd.read_csv(path, sep=';', header=0).to_dict(orient="records")
def update_record(record, key, value):
    """Set record[key] = value unless value is the empty string.

    Empty strings mean "field absent in the source json"; skipping them
    preserves any package-level default already present in the record.
    (Dead commented-out fallback code removed.)
    """
    if (value != ''):
        record[key] = value
# CSV structure:
# idx (autoincrement index)
# Topic
# Subtopic
# Type
# Name
# Status
# URL
# FhirVersion
# Owner
# Date proposed
# Published date
# last revision date
# Maturity
# Legal Status
# Version.
def read_package(folder):
    """Walk *folder*, parse the FHIR JSON resources found there, and return a
    list of metadata records (one dict per resource file).

    ``packages/package.json`` (if present) contributes package-wide fields that
    are copied into every record. Index, package-list, validation-summary and
    example files are skipped.
    """
    def _set_if_nonempty(record, key, value):
        # empty string means "no data": keep any package-level value
        if value != '':
            record[key] = value

    # 1. collect all .json files under the folder
    json_files = []
    for root, _dirs, files in os.walk(folder):
        for file_name in files:
            if file_name.endswith(".json"):
                json_files.append(os.path.join(root, file_name))

    # 2. package-wide metadata from packages/package.json, copied into every
    #    per-resource record below
    record_upper = {}
    for path in json_files:
        if path == 'packages/package.json':
            with open(path, encoding='utf-8') as json_file:
                json_text = json.load(json_file)
            if 'date' in json_text:
                record_upper["pack_last_review_date"] = json_text['date']
            if 'author' in json_text:
                record_upper["pack_author"] = json_text['author']
            if 'fhirVersion' in json_text and len(json_text['fhirVersion']) == 1:
                record_upper["pack_fhir_version"] = json_text['fhirVersion']
            if 'maintainers' in json_text:
                for maintainer in json_text['maintainers']:
                    if 'url' in maintainer:
                        record_upper["pack_wg_url"] = maintainer['url']

    # 3. one record per resource file
    skip_markers = ['package-list.json', ".index.json", 'package.json',
                    "validation-summary", "example"]
    result = []
    for path in json_files:
        if any(marker in path for marker in skip_markers):
            continue
        with open(path, encoding='utf-8') as json_file:
            json_text = json.load(json_file)
        record = record_upper.copy()
        rtype = json_text['resourceType']
        record["id"] = json_text.get('id')

        # Detailed type: StructureDefinitions are classified by kind/type;
        # every other resource just uses its resourceType. Initializing dtype
        # up front fixes a NameError when none of the kinds below matched.
        dtype = rtype
        if rtype == "StructureDefinition":
            if json_text['kind'] == 'logical':
                dtype = "Logical Model"
            if json_text['type'] == 'extension':
                dtype = "Extension"
            if json_text['kind'] == 'resource':
                dtype = "Profile"
            if json_text['kind'] == 'complex-type' and json_text['type'] != 'extension':
                dtype = "Data type"

        name = json_text.get('name', '')
        # bug fix: the original's else-branch assigned name = '' here, so a
        # missing 'version' clobbered the name and left `version` undefined
        version = json_text.get('version', '')
        url = json_text.get('url', '')
        status = json_text.get('status', '')
        fhir_version = json_text.get('fhirVersion', '')
        if 'date' in json_text:
            date = json_text['date'].split("T")[0]
            date = datetime.datetime.strptime(date, '%Y-%m-%d')
        else:
            date = ''

        _set_if_nonempty(record, "version", version)
        _set_if_nonempty(record, "name", name)
        _set_if_nonempty(record, "url", url)
        _set_if_nonempty(record, "date", date)
        _set_if_nonempty(record, "status", status)
        _set_if_nonempty(record, "type", dtype)
        _set_if_nonempty(record, "fhirVersion", fhir_version)
        result.append(record)
    return result
## Done. Now the logic:
## 1. If there is a URL and that URL already exists in the csv, replace the values in the CSV
## 2. If there is no CSV, then create a new entry with the values. Default values if not existing to ''
## Also update the data table:
# the URL for the resource is a hyperlink on the resource name - opens a new window
# the URL for the maintainer is a hyperlink on the owner - opens a new window
# save the CSV back
#df=create_current_df('resources.csv')
# Load any existing resources.csv into record dicts.
# NOTE(review): `df` is read but never used below -- presumably intended for a
# merge/update step; confirm intent before removing.
if os.path.exists('resources.csv'):
    df = pd.read_csv('resources.csv', sep =';', header=0).to_dict(orient="records")
n_list= read_package("packages/")
#print(n_list)
n_df=pd.DataFrame(n_list)
print(n_df)
# NOTE(review): written with pandas defaults (comma separator, index column)
# but read back above with sep=';' -- the round trip looks inconsistent; verify.
n_df.to_csv("resources.csv")
```
|
{
"source": "jcosteatcyberark/cyberark-aws-auto-onboarding",
"score": 2
}
|
#### File: src/shared_libraries/pvwa_api_calls.py
```python
import requests
import json
from pvwa_integration import PvwaIntegration
from log_mechanism import LogMechanism
DEBUG_LEVEL_DEBUG = 'debug' # Outputs all information
DEFAULT_HEADER = {"content-type": "application/json"}
pvwa_integration_class = PvwaIntegration()
logger = LogMechanism()
def create_account_on_vault(session, account_name, account_password, store_parameters_class, platform_id, address,
instance_id, username, safe_name):
logger.trace(session, account_name, store_parameters_class, platform_id, address,
instance_id, username, safe_name, caller_name='create_account_on_vault')
logger.info(f'Creating account in vault for {instance_id}')
header = DEFAULT_HEADER
header.update({"Authorization": session})
url = f"{store_parameters_class.pvwa_url}/WebServices/PIMServices.svc/Account"
data = {
"account": {
"safe": safe_name,
"platformID": platform_id,
"address": address,
"accountName": account_name,
"password": <PASSWORD>,
"username": username,
"disableAutoMgmt": "false"
}
}
if platform_id == store_parameters_class.windows_platform_name:
logger.trace(f'Adding LogonDomain property to account {instance_id}', caller_name='create_account_on_vault')
data['account']['properties'] = [{"Key": "LogonDomain", "Value": address}]
rest_response = pvwa_integration_class.call_rest_api_post(url, json.dumps(data), header)
if rest_response.status_code == requests.codes.created:
logger.info(f"Account for {instance_id} was successfully created")
return True, ""
logger.error(f'Failed to create the account for {instance_id} from the vault. status code:{rest_response.status_code}')
return False, f"Error Creating Account, Status Code:{rest_response.status_code}"
def rotate_credentials_immediately(session, pvwa_url, account_id, instance_id):
    """Trigger an immediate credential change for *account_id*.

    Returns True on HTTP 200, False otherwise.
    """
    logger.trace(session, pvwa_url, account_id, instance_id, caller_name='rotate_credentials_immediately')
    logger.info(f'Rotating {instance_id} credentials')
    # bug fix: copy instead of mutating the shared DEFAULT_HEADER
    # (in-place update leaked the Authorization token across calls)
    header = dict(DEFAULT_HEADER)
    header.update({"Authorization": session})
    url = f"{pvwa_url}/API/Accounts/{account_id}/Change"
    data = ""
    rest_response = pvwa_integration_class.call_rest_api_post(url, data, header)
    if rest_response.status_code == requests.codes.ok:
        logger.info(f"Call for immediate key change for {instance_id} performed successfully")
        return True
    logger.error(f'Failed to call key change for {instance_id}. an error occurred')
    return False
def get_account_value(session, account, instance_id, rest_url):
    """Retrieve the stored password/value of *account* from the vault.

    Returns the response text on success, or False when the account is not
    found or an unexpected status code is returned.
    """
    logger.trace(session, account, instance_id, rest_url, caller_name='get_account_value')
    logger.info(f'Getting {instance_id} account from vault')
    # bug fix: copy instead of mutating the shared DEFAULT_HEADER
    # (in-place update leaked the Authorization token across calls)
    header = dict(DEFAULT_HEADER)
    header.update({"Authorization": session})
    pvwa_url = f"{rest_url}/api/Accounts/{account}/Password/Retrieve"
    rest_logon_data = """{ "reason":"AWS Auto On-Boarding Solution" }"""
    rest_response = pvwa_integration_class.call_rest_api_post(pvwa_url, rest_logon_data, header)
    if rest_response.status_code == requests.codes.ok:
        return rest_response.text
    if rest_response.status_code == requests.codes.not_found:
        logger.info(f"Account {account} for instance {instance_id}, not found on vault")
        return False
    logger.error(f"Unexpected result from rest service - get account value, status code: {rest_response.status_code}")
    return False
def delete_account_from_vault(session, account_id, instance_id, pvwa_url):
    """Delete *account_id* from the vault.

    Returns True on success; raises Exception on 404 or any other failure.
    """
    logger.trace(session, account_id, instance_id, pvwa_url, caller_name='delete_account_from_vault')
    logger.info(f'Deleting {instance_id} from vault')
    # bug fix: copy instead of mutating the shared DEFAULT_HEADER
    # (in-place update leaked the Authorization token across calls)
    header = dict(DEFAULT_HEADER)
    header.update({"Authorization": session})
    rest_url = f"{pvwa_url}/WebServices/PIMServices.svc/Accounts/{account_id}"
    rest_response = pvwa_integration_class.call_rest_api_delete(rest_url, header)
    if rest_response.status_code != requests.codes.ok:
        if rest_response.status_code == requests.codes.not_found:
            logger.error(f"Failed to delete the account for {instance_id} from the vault. The account does not exists")
            raise Exception(f"Failed to delete the account for {instance_id} from the vault. The account does not exists")
        logger.error(f"Failed to delete the account for {instance_id} from the vault. an error occurred")
        raise Exception(f"Unknown status code received {rest_response.status_code}")
    logger.info(f"The account for {instance_id} was successfully deleted")
    return True
def check_if_kp_exists(session, account_name, safe_name, instance_id, rest_url):
    """Check whether a key-pair account named *account_name* is already onboarded.

    Returns the account id when found, False when the search is empty, and
    raises Exception on REST errors.
    """
    logger.trace(session, account_name, safe_name, instance_id, rest_url, caller_name='check_if_kp_exists')
    logger.info('Checking if key pair is onboarded')
    # bug fix: copy instead of mutating the shared DEFAULT_HEADER
    # (in-place update leaked the Authorization token across calls)
    header = dict(DEFAULT_HEADER)
    header.update({"Authorization": session})
    # 2 options of search - if safe name not empty, add it to query, if not - search without it
    if safe_name:  # has value
        pvwa_url = f"{rest_url}/api/accounts?search={account_name}&filter=safeName eq {safe_name}"
    else:  # has no value
        pvwa_url = f"{rest_url}/api/accounts?search={account_name}"
    try:
        rest_response = pvwa_integration_class.call_rest_api_get(pvwa_url, header)
        if not rest_response:
            raise Exception(f"Unknown Error when calling rest service - retrieve account - REST response: {rest_response}")
    except Exception as e:
        logger.error(f'An error occurred: {str(e)}')
        raise Exception(e)
    if rest_response.status_code == requests.codes.ok:
        # parse once; an empty result looks like {"count": 0, "value": []}
        body = rest_response.json()
        if body.get('value'):
            return body['value'][0]['id']
        return False
    logger.error(f"Status code {rest_response.status_code}, received from REST service")
    raise Exception(f"Status code {rest_response.status_code}, received from REST service")
def retrieve_account_id_from_account_name(session, account_name, safe_name, instance_id, rest_url):
    """Resolve an account id from *account_name*, filtered to *instance_id*.

    Returns the id when a matching account exists, False otherwise; raises
    Exception on REST errors.
    """
    logger.trace(session, account_name, safe_name, instance_id, rest_url, caller_name='retrieve_account_id_from_account_name')
    logger.info('Retrieving account_id from account_name')
    # bug fix: copy instead of mutating the shared DEFAULT_HEADER
    # (in-place update leaked the Authorization token across calls)
    header = dict(DEFAULT_HEADER)
    header.update({"Authorization": session})
    # 2 options of search - if safe name not empty, add it to query, if not - search without it
    if safe_name:  # has value
        pvwa_url = f"{rest_url}/api/accounts?search={account_name}&filter=safeName eq {safe_name}"
    else:  # has no value
        pvwa_url = f"{rest_url}/api/accounts?search={account_name}"
    try:
        rest_response = pvwa_integration_class.call_rest_api_get(pvwa_url, header)
        if not rest_response:
            raise Exception("Unknown Error when calling rest service - retrieve account_id")
    except Exception as e:
        logger.error(f'An error occurred:\n{str(e)}')
        raise Exception(e)
    if rest_response.status_code == requests.codes.ok:
        # parse once; an empty result looks like {"count": 0, "value": []}
        body = rest_response.json()
        if body.get('value'):
            return filter_get_accounts_result(body['value'], instance_id)
        logger.info(f'No match for account: {account_name}')
        return False
    logger.error(f"Status code {rest_response.status_code}, received from REST service")
    raise Exception(f"Status code {rest_response.status_code}, received from REST service")
def filter_get_accounts_result(parsed_json_response, instance_id):
    """Return the id of the first account whose name contains *instance_id*,
    or False when none matches."""
    logger.trace(parsed_json_response, instance_id, caller_name='filter_get_accounts_result')
    matches = (account['id'] for account in parsed_json_response
               if instance_id in account['name'])
    return next(matches, False)
```
|
{
"source": "jcostlow/suntime",
"score": 3
}
|
#### File: jcostlow/suntime/suntime.py
```python
import argparse
import datetime
import pytz
import astral
import astral.sun
import ics
def main():
    """Print the time the sun reaches the requested elevation for each day in
    the range, optionally writing each time as a 10-minute .ics calendar event.
    """
    args = parse_arguments()
    a = astral.Observer(latitude= args.latlong[0], longitude = args.latlong[1], elevation= args.altitude)
    thisday = datetime.datetime(args.year,args.month, args.day)
    lastday = thisday + datetime.timedelta(days = args.numdays)
    if args.icsfile:
        c=ics.Calendar()
    while thisday < lastday:
        # local time at which the sun crosses args.elevation degrees on thisday
        suntime = astral.sun.time_at_elevation(a, date=thisday,
            elevation=args.elevation, tzinfo=args.timezone)
        print (suntime)
        thisday = thisday + datetime.timedelta(days=1)
        if args.icsfile:
            # TODO, add an argument for the title of the calendar entry.
            # NOTE(review): duration is passed as a dict -- confirm the ics
            # library accepts this form rather than a timedelta.
            e=ics.event.Event(name='Sun time', begin = suntime, duration= {'minutes':10} )
            c.events.add(e)
    if args.icsfile:
        args.icsfile.write(str(c))
def parse_arguments():
    '''Parse arguments to print times when the sun is at your favorite elevation'''
    parser = argparse.ArgumentParser(
        description='Print times when the sun is at your favorite elevation.'
    )
    locationgroup = parser.add_mutually_exclusive_group(required=True)
    locationgroup.add_argument(
        '--city', action='store', help='Specify a city. TODO DOES NOT WORK YET'
    )
    locationgroup.add_argument(
        '--latlong', nargs=2, action='store', type=float, help='Latitude longitude', metavar=('Latitude', 'Longitude')
    )
    parser.add_argument(
        # bug fix: without type=float, a user-supplied --altitude arrived as a
        # string (argparse only applies type to command-line values)
        '--altitude', type=float, help='Altitude in meters at the location', default=2
    )
    parser.add_argument(
        '--timezone', help='Local timezone', type=lambda tz: pytz.timezone(tz), default=pytz.timezone('US/Pacific')
    )
    parser.add_argument(
        '--year', '-y', help='Year', type=int, default=datetime.datetime.now().year
    )
    parser.add_argument(
        '--month', '-m', help='Month', type=int, default=datetime.datetime.now().month
    )
    parser.add_argument(
        '--day', help='Day', type=int, default=datetime.datetime.now().day
    )
    parser.add_argument(
        '--numdays', help='Number of Days to write', type=int, default=1
    )
    parser.add_argument(
        '--elevation', type=float, help='Elevation on the sun in degrees, dawn=0, dusk=180', default=174
    )
    parser.add_argument('--icsfile', type=argparse.FileType('w') )
    # TODO, add an argument for whether to set or disable an alarm on the calendar entry.
    return parser.parse_args()
if __name__ == '__main__':
main()
```
|
{
"source": "jcosto/news_summary",
"score": 3
}
|
#### File: jcosto/news_summary/news_nlp_lemma.py
```python
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
# nltk.download('wordnet')
# nltk.download('stopwords')
from nltk.corpus import stopwords
# nltk.download('punkt')
stop_words = set(stopwords.words('english'))
class LemmaTokenizer:
    """Callable tokenizer: word-tokenizes a document and WordNet-lemmatizes
    each token, dropping common punctuation tokens."""

    # tokens to drop entirely (punctuation as produced by word_tokenize)
    ignore_tokens = [',', '.', ';', ':', '"', '``', "''", '`']

    def __init__(self):
        self.wnl = WordNetLemmatizer()

    def __call__(self, doc):
        tokens = word_tokenize(doc)
        return [self.wnl.lemmatize(tok)
                for tok in tokens
                if tok not in self.ignore_tokens]
# Module-level tokenizer shared by importers, plus the stop-word list passed
# through the same lemmatizer so stop words match lemmatized document tokens.
tokenizer = LemmaTokenizer()
token_stop = tokenizer(' '.join(stop_words))
```
#### File: jcosto/news_summary/news.py
```python
from pprint import pprint
from typing import Callable, List
from selenium import webdriver
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
import time
import os
import requests
dirs = list()
out_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'out')
dirs.append(out_dir)
html_dir = os.path.join(out_dir, "html")
dirs.append(html_dir)
json_dir = os.path.join(out_dir, "json")
dirs.append(json_dir)
def ensure_dirs_exist(dirs_):
    """Create every directory in *dirs_* (including parents) if missing."""
    for d in dirs_:
        # exist_ok avoids the check-then-create race of exists() + makedirs()
        os.makedirs(d, exist_ok=True)
ensure_dirs_exist(dirs)
from msedge.selenium_tools import EdgeOptions, Edge
def get_page_source_selenium(url, sleep_s=0):
    """Fetch *url* with a headless Edge (Chromium) WebDriver and return the
    rendered page source.

    sleep_s: extra seconds to wait after navigation so client-side JS can run.
    """
    print("get_page_source_selenium")
    options = EdgeOptions()
    options.use_chromium = True
    options.add_argument("headless")
    options.add_argument("disable-gpu")
    # NOTE(review): assumes MicrosoftWebDriver.exe sits in the current working
    # directory -- confirm deployment layout.
    driver = Edge('MicrosoftWebDriver.exe', options=options)
    driver.get(url)
    if sleep_s > 0:
        time.sleep(sleep_s)
    page_source = driver.page_source
    driver.quit()
    return page_source
def get_page_source_requests(url, sleep_s=0):
    """Fetch *url* over plain HTTP (requests) and return the body as text.

    sleep_s is accepted for signature parity with the selenium fetcher but
    is not used here.
    """
    print(f"getting page {url}")
    response = requests.get(url)
    print(f"retrieved page {url}")
    return response.text
def get_page_source(url, dst, sleep_s=0, hold_proc=True, use_selenium=False):
    """Return the HTML for *url*, using *dst* as an on-disk cache.

    A non-empty cached file short-circuits the download; otherwise the page
    is fetched (selenium or requests) and written to *dst*. When hold_proc
    is True, execution pauses for operator inspection after a fresh fetch.
    """
    print(dst)
    if os.path.exists(dst):
        with open(dst, 'r', encoding='utf8') as fin:
            cached = fin.read()
        if len(cached) > 0:
            return cached
    if use_selenium:
        page_source = get_page_source_selenium(url, sleep_s=sleep_s)
    else:
        page_source = get_page_source_requests(url)
    with open(dst, 'w', encoding='utf8') as fout:
        fout.write(page_source)
    if hold_proc:
        input("pausing execution, press enter to continue...")
    return page_source
def get_filesafe_url(url):
    """Turn a URL into a filename-safe token by mapping ':', '/', '.' to '_'."""
    safe = url
    for ch in (":", "/", "."):
        safe = safe.replace(ch, "_")
    return safe
from dataclasses import dataclass, asdict, field
import datetime
import bs4
import json
from threading import Thread
@dataclass
class NewsGroup():
    """One news listing/front page: its URL, on-disk cache paths, and the
    NewsItems scraped from it."""
    base_url: str  # site root, e.g. "https://www.cnbc.com"
    url: str  # the listing/front page URL
    page_html_dst: str  # cache path for the raw listing HTML
    page_json_dst: str  # output path for the scraped-item JSON snapshot
    @classmethod
    def process(
        cls, news_item__cls, front_url, html_dir_=html_dir, json_dir_=json_dir, threaded=False, use_selenium_ng=False,
        use_selenium_ni=False
    ):
        """Scrape *front_url* with *news_item__cls*, process each item
        (optionally on worker threads), and persist results as JSON.

        Returns (front_html_path, front_json_path, NewsGroup instance).
        """
        print(front_url)
        front_filesafe = get_filesafe_url(front_url)
        front_html = os.path.join(html_dir_, front_filesafe)
        front_json = os.path.join(json_dir_, front_filesafe)
        ng = NewsGroup(news_item__cls.BASE_URL, front_url, front_html, front_json)
        n_gen = ng.extract_soup(news_item__cls.yield_news, hold_proc=False, use_selenium=use_selenium_ng)
        if threaded:
            t_list = list()
            for n in n_gen:
                if isinstance(n, NewsItem):
                    t = Thread(target=n.process_item, args=[html_dir_], kwargs={"use_selenium":use_selenium_ni},daemon=True)
                    t_list.append(t)
                    t.start()
                    # throttle: once more than 8 threads are live, wait for the
                    # whole batch before starting more
                    if len(t_list) > 8:
                        for t_ in t_list:
                            t_.join()
                        t_list = list()
            # wait for any remaining threads from the final partial batch
            for t_ in t_list:
                t_.join()
        else:
            for n in n_gen:
                if isinstance(n, NewsItem):
                    n.process_item(html_dir_, use_selenium=use_selenium_ni)
        ng.save_json()
        return front_html, front_json, ng
    def extract_soup(self, yield_news: Callable, sleep_s=0, hold_proc=True, use_selenium=False):
        """Generator: parse the (cached or downloaded) listing page and yield
        the NewsItems produced by *yield_news*.

        Side effect: fills self.news_item__list and writes the JSON snapshot
        before yielding anything.
        """
        src_html_path = self.page_html_dst
        page_source = get_page_source(
            self.url, src_html_path,
            sleep_s=sleep_s, hold_proc=hold_proc,
            use_selenium=use_selenium
        )
        soup = bs4.BeautifulSoup(page_source, features='html.parser')
        self.news_item__list = list()
        for n in yield_news(soup, self.base_url):
            self.news_item__list.append(n)
        self.save_json()
        for n in self.news_item__list:
            yield n
    def save_json(self):
        """Write the collected items (as dicts) to self.page_json_dst."""
        with open(self.page_json_dst, 'w') as fout:
            json.dump(
                [asdict(i) for i in self.news_item__list],
                fout, sort_keys=True, indent=4
            )
@dataclass
class NewsItem():
    """One scraped news article. Site-specific subclasses implement the
    extraction hooks (yield_news / extract_news_content / cleanup_data)."""
    base_url: str
    url: str
    date: str
    header: str
    section: str
    front_page_summary: str = None
    summary: str = None
    full_date: str = None
    dt: datetime.datetime = None
    content: List = field(default_factory=lambda : list())
    content_raw: List = field(default_factory=lambda : list())
    @classmethod
    def yield_news_reuters(cls, soup: bs4.BeautifulSoup, base_url: str):
        """Stub: yield items from a Reuters listing page (not implemented)."""
        pass
    def extract_news_content(self, news_content_html_dir, sleep_s=0, hold_proc=True, use_selenium=False):
        """Stub: download/parse the article body (subclass hook)."""
        pass
    def cleanup_data(self):
        """Stub: normalize extracted fields (subclass hook)."""
        pass
    def process_item(self, html_dir_, use_selenium=False):
        """Extract this item's content; when extraction succeeded (non-None),
        clean it up and echo the cleaned/raw content line pairs."""
        n = self
        print("------------")
        print(n.base_url)
        print(n.url)
        a = n.extract_news_content(html_dir_, hold_proc=False, use_selenium=use_selenium)
        if not a is None:
            n.cleanup_data()
            for i, j in zip(n.content, n.content_raw):
                print("-- " + i)
                print("++ " + j)
        # input("enter to continue...")
def has_class_name(s, c_ref_list):
    """Return True when tag *s* has, for every substring in *c_ref_list*,
    at least one CSS class containing that substring.

    Tags without a "class" attribute (KeyError on lookup) never match.
    """
    try:
        classes = s["class"]
    except KeyError:
        return False
    # idiomatic form of the original len(matches) == sum(matches) check
    return all(any(ref in cls for cls in classes) for ref in c_ref_list)
# NOTE(review): duplicate of get_filesafe_url defined earlier in this module;
# this redefinition (with a type hint) shadows the first -- consider keeping one.
def get_filesafe_url(url: str) -> str:
    """Turn a URL into a filename-safe token (':', '/', '.' -> '_')."""
    return url.replace(":","_").replace("/","_").replace(".","_")
@dataclass
class NewsMinimal():
    """Minimal news record: the fields needed downstream of scraping."""
    url: str
    header: str
    summary: str
    doc: str
    date: str
```
#### File: news_summary/news_sources/news_cnbc.py
```python
from news import NewsGroup, NewsItem, has_class_name, get_page_source, get_filesafe_url
from news import html_dir, json_dir
import bs4
import os
import datetime
class NewsItem_CNBC(NewsItem):
    """NewsItem scraper for cnbc.com finance listing pages."""
    BASE_URL = "https://www.cnbc.com"
    URLS = [
        "https://www.cnbc.com/finance/"
    ]
    @classmethod
    def yield_news(cls, soup: bs4.BeautifulSoup, base_url: str):
        """Yield a NewsItem_CNBC for every story card on a listing page."""
        for s in soup.findAll("div"):
            # print(s["class"])
            # print(['StoryCollection__story' in c for c in s["class"]])
            if has_class_name(s, ["Card-titleAndFooter"]):
                # print(s)
                a = s.findAll("a", {"class": "Card-title"})[0]
                # NOTE(review): the `date` argument receives the findAll result
                # (a list of tags), not a string -- confirm downstream handling.
                n = NewsItem_CNBC(
                    base_url,
                    a["href"],
                    s.findAll("span", {"class": "Card-time"}),
                    a.findAll("div")[0].text,
                    ""
                )
                yield n
    @classmethod
    def cleanup_content(cls, content: list):
        """Hook for content-wide cleanup; currently a passthrough."""
        return content
    @classmethod
    def cleanup_content_item(cls, c: str, i: int):
        """Hook for per-paragraph cleanup; currently a passthrough."""
        return c
    def extract_header(self, soup: bs4.BeautifulSoup):
        """Set self.header from the article page's <h1>."""
        self.header = soup.find("div", {"class", "ArticleHeader-headerContentContainer"}).find("h1").text
    def extract_full_date(self, soup: bs4.BeautifulSoup):
        """Set self.full_date from the article's <time datetime=...> attribute."""
        self.full_date = soup.find("div", {"class", "ArticleHeader-headerContentContainer"}).find("time")["datetime"]
        print(self.full_date)
    def extract_summary_content(self, soup: bs4.BeautifulSoup):
        """Collect article paragraphs into self.content/self.content_raw and
        use the first two as self.summary, skipping known boilerplate lines."""
        summary = list()
        content = list()
        d = soup.find("div", {"class": "group"})
        if not d:
            return
        for d in soup.findAll("div", {"class": "group"}):
            for p in d.findAll("p"):
                # boilerplate intro lines CNBC repeats across movers articles
                if "Check out the companies making headlines before the bell:" in p.text:
                    continue
                elif "Check out the companies making headlines in midday trading." in p.text:
                    continue
                elif "Take a look at some of the biggest movers in the premarket:" in p.text:
                    continue
                if len(summary) < 2:
                    print(p.text)
                    summary.append(p.text)
                content.append(p.text)
        self.summary = ' '.join(summary)
        print(self.summary)
        self.content = list(content)
        self.content_raw = list(content)
    def extract_news_content(
        self,
        news_content_html_dir,
        sleep_s=0, hold_proc=True,
        use_selenium=False
    ):
        """Download (or load cached) article HTML and populate header, date,
        summary and content. Returns self, or None for not-found pages."""
        url = self.url
        page_source = get_page_source(
            url, os.path.join(news_content_html_dir, get_filesafe_url(url)),
            sleep_s=sleep_s, hold_proc=hold_proc,
            use_selenium=use_selenium
        )
        soup = bs4.BeautifulSoup(page_source, features='html.parser')
        if "We're sorry, the page you were looking for cannot be found." in soup.text:
            return None
        a = soup.find("a", {"class": "ArticleHeader-eyebrow"})
        if a:
            self.section = a.text
        self.extract_header(soup)
        self.extract_full_date(soup)
        self.extract_summary_content(soup)
        return self
    def cleanup_data(self):
        """Normalize the article date to ISO format and run content cleanup."""
        self.date = datetime.datetime.strptime(self.full_date[:10],"%Y-%m-%d").isoformat()
        self.content = NewsItem_CNBC.cleanup_content(self.content)
if __name__ == "__main__":
for front_url in NewsItem_CNBC.URLS:
html_path, json_path, ng = NewsGroup.process(
NewsItem_CNBC, front_url,
html_dir_=html_dir, json_dir_=json_dir
)
```
|
{
"source": "jcotton42/ficdl",
"score": 2
}
|
#### File: ficdl/ficdl/callbacks.py
```python
from typing import Callable, Union
from ficdl.scrapers.types import StoryMetadata
class InitialStoryDetails:
    """Progress-callback payload emitted when the story's metadata has been
    scraped."""
    metadata: StoryMetadata
    def __init__(self, metadata: StoryMetadata):
        self.metadata = metadata
class ChapterDetails:
    """Progress-callback payload emitted per downloaded chapter.

    The historically misspelled attribute ``chatper_number`` is kept because
    existing callers read it; ``chapter_number`` is a correctly-spelled
    read-only alias.
    """
    chapter_title: str
    chatper_number: int
    chapter_count: int
    def __init__(self, chapter_title, chapter_number, chapter_count):
        self.chapter_title = chapter_title
        self.chatper_number = chapter_number
        self.chapter_count = chapter_count
    @property
    def chapter_number(self) -> int:
        """Correctly-spelled alias for ``chatper_number``."""
        return self.chatper_number
# Signature of the downloader progress callback: receives either the one-time
# InitialStoryDetails or a per-chapter ChapterDetails; returns nothing.
ProgressCallback = Callable[[Union[InitialStoryDetails, ChapterDetails]], None]
```
#### File: ficdl/gui/converter.py
```python
import tkinter as tk
import tkinter.ttk as ttk
class Converter(tk.Frame):
    """GUI pane for converting an already-downloaded book between formats.

    The browse/convert handlers are placeholders; conversion is not
    implemented yet (the pane says so).
    """
    def __init__(self, master, window):
        super().__init__(master)
        self.window = window  # toplevel window, kept for future dialogs
        self.create_widgets()
    def create_widgets(self):
        """Build the input/output path rows and the Convert button."""
        self.input_path = tk.StringVar()
        self.output_path = tk.StringVar()
        row = 0
        ttk.Label(self, text='Convert an existing book').grid(row=row, column=0, columnspan=3, sticky=tk.W)
        row += 1
        ttk.Label(self, text='NOT SUPPORTED YET').grid(row=row, column=0, columnspan=3, sticky=tk.W)
        row += 1
        ttk.Label(self, text='Input path: ').grid(row=row, column=0, sticky=tk.W)
        ttk.Entry(self, textvariable=self.input_path).grid(row=row, column=1, sticky=tk.W)
        ttk.Button(self, text='Browse...', command=self.on_browse_input_path).grid(row=row, column=2, sticky=tk.W)
        row += 1
        ttk.Label(self, text='Output path: ').grid(row=row, column=0, sticky=tk.W)
        ttk.Entry(self, textvariable=self.output_path).grid(row=row, column=1, sticky=tk.W)
        ttk.Button(self, text='Browse...', command=self.on_browse_output_path).grid(row=row, column=2, sticky=tk.W)
        row += 1
        ttk.Button(self, text='Convert', command=self.on_convert).grid(row=row, column=0, sticky=tk.W)
    def on_browse_input_path(self):
        """Placeholder handler for the input Browse button."""
        print('Browse input')
    def on_browse_output_path(self):
        """Placeholder handler for the output Browse button."""
        print('Browse output')
    def on_convert(self):
        """Placeholder handler for the Convert button."""
        print('Convert')
```
#### File: ficdl/gui/downloader.py
```python
from dataclasses import dataclass
from ficdl.scrapers.types import StoryMetadata
from pathlib import Path
import queue
from typing import Optional, Union
import threading
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.filedialog as filedialog
import tkinter.messagebox as messagebox
from bs4 import PageElement
from ficdl.callbacks import ChapterDetails, InitialStoryDetails
from ficdl.config import CONFIG
from ficdl.downloader import download_story, write_story
from ficdl.utils import make_path_safe
from ficdl.writers.types import OutputFormat, WriterOptions
DOWNLOAD_STATE_CHANGED = '<<DownloadStateChanged>>'
@dataclass(eq=False)
class DownloadFinished:
    """Queue message: the story text finished downloading; the UI thread
    should now ask where to save it."""
    text: list[list[PageElement]]  # nested page elements (presumably one inner list per chapter -- confirm)
    metadata: StoryMetadata
    cover_path: Optional[Path]  # user-supplied cover override, if any
@dataclass(eq=False)
class SaveFinished:
    """Queue message: the story was written to disk at save_path."""
    save_path: Path
class Downloader(tk.Frame):
    """GUI pane that downloads a story on a worker thread and saves it to disk.

    Worker threads never touch Tk widgets directly: they push messages onto
    self.download_data (a SimpleQueue) and fire the DOWNLOAD_STATE_CHANGED
    virtual event, whose handler runs on the UI thread.
    """
    def __init__(self, master, window):
        super().__init__(master)
        self.window = window
        self.download_data = queue.SimpleQueue()
        self.bind(DOWNLOAD_STATE_CHANGED, self.on_download_state_changed)
        self.create_widgets()
    def create_widgets(self):
        """Build the URL/cover entries, the progress bar, and Download button."""
        self.url = tk.StringVar()
        self.cover_path = tk.StringVar()
        self.progress_value = tk.IntVar()
        row = 0
        ttk.Label(self, text='Download a story').grid(row=row, column=0, columnspan=3, sticky=tk.W)
        row += 1
        ttk.Label(self, text='URL: ').grid(row=row, column=0, sticky=tk.W)
        ttk.Entry(self, textvariable=self.url).grid(row=row, column=1, sticky=tk.W)
        row += 1
        ttk.Label(self, text='Cover to use in place of story cover:').grid(row=row, column=0, sticky=tk.W)
        ttk.Entry(self, textvariable=self.cover_path).grid(row=row, column=1, sticky=tk.W)
        ttk.Button(self, text='Browse...', command=self.on_browse_for_cover).grid(row=row, column=2, sticky=tk.W)
        row += 1
        self.download_progress_bar = ttk.Progressbar(self, orient='horizontal', mode='determinate', variable=self.progress_value)
        self.download_progress_bar.grid(row=row, column=0, columnspan=2, sticky='ew')
        self.download_button = ttk.Button(self, text='Download...', command=self.on_download)
        self.download_button.grid(row=row, column=2, sticky=tk.W)
    def on_browse_for_cover(self):
        """Let the user pick a cover image; empty string means Cancel."""
        file = filedialog.askopenfilename(
            parent=self.window,
            filetypes=(
                ('All supported images', '.jpg .jpeg .png'),
                ('PNG', '.png'),
                ('JPG', '.jpg .jpeg'),
            )
        )
        if file == '':
            return
        self.cover_path.set(file)
    def download_callback(self, details: Union[ChapterDetails, InitialStoryDetails]):
        """Progress callback invoked on the worker thread."""
        # not run on the UI thread, so use events
        self.download_data.put(details)
        self.event_generate(DOWNLOAD_STATE_CHANGED)
    def ask_for_save_location(self, suggest_name: str) -> Optional[Path]:
        """Prompt for an output file; return its Path or None on Cancel."""
        file = filedialog.asksaveasfilename(
            parent=self.window,
            defaultextension='.epub',
            initialfile=suggest_name,
            filetypes=(
                ('ePub (all eReaders *except* Kindle)', '.epub'),
                ('Mobipocket (all Kindles)', '.mobi'),
                ('PDF', '.pdf'),
            )
        )
        if file == '':
            # Cancel was clicked
            return None
        return Path(file)
    def on_download_state_changed(self, _event):
        """UI-thread handler: drain the queue and update widgets accordingly."""
        def save_to_disk(metadata, text, format, output_path, cover_path):
            # runs on a worker thread; reports back via the queue + event
            write_story(format, WriterOptions(
                chapter_text=text,
                metadata=metadata,
                output_path=output_path,
                cover_path=cover_path,
                font_family=CONFIG.default_font_family,
                font_size=CONFIG.default_font_size,
                line_height=CONFIG.default_line_height,
                page_size=CONFIG.default_page_size,
            ))
            self.download_data.put(SaveFinished(save_path=output_path))
            self.event_generate(DOWNLOAD_STATE_CHANGED)
        while not self.download_data.empty():
            data = self.download_data.get()
            if isinstance(data, InitialStoryDetails):
                self.progress_value.set((1 / len(data.metadata.chapter_names)) * 100)
            elif isinstance(data, ChapterDetails):
                self.progress_value.set((data.chatper_number / data.chapter_count) * 100)
            elif isinstance(data, DownloadFinished):
                suggest_name = make_path_safe(data.metadata.title)
                save_path = self.ask_for_save_location(suggest_name)
                if save_path is not None:
                    # switch to an indeterminate bar while writing to disk
                    self.progress_value.set(0)
                    self.download_progress_bar.configure(mode='indeterminate')
                    self.download_progress_bar.start()
                    thread = threading.Thread(target=save_to_disk, args=(
                        data.metadata,
                        data.text,
                        OutputFormat(save_path.suffix.lstrip('.').lower()),
                        save_path,
                        data.cover_path,
                    ))
                    thread.start()
                else:
                    self.progress_value.set(0)
                    self.download_button.configure(state=tk.NORMAL)
            elif isinstance(data, SaveFinished):
                self.download_button.configure(state=tk.NORMAL)
                self.download_progress_bar.stop()
                self.progress_value.set(0)
                self.download_progress_bar.configure(mode='determinate')
                messagebox.showinfo('Download finished', 'All done.')
            else:
                # bug fix: was "..." + type(data), which raises TypeError
                # (str + type) instead of the intended Exception
                raise Exception(f"A callback case isn't being handled in the GUI: {type(data)}")
    def on_download(self):
        """Start the story download on a worker thread."""
        def on_download_inner(url: str, cover: Path):
            metadata, text = download_story(url, self.download_callback)
            self.download_data.put(DownloadFinished(text=text, metadata=metadata, cover_path=cover))
            self.event_generate(DOWNLOAD_STATE_CHANGED)
        url = self.url.get().strip()
        cover = self.cover_path.get().strip()
        if cover == '':
            cover = None
        else:
            cover = Path(cover)
            if not cover.exists():
                messagebox.showerror(title='Cover does not exist', message='The path you gave for the cover does not exist.')
                return
        self.download_button.configure(state=tk.DISABLED)
        thread = threading.Thread(target=on_download_inner, args=(url, cover))
        thread.start()
```
#### File: ficdl/gui/subscription_manager.py
```python
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.filedialog as filedialog
class SubscriptionManager(tk.Frame):
    """Placeholder pane for managing story subscriptions (not implemented)."""

    def __init__(self, master, window):
        super().__init__(master)
        self.window = window
        self.create_widgets()

    def create_widgets(self):
        """Lay out the static header and not-supported labels."""
        header = ttk.Label(self, text='Subscribed stories')
        header.grid(row=0, column=0, columnspan=3, sticky=tk.W)
        placeholder = ttk.Label(self, text='NOT SUPPORTED YET')
        placeholder.grid(row=1, column=0, sticky=tk.W)
```
#### File: ficdl/scrapers/ffn.py
```python
from bs4 import BeautifulSoup, PageElement, Tag
from datetime import datetime, timezone
from typing import Optional
from ficdl.scrapers.types import Scraper, StoryMetadata
from ficdl.utils import download_and_decompress
import logging
import re
CENTER_STYLE = re.compile(r'text-align\s*:\s*center', re.IGNORECASE)
UNDERLINE_STYLE = re.compile(r'text-decoration\s*:\s*underline', re.IGNORECASE)
logger = logging.getLogger(__name__)
def extract_story_title(page: BeautifulSoup) -> str:
    """Return the story title from an FFN chapter page.

    There is no id on the title element, so a positional selector is used.
    """
    title_elements = page.select('#profile_top > b')
    return title_elements[0].string
def extract_author(page: BeautifulSoup) -> str:
    """Return the author name from an FFN chapter page (no id available)."""
    author_links = page.select('#profile_top > a')
    return author_links[0].string
def extract_chapter_names(page: BeautifulSoup) -> Optional[list[str]]:
    """Return the chapter titles from the chapter <select>, or None when the
    story has only one chapter (no selector on the page)."""
    select = page.find(id='chap_select')
    if select is None:
        return None
    titles = []
    for option in select.children:
        # options look like "3. Chapter Title" -- strip the leading number
        _number, title = option.string.split('. ', maxsplit=1)
        titles.append(title)
    return titles
def extract_cover_url(page: BeautifulSoup) -> Optional[str]:
    """Return an absolute cover-image URL, or None when the story has none."""
    cover = page.select_one('.cimage[data-original]')
    if cover is None:
        return None
    url = cover['data-original']
    # protocol-relative URLs get an explicit https scheme
    return 'https:' + url if url.startswith('//') else url
def extract_text(page: BeautifulSoup) -> list[PageElement]:
    """Return the chapter body as a list of <p> elements, converting FFN's
    inline center/underline styles into CSS classes that survive pandoc."""
    text = []
    for child in page.find(id='storytext').children:
        if isinstance(child, Tag) and child.name == 'p':
            if child.has_attr('style') and CENTER_STYLE.match(child['style']):
                # pandoc throws away the centering CSS on parsing, so add a div with a custom CSS class
                div = page.new_tag('div')
                div['class'] = 'center'
                del child['style']
                child = child.wrap(div)
            # same idea for underline spans: inline style -> class
            for span in child.find_all(name='span', style=UNDERLINE_STYLE):
                span['class'] = 'underline'
                del span['style']
            text.append(child)
    return text
def extract_description(page: BeautifulSoup) -> str:
    """Return the story description/summary text."""
    description = page.select_one('#profile_top > div')
    return description.string
def extract_update_date_utc(page: BeautifulSoup) -> datetime:
    """Return the story's last-update time as an aware UTC datetime."""
    # the update date is either shown by itself, or first of the dates listed
    stamps = page.select('#profile_top span[data-xutime]')
    return datetime.fromtimestamp(int(stamps[0]['data-xutime']), timezone.utc)
class FFNScraper(Scraper):
    """Scraper for fanfiction.net URLs of the form .../s/<id>/<chapter>/<title>."""
    base_url: str
    title_from_url: str
    first_chapter: BeautifulSoup  # cached soup of chapter 1, set by get_metadata
    def __init__(self, url: str) -> None:
        # .../s/12345/3/Some-Title -> base_url=.../s/12345, title_from_url=Some-Title
        self.base_url, _chap_num, self.title_from_url = url.rsplit('/', maxsplit=2)
    def get_metadata(self) -> StoryMetadata:
        """Download chapter 1 (cached on self.first_chapter) and extract
        the story metadata from it."""
        url = f'{self.base_url}/1/{self.title_from_url}'
        if getattr(self, 'first_chapter', None) is None:
            page, _mimetype = download_and_decompress(url)
            self.first_chapter = BeautifulSoup(page, 'html5lib')
        title = extract_story_title(self.first_chapter)
        # bug fix: author used to be extracted with extract_story_title,
        # which made the author field a copy of the story title
        author = extract_author(self.first_chapter)
        cover_url = extract_cover_url(self.first_chapter)
        chapter_names = extract_chapter_names(self.first_chapter)
        description = extract_description(self.first_chapter)
        update_date_utc = extract_update_date_utc(self.first_chapter)
        if chapter_names is None:
            # one-shot story: synthesize a single chapter named after the story
            chapter_names = [title]
        return StoryMetadata(
            title=title,
            author=author,
            cover_url=cover_url,
            chapter_names=chapter_names,
            description=description,
            update_date_utc=update_date_utc
        )
    def get_text_for_chapter(self, number: int) -> list[PageElement]:
        """Return the body of 1-based chapter *number*, reusing the cached
        first chapter when available."""
        # bug fix: getattr -- first_chapter only exists once get_metadata ran,
        # so a direct attribute access could raise AttributeError here
        if number == 1 and getattr(self, 'first_chapter', None) is not None:
            chapter = self.first_chapter
        else:
            url = f'{self.base_url}/{number}/{self.title_from_url}'
            page, _mimetype = download_and_decompress(url)
            chapter = BeautifulSoup(page, 'html5lib')
        return extract_text(chapter)
```
#### File: ficdl/scrapers/types.py
```python
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
@dataclass(eq=False)
class StoryMetadata:
    """Metadata describing a story, as scraped from its source site."""
    title: str
    author: str
    # Absolute URL of the cover image, or None when the story has no cover.
    cover_url: Optional[str]
    chapter_names: list[str]
    description: str
    # Last-update time of the story; timezone-aware, in UTC.
    update_date_utc: datetime
class Scraper(metaclass=ABCMeta):
    """Abstract interface implemented by each site-specific story scraper."""
    @abstractmethod
    def get_metadata(self) -> StoryMetadata:
        """Fetch and return the story's metadata."""
        pass
    @abstractmethod
    def get_text_for_chapter(self, number: int) -> list:
        """Return the text of chapter `number` (1-based) as parsed page elements."""
        pass
```
#### File: ficdl/ficdl/updater.py
```python
from pathlib import Path
from typing import NoReturn
from urllib.request import Request, urlopen
import json
import os
import pkgutil
import tempfile
import sys
import ficdl
class ReleaseInfo:
    """Metadata about a published ficdl release on GitHub."""
    def __init__(self, version: tuple, download_url: str, release_notes: str):
        # Version tuple parsed from the tag name, e.g. (1, 2, 3).
        self.version = version
        # Download URL for the release's first asset.
        self.download_url = download_url
        # Body text of the GitHub release (the release notes).
        self.release_notes = release_notes
def get_latest_release() -> ReleaseInfo:
    """Query the GitHub API and return info about the latest ficdl release."""
    request = Request(
        'https://api.github.com/repos/jcotton42/ficdl/releases/latest',
        # BUG FIX: the header name was misspelled 'Accpet', so the intended
        # GitHub media-type was never actually requested.
        headers={'Accept': 'application/vnd.github.v3+json'}
    )
    with urlopen(request) as response:
        release = json.loads(response.read())
    return ReleaseInfo(
        # Tag names look like 'v1.2.3'; strip the leading 'v' and parse ints.
        tuple(map(int, release['tag_name'].lstrip('v').split('.'))),
        release['assets'][0]['url'],
        release['body'],
    )
def install_update(download_url: str, restart_app: bool) -> NoReturn:
    """
    Download the new ficdl.pyz and exec the bundled updater script over this
    process. Never returns: os.execl replaces the current process image.
    """
    work_dir = Path(tempfile.mkdtemp())
    # Extract the updater script shipped inside the package to a temp dir,
    # since the package file itself is about to be replaced.
    updater = work_dir.joinpath('updater.py')
    updater.write_bytes(pkgutil.get_data('ficdl', 'assets/updater.py'))
    current_ficdl_path = Path(ficdl.__file__).parent.parent.resolve()
    updated_ficdl_path = work_dir.joinpath('ficdl.pyz')
    request = Request(download_url, headers={
        # Ask GitHub for the raw asset bytes rather than JSON metadata.
        'accept': 'application/octet-stream',
    })
    with urlopen(request) as response:
        updated_ficdl_path.write_bytes(response.read())
    python_args = [
        str(updater),
        str(updated_ficdl_path),
        str(current_ficdl_path),
        str(work_dir)
    ]
    if restart_app:
        python_args.append('--restart-app')
    # No, that's not a typo: the second sys.executable provides sys.argv[0]
    # for the new process; the rest become the updater's arguments.
    os.execl(sys.executable, sys.executable, *python_args)
```
#### File: ficdl/ficdl/utils.py
```python
from gzip import GzipFile
from io import BytesIO
import logging
import os
from pathlib import Path
import re
import shutil
import sys
import tkinter as tk
import tkinter.font as tkfont
from typing import Optional, Tuple
from urllib.request import urlopen
from ficdl.config import CONFIG
logger = logging.getLogger(__name__)
INVALID_PATH_CHARS = re.compile(r'[<>:"/\\|?*]')
INVALID_PATH = re.compile(r'^(con|prn|aux|nul|com[1-9]|lpt[1-9])$', re.IGNORECASE)
def make_path_safe(stem: str) -> str:
    """Return a filename stem that is safe to use on Windows and POSIX."""
    # Reserved Windows device names (con, prn, aux, ...) get a trailing
    # underscore so they stop matching the reserved form.
    if INVALID_PATH.match(stem):
        return stem + '_'
    # Otherwise replace every character Windows forbids in file names.
    return INVALID_PATH_CHARS.sub('_', stem)
def get_font_families(root: Optional[tk.Misc]) -> list[str]:
    """Return the sorted list of installed font families, creating a throwaway
    Tk root if the caller did not supply one."""
    owns_root = root is None
    if owns_root:
        root = tk.Tk()
    families = sorted(tkfont.families(root))
    if owns_root:
        # Only destroy the root we created ourselves.
        root.destroy()
    return families
def find_tool(tool: str) -> Optional[Path]:
    """
    Locate an external tool by checking, in order: the FICDL_TOOL_<NAME>
    environment variable, the user config, and finally the system PATH.
    Returns None when the tool cannot be found.
    """
    # Lazily-evaluated lookups, so later (more expensive) sources are only
    # consulted when the earlier ones miss.
    lookups = (
        lambda: os.environ.get(f'FICDL_TOOL_{tool.upper()}', None),
        lambda: CONFIG.tool_paths.get(tool, None),
        lambda: shutil.which(tool),
    )
    for lookup in lookups:
        found = lookup()
        if found is not None:
            return Path(found)
    return None
def download_and_decompress(url: str) -> Tuple[bytes, str]:
    """
    Download `url` and return (body bytes, MIME type), gunzipping the body
    when the server declares a gzip Content-Encoding.
    """
    with urlopen(url) as response:
        if not 200 <= response.status < 300:
            logger.critical('Failed to download story asset with HTTP status %d', response.status)
            sys.exit(2) # FIXME, should raise instead
        data = response.read()
    # Case-sensitive header comparison; assumes servers send exactly 'gzip' —
    # TODO confirm against real responses.
    is_gzip = response.headers['Content-Encoding'] == 'gzip'
    mimetype = response.headers['Content-Type']
    return (decompress(data, is_gzip), mimetype)
def decompress(data: bytes, is_gzip: bool) -> bytes:
    """Return `data` unchanged, or gunzip it when `is_gzip` is true."""
    if not is_gzip:
        return data
    with GzipFile(fileobj=BytesIO(data)) as gz:
        return gz.read()
```
|
{
"source": "JCouderc/gs15-kasumi",
"score": 3
}
|
#### File: blockchain/blockchain/wallet.py
```python
from os import path
from app.blockchain.signature.rsa import RSASignature
from app.keys_generator.keys_manager.rsa_keys import RSAKeysManager
from app.utils.file_manager import read_file, write_file
path_data = path.join(path.abspath(path.dirname(__file__)), '../../../data/')
path_wallet = path.join(path_data, 'wallets/')
path_keys = path.join(path_data, 'keys/')
path_public = path.join(path_keys, 'public/')
path_private = path.join(path_keys, 'private/')
class Wallet:
    """
    A user wallet: a username bound to an RSA key pair, persisted under
    data/wallets/ as '<user>_wallet.txt'.
    """
    def __init__(self, wallet_filename: str = None):
        """
        Load an existing wallet file, or create a wallet from its key file.
        NOTE(review): the default of None would crash path.join below —
        confirm callers always pass a filename of the form '<user>_wallet.txt'.
        """
        if path.exists(path.join(path_wallet, wallet_filename)): # Load user wallet from file
            # Wallet file layout: line 1 = username, line 2 = key filename.
            wallet_lines = read_file(path.join(path_wallet, wallet_filename)).splitlines()
            self.__user = wallet_lines[0]
            self.__keys_filename = wallet_lines[1]
            self.__keys_mngr = RSAKeysManager(self.__keys_filename)
        else: # Create user wallet from keys
            # Derive 'key_<user>.txt' from the wallet filename's first segment.
            keys_filename = 'key_' + wallet_filename.split('_')[0] + '.txt'
            self.__keys_mngr = RSAKeysManager(keys_filename)
            self.__user = keys_filename.split('.')[0].split('_')[1] # Use filename as username
            self.__keys_filename = keys_filename
            # Checks if user already exists
            if path.exists(path.join(path_wallet, self.__user + '_wallet.txt')):
                raise Exception("Le wallet de cet utilisateur existe déjà")
            self.__save() # Write the new wallet file
    def execute_transaction(self, recipient: "Wallet", amount: int) -> str:
        """
        Signs a transaction
        :param recipient: wallet of the user receiving the funds
        :param amount: The amount of currency the sender will give and the recipient will receive
        :return: Signature of the transaction
        """
        # Sign the transaction: the signed payload is sender + recipient + amount.
        rsa_signer = RSASignature(self.__keys_mngr)
        trans_signature = rsa_signer.sign(self.__user + recipient.user + str(amount))
        self.__save()
        return trans_signature
    def __save(self):
        """
        Saves the Wallet into a file (username on line 1, key filename on line 2)
        :return:
        """
        write_file(path.join(path_wallet, self.__user + '_wallet.txt'),
                   self.__user + '\n'
                   + self.__keys_filename)
    @property
    def user(self):
        # Username derived from the key filename.
        return self.__user
    @property
    def public_key(self):
        # Public elements of the RSA key pair.
        return self.__keys_mngr.get_public()
```
#### File: gs15-kasumi/app/cli.py
```python
import getpass
import os
from app.kasumi import cli as kasumi
from app.keys_generator import cli as keysgen
from app.hashes import cli as hashes
from app.blockchain import cli as blockchain
def cls():
    """Clear the terminal window (Windows 'cls', POSIX 'clear')."""
    clear_command = 'cls' if os.name == 'nt' else 'clear'
    os.system(clear_command)
def check_all_cmds():
    """Run every sub-module's 'all' routine (the full demo, menu item 10)."""
    for module in (kasumi, keysgen, hashes, blockchain):
        module.all()
def switch_command(cmd):
    """Dispatch a menu command (string from input()) to its handler."""
    available_cmds = {
        1: kasumi.encrypt,
        2: kasumi.decrypt,
        3: keysgen.generate_keys,
        4: hashes.generate,
        5: hashes.check,
        6: blockchain.create_proof_of_work,
        7: blockchain.check_transaction,
        8: blockchain.increment,
        9: blockchain.check_integrity,
        10: check_all_cmds,
        11: blockchain.print_blockchain,
        12: blockchain.users_wallets,
    }
    try:
        cmd_nb = int(cmd)
    except ValueError:
        print('La commande entrée doit être un nombre')
        # BUG FIX: previously execution fell through with cmd_nb = 0 and the
        # "command not available" message was printed as well.
        return
    if cmd_nb in available_cmds:
        func_to_run = available_cmds[cmd_nb]
        func_to_run()
        input("\nAppuyez sur Entrée pour continuer...\n")
    else:
        print('La commande n\'est pas disponible')
def run():
    """Main menu loop: print the menu, read a command, dispatch it until 'q'."""
    while True:
        print(f'Bonjour ô maître {getpass.getuser()} ! Que souhaitez vous faire aujourd’hui ? (q pour quitter)')
        print('->1<- Chiffrer un message.')
        print('->2<- Déchiffrer un message.')
        print('->3<- Générer des couples de clés publiques / privées.')
        print('->4<- Générer un hash / une empreinte.')
        print('->5<- Vérifier un hash / une empreinte.')
        print('->6<- Effectuer une preuve de travail.')
        print('->7<- Vérifier une transaction (une signature).')
        print('->8<- Débuter / incrémenter la Block-chain.')
        print('->9<- Vérifier l’intégrité de la block-chain.')
        print('->10<- I WANT IT ALL !! I WANT IT NOW !!')
        # NOTE(review): switch_command also accepts 11 (print blockchain) and
        # 12 (list wallets), which are not advertised here — confirm whether
        # they should be shown in the menu.
        cmd = input()
        if cmd == 'q':
            print(f'Au revoir, ô maître {getpass.getuser()} !')
            break
        switch_command(cmd)
```
#### File: hashes/hash/sponge_hash.py
```python
from app.hashes.hash.hash import Hash
from app.hashes.hash.sha256 import SHA256
class SpongeHash(Hash):
    """
    Hash function built from SHA-256 wrapped in a sponge construction:
    the message is split into fixed-size parts, absorbed into a running
    state, then the digest is squeezed back out in fixed-size parts.
    """
    __message_part_size = 256 # Size of of the messages parts in bits (chosen arbitrarily)
    __message_count_size = 64 # Size of the allocated part for message payload size in bits (chosen arbitrarily)
    def hash(self, message: str, to_hex: bool = True):
        """
        Calculate the hash of the message with SHA-256 encapsulated in a sponge function
        :param message:
        :param to_hex: If true, returns an hexadecimal string of the hash else an integer
        :return: String or integer format of the hash
        """
        message_parts = self.__split_message(message)
        state = self.__absorb(message_parts)
        hash_parts = self.__squeeze(state)
        raw_hash = self.__merge_hash(hash_parts)
        if to_hex:
            # BUG FIX: str.lstrip('0x') strips *any* leading run of '0'/'x'
            # characters, so a zero hash became an empty string. Slicing off
            # the literal '0x' prefix is the correct operation.
            return hex(raw_hash)[2:]
        return raw_hash
    def __absorb(self, message_parts: list) -> int:
        """
        Absorb the message parts and gives the final state
        :param message_parts:
        :return: The final state of the absorb phase
        """
        # State is composed of the bitrate and capacity parts
        # | State |
        # | capacity | bitrate |
        state = 0
        f_hash = SHA256()
        for msg_part in message_parts:
            state ^= msg_part
            state = f_hash.hash(state, to_hex=False, pad_message=False)
        if len(message_parts) < 2: # Following the constraint to apply the function at least twice
            state = f_hash.hash(state, to_hex=False, pad_message=False)
        return state
    def __squeeze(self, state: int) -> list:
        """
        Extract the final hash from the squeezed states
        :param state: The last state of the absorb process
        :return: The final hash as a list
        """
        bitrate_mask = (1 << self.__message_part_size) - 1
        hash_parts = []
        f_hash = SHA256()
        for i in range(f_hash.get_block_size() // self.__message_part_size):
            hash_parts.insert(0, state & bitrate_mask)
            state = f_hash.hash(state, to_hex=False, pad_message=False)
        return hash_parts
    def __split_message(self, message: str) -> list:
        """
        Splits the message into a list of integers to be processed by the absorption phase
        :param message: Given message as a string
        :return: The list of integers representing the message
        """
        # String to integer
        message_bytes = bytearray(message, 'utf-8') # String to byte array
        message_length = len(message_bytes) * 8
        if message_length >= (1 << self.__message_count_size):
            raise Exception('Message size must be lower than 2^', self.__message_count_size, ' bits')
        message_int = message_bytes[0]
        for i in range(1, len(message_bytes)):
            message_int = (message_int << 8) | message_bytes[i]
        # Padding
        nb_blocks = (message_length // self.__message_part_size) + 1
        remaining_bits = message_length % self.__message_part_size
        if remaining_bits + 1 + self.__message_count_size <= self.__message_part_size: # Enough space in the last block
            nb_padding_zeros = self.__message_part_size - remaining_bits - 1 - self.__message_count_size
        else: # Not enough place in the last block so we will create one more
            nb_padding_zeros = (2 * self.__message_part_size) - remaining_bits - 1 - self.__message_count_size
            nb_blocks += 1
        message_int = (message_int << 1) | 1 # End of message separator
        message_int = message_int << nb_padding_zeros # Padding of the message to fit the blocks size
        message_int = (message_int << self.__message_count_size) | message_length # Message bit length at the end
        # Parsing
        mask_block = (1 << self.__message_part_size) - 1
        message_payload = []
        for i in range(nb_blocks):
            message_payload.append((message_int >> ((nb_blocks - i - 1) * self.__message_part_size)) & mask_block)
        return message_payload
    def __merge_hash(self, hash_parts):
        """Concatenate the squeezed parts into one integer digest."""
        final_hash = 0
        for i in range(len(hash_parts)):
            final_hash = (final_hash << self.__message_part_size) | hash_parts[i]
        return final_hash
```
#### File: kasumi/cipher_mode/pcbc.py
```python
from app.kasumi.cipher_mode.cipher_mode import CipherMode
from app.kasumi.kasumi import Kasumi
class PCBC(CipherMode):
    """
    Propagating Cipher Block Chaining mode: each block is chained with the
    XOR of the previous plaintext and previous ciphertext blocks.
    """
    def __init__(self, kasumi: Kasumi):
        super().__init__(kasumi)
    def encrypt(self, message: str, key: str, iv: str) -> str:
        """
        Encrypts message with key using PCBC mode
        :param message: cleartext to encrypt
        :param key: key as a hexadecimal string
        :param iv: initialization vector as a hexadecimal string
        :return: encrypted message
        """
        kasumi_blocks = super()._string_to_blocks(message)
        key = int(key, 16)
        prev_block = int(iv, 16)
        # PCBC: C_i = E(P_i XOR chain); chain_0 is the IV.
        for i in range(len(kasumi_blocks)):
            curr_plaintext_block = kasumi_blocks[i]
            kasumi_blocks[i] = prev_block = self.kasumi.encrypt(kasumi_blocks[i] ^ prev_block, key)
            # Next chain value is C_i XOR P_i (the "propagating" part).
            prev_block ^= curr_plaintext_block
        return super()._blocks_to_string(kasumi_blocks, True)
    def decrypt(self, message: str, key: str, iv: str) -> str:
        """
        Decrypts message with key using PCBC mode
        :param message: ciphertext to decrypt
        :param key: key as a hexadecimal string
        :param iv: initialization vector as a hexadecimal string
        :return: decrypted message
        """
        kasumi_blocks = super()._string_to_blocks(message, True)
        key = int(key, 16)
        prev_block = int(iv, 16)
        # PCBC inverse: P_i = D(C_i) XOR chain; next chain is C_i XOR P_i.
        for i in range(len(kasumi_blocks)):
            curr_block = kasumi_blocks[i]
            kasumi_blocks[i] = self.kasumi.decrypt(curr_block, key) ^ prev_block
            prev_block = curr_block ^ kasumi_blocks[i]
        return super()._blocks_to_string(kasumi_blocks)
```
#### File: app/kasumi/cli.py
```python
from app.kasumi.cipher_mode.cbc import CBC
from app.kasumi.cipher_mode.cfb import CFB
from app.kasumi.cipher_mode.ctr import CTR
from app.kasumi.cipher_mode.gcm import GCM
from app.kasumi.cipher_mode.ofb import OFB
from app.kasumi.cipher_mode.ecb import ECB
from app.kasumi.cipher_mode.pcbc import PCBC
from app.kasumi.kasumi import Kasumi
from os import path
from app.utils.file_manager import file_loader, add_to_filename, write_file, read_file
path_data = path.join(path.abspath(path.dirname(__file__)), '../../data/')
path_keys = path.join(path_data, 'keys/')
path_ivs = path.join(path_data, 'iv/')
available_ciphermodes = {
"ECB": ECB,
"CBC": CBC,
"PCBC": PCBC,
"CFB": CFB,
"OFB": OFB,
"CTR": CTR,
"GCM": GCM,
}
def encrypt():
    """
    CLI to encrypt a file using Kasumi: loads the message, key, cipher mode
    and IV, then writes the result next to the input as '<name>-encrypted'.
    :return:
    """
    # Message
    message, filename, q_pressed = file_loader("Fichier du dossier data à chiffrer (kasumi.txt par défaut) :",
                                               "kasumi.txt",
                                               path_data)
    if q_pressed:
        return
    # Key (128 bits in hexadecimal)
    key, _, q_pressed = file_loader("Clé du dossier data/keys à utiliser (kasumi_key.txt par défaut) :",
                                    "kasumi_key.txt",
                                    path_keys)
    if q_pressed:
        return
    # Cipher mode
    cipher_name, q_pressed = _get_cipher_name()
    if q_pressed:
        return
    # Initialization vector (64 bits in hexadecimal)
    iv, _, q_pressed = file_loader("Vecteur d'initialisation du dossier data/iv à utiliser (iv.txt par défaut, "
                                   "entrée pour ECB) :",
                                   "iv.txt",
                                   path_ivs)
    # BUG FIX: the quit flag from this prompt was previously ignored,
    # unlike every other prompt in this function.
    if q_pressed:
        return
    # Create the cipher for the desired cipher mode
    cipher_mode = available_ciphermodes.get(cipher_name)
    cipher = cipher_mode(Kasumi())
    encrypted = cipher.encrypt(message, key, iv)
    # Add '-encrypted' to the filename
    filename = add_to_filename(filename, '-encrypted')
    write_file(path.join(path_data, filename), encrypted)
    print('Chiffré :')
    print(encrypted)
def decrypt():
    """
    CLI to decrypt a file using Kasumi: loads the message, key, cipher mode
    and IV, then writes the result next to the input as '<name>-decrypted'.
    :return:
    """
    # Message
    message, filename, q_pressed = file_loader(
        "Fichier du dossier data à déchiffrer (kasumi-encrypted.txt par défaut) :",
        "kasumi-encrypted.txt",
        path_data
    )
    if q_pressed:
        return
    # Key
    key, _, q_pressed = file_loader("Clé du dossier data/keys à utiliser (kasumi_key.txt par défaut) :",
                                    "kasumi_key.txt",
                                    path_keys)
    if q_pressed:
        return
    # Cipher mode
    cipher_name, q_pressed = _get_cipher_name()
    if q_pressed:
        return
    # Initialization vector (64 bits in hexadecimal)
    iv, _, q_pressed = file_loader("Vecteur d'initialisation du dossier data/iv à utiliser (iv.txt par défaut, "
                                   "entrée pour ECB) :",
                                   "iv.txt",
                                   path_ivs)
    # BUG FIX: the quit flag from this prompt was previously ignored,
    # unlike every other prompt in this function.
    if q_pressed:
        return
    # Create the cipher for the desired cipher mode
    cipher_mode = available_ciphermodes.get(cipher_name)
    cipher = cipher_mode(Kasumi())
    decrypted = cipher.decrypt(message, key, iv)
    # Add '-decrypted' to the filename
    filename = add_to_filename(filename, '-decrypted')
    write_file(path.join(path_data, filename), decrypted)
    print('Déchiffré :')
    print(decrypted)
def all():
    """
    Runs an encrypt/decrypt round-trip through every Kasumi cipher mode and
    reports whether each one restored the original cleartext.
    (Kept named 'all' because the menu dispatch calls cli.all; note it
    shadows the builtin within this module.)
    :return:
    """
    print('Kasumi\n')
    cleartext = read_file(path.join(path_data, "kasumi.txt"))
    key = read_file(path.join(path_keys, "kasumi_key.txt"))
    iv = read_file(path.join(path_ivs, "iv.txt"))
    kasumi = Kasumi()
    # Replaces seven copy-pasted checks with one loop; the dict preserves
    # insertion order, matching the original ECB..GCM sequence and output.
    for mode_name, mode_cls in available_ciphermodes.items():
        cipher = mode_cls(kasumi)
        if cipher.decrypt(cipher.encrypt(cleartext, key, iv), key, iv) != cleartext:
            print(f'Erreur Kasumi {mode_name}\n')
        else:
            print(f'{mode_name} ok!\n')
def _get_cipher_name() -> (str, bool):
    """
    Get the desired cipher name from the user input
    :return: cipher name and quit boolean
    """
    while True:
        print("Mode de chiffrement à utiliser (ECB (défaut), CBC, PCBC, CFB, OFB, CTR, GCM) :")
        raw = input()
        # BUG FIX: the input used to be upper-cased *before* the quit check
        # ('q' became 'Q'), so quitting from this prompt was impossible.
        if raw.lower() == 'q':
            return '', True
        cipher_name = raw.upper()
        if len(cipher_name) == 0:
            cipher_name = 'ECB'
        if cipher_name in available_ciphermodes:
            return cipher_name, False
        print("Ce mode de chiffrement n'est pas dans la liste")
```
#### File: app/kasumi/galois_field.py
```python
from os import path
import random
from pyfinite import ffield # TODO Create our own implementation !
def multiply_galois_64(a: int, b: int) -> int:
    """
    Multiplies a and b in a Galois Field of 2^64 with a fixed irreducible
    polynomial.
    :param a:
    :param b:
    :return: product of a and b in GF(2^64)
    """
    field = getattr(multiply_galois_64, '_field', None)
    if field is None:
        # PERF FIX: the FField (previously misleadingly named galois_128) was
        # rebuilt on every call even though the polynomial never changes;
        # cache it on the function after the first call.
        field = ffield.FField(64, 2**64 + 2**63 + 2**62 + 2**60 + 2**59 + 2**57 + 2**54 + 2**53 + 2**52 + 2**51 +
                              2**46 + 2**44 + 2**43 + 2**42 + 2**41 + 2**40 + 2**39 + 2**38 + 2**34 + 2**31 + 2**0)
        multiply_galois_64._field = field
    return field.Multiply(a, b)
class GaloisField:
    """
    Manages a Galois Field of the form GF(2^n)
    """
    def __init__(self, bit_length: int = 16, fn_poly: str = 'polynomial.txt'):
        """
        Load a previously chosen (polynomial, generator) pair from fn_poly if
        it matches bit_length; otherwise pick and persist a new one.
        :param bit_length: n in GF(2^n); only 8 and 16 have polynomial tables
        :param fn_poly: filename under data/ caching the chosen polynomial
        """
        # Candidate irreducible polynomials for GF(2^8).
        self.irr_polys_8 = [
            2 ** 8 + 2 ** 4 + 2 ** 3 + 2 ** 2 + 2 ** 0,
            2 ** 8 + 2 ** 5 + 2 ** 3 + 2 ** 1 + 2 ** 0,
            2 ** 8 + 2 ** 6 + 2 ** 4 + 2 ** 3 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 8 + 2 ** 6 + 2 ** 5 + 2 ** 1 + 2 ** 0,
            2 ** 8 + 2 ** 6 + 2 ** 5 + 2 ** 2 + 2 ** 0,
            2 ** 8 + 2 ** 6 + 2 ** 5 + 2 ** 3 + 2 ** 0,
            2 ** 8 + 2 ** 7 + 2 ** 6 + 2 ** 1 + 2 ** 0,
            2 ** 8 + 2 ** 7 + 2 ** 6 + 2 ** 5 + 2 ** 2 + 2 ** 1 + 2 ** 0
        ]
        # Candidate irreducible polynomials for GF(2^16).
        self.irr_polys_16 = [
            2 ** 16 + 2 ** 9 + 2 ** 8 + 2 ** 7 + 2 ** 6 + 2 ** 4 + 2 ** 3 + 2 ** 2 + 2 ** 0,
            2 ** 16 + 2 ** 12 + 2 ** 3 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 12 + 2 ** 7 + 2 ** 2 + 2 ** 0,
            2 ** 16 + 2 ** 13 + 2 ** 12 + 2 ** 10 + 2 ** 9 + 2 ** 7 + 2 ** 6 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 13 + 2 ** 12 + 2 ** 11 + 2 ** 7 + 2 ** 6 + 2 ** 3 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 13 + 2 ** 12 + 2 ** 11 + 2 ** 10 + 2 ** 6 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 14 + 2 ** 10 + 2 ** 8 + 2 ** 3 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 14 + 2 ** 13 + 2 ** 12 + 2 ** 6 + 2 ** 5 + 2 ** 3 + 2 ** 2 + 2 ** 0,
            2 ** 16 + 2 ** 14 + 2 ** 13 + 2 ** 12 + 2 ** 10 + 2 ** 7 + 2 ** 0,
            2 ** 16 + 2 ** 15 + 2 ** 10 + 2 ** 6 + 2 ** 5 + 2 ** 3 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 15 + 2 ** 11 + 2 ** 9 + 2 ** 8 + 2 ** 7 + 2 ** 5 + 2 ** 4 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 15 + 2 ** 11 + 2 ** 10 + 2 ** 7 + 2 ** 6 + 2 ** 5 + 2 ** 3 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 15 + 2 ** 11 + 2 ** 10 + 2 ** 9 + 2 ** 6 + 2 ** 2 + 2 ** 1 + 2 ** 0,
            2 ** 16 + 2 ** 15 + 2 ** 11 + 2 ** 10 + 2 ** 9 + 2 ** 8 + 2 ** 6 + 2 ** 4 + 2 ** 2 + 2 ** 1 + 2 ** 0
        ]
        self.polynomial = 0
        self.generator = 0
        self.bit_length = bit_length
        self.path_poly = path.join(path.join(path.abspath(path.dirname(__file__)), '../../data/'), fn_poly)
        self.galois_field = None
        if path.exists(self.path_poly):
            # Open previously written polynomial setting
            # Cache file format: '<bit_length> <polynomial> <generator>'.
            with open(self.path_poly, 'r') as file_poly:
                line_elems = file_poly.readline().split(' ')
                if int(line_elems[0]) == self.bit_length:
                    self.polynomial = int(line_elems[1])
                    self.generator = int(line_elems[2])
                    self.galois_field = ffield.FField(self.bit_length, self.polynomial)
        if self.polynomial == 0:
            self.__generate_polynomial()
    def __generate_polynomial(self):
        """ Generates a generator element in the polynomial """
        power = 0
        res = 1
        # Select a random generator possibility
        # primes_field, _ = _prime_factorize(2 ** self.bit_length - 1)
        # possibilities = _products(primes_field)
        possibilities = [i for i in range(2, 2 ** self.bit_length - 1)]
        # Select a random irreducible polynomial according to the degree
        if self.bit_length == 16:
            self.polynomial = self.irr_polys_16[random.randint(0, len(self.irr_polys_16) - 1)]
        elif self.bit_length == 8:
            self.polynomial = self.irr_polys_8[random.randint(0, len(self.irr_polys_8) - 1)]
        else:
            # TODO select an irreducible polynomial for degrees other than 16 (not required for now)
            print('Oups')
        self.galois_field = ffield.FField(self.bit_length, self.polynomial)
        while True:
            # Select one random polynomial to check if it is a generator
            elem = possibilities[random.randint(0, len(possibilities) - 1)]
            possibilities.remove(elem) # Remove it from remaining possibilities
            # A generator's multiplicative order is exactly 2^n - 1: the loop
            # breaks as soon as res cycles back to 1.
            for power in range(2 ** self.bit_length):
                res = self.galois_field.Multiply(res, elem)
                if res == 1:
                    break
            if power == (2 ** self.bit_length) - 2: # Generator !!!
                break
        self.generator = elem
        # Persist the chosen (bit_length, polynomial, generator) triple.
        with open(self.path_poly, 'w+') as file_poly:
            file_poly.write(str(self.bit_length) + ' ' + str(self.polynomial) + ' ' + str(self.generator))
    def inverse(self, a: int) -> int:
        """
        Gives the inverse of 'a' in the irreducible polynomial
        :param a: The number to get inverse off of it
        :return: The inverse of 'a'
        """
        return self.galois_field.DoInverseForSmallField(a)
```
#### File: keys_generator/keys_manager/keys_manager.py
```python
import abc
class KeysManager(abc.ABC):
    """
    Abstract interface for a public / private key pair manager.
    Any class exposing the three required callables is accepted as a
    structural subclass via __subclasshook__.
    """
    @classmethod
    def __subclasshook__(cls, subclass):
        required = ('get_public', 'get_public_key', 'get_private_key')
        if all(callable(getattr(subclass, name, None)) for name in required):
            return True
        return NotImplemented
    @abc.abstractmethod
    def get_public(self):
        """
        Gives the public elements of the keys
        :return:
        """
        raise NotImplementedError
    @abc.abstractmethod
    def get_public_key(self):
        """
        Gives the public key
        :return:
        """
        raise NotImplementedError
    @abc.abstractmethod
    def get_private_key(self):
        """
        Gives the private key
        :return:
        """
        raise NotImplementedError
```
#### File: app/utils/bit_operation.py
```python
def add_mod(x: int, y: int, modulo: int = 32) -> int:
    """
    Addition modulo 2**modulo, i.e. keep only the low `modulo` bits of x + y.
    :param x:
    :param y:
    :param modulo: bit width of the result (the modulus is 2**modulo)
    :return: (x + y) mod 2**modulo
    """
    mask = (1 << modulo) - 1
    return (x + y) & mask
def left_circ_shift(x: int, shift: int, n_bits: int) -> int:
    """
    Rotate the n_bits-wide value x left by `shift` bits.
    :param x: A number
    :param shift: The number of bits to rotate by
    :param n_bits: The number of bits of x
    :return: The rotated result
    """
    width_mask = (1 << n_bits) - 1          # ones mask of n_bits bits
    low_part = (x << shift) & width_mask    # bits that stay in range
    high_part = x >> (n_bits - shift)       # bits that wrapped around
    return low_part | high_part
def right_circ_shift(x: int, shift: int, n_bits: int) -> int:
    """
    Rotate the n_bits-wide value x right by `shift` bits.
    :param x: A number
    :param shift: The number of bits to rotate by
    :param n_bits: The number of bits of x
    :return: The rotated result
    """
    width_mask = (1 << n_bits) - 1            # ones mask of n_bits bits
    low_part = x >> shift                     # bits that stay in range
    high_part = (x << (n_bits - shift)) & width_mask  # wrapped-around bits
    return low_part | high_part
def merge_bytes(payload_list: list) -> int:
    """
    Gives an 8 bytes value from a byte list.
    BUG FIX: the original padded the *caller's* list in place with 0x00
    entries; this version works on a copy and leaves the argument untouched.
    :param payload_list: Byte list (only the first 8 entries are used)
    :return: big-endian integer built from 8 bytes (zero-padded on the right)
    """
    padded = list(payload_list[:8])
    padded += [0x00] * (8 - len(padded))
    result = padded[0]
    for byte in padded[1:]:
        result = (result << 8) | byte
    return result
def split_bytes(payload: int) -> bytearray:
    """
    Gives a byte list of an 8 bytes value, most significant byte first.
    :param payload: The 8 bytes value
    :return: bytearray of exactly 8 bytes
    """
    # Extract bytes from the most significant down to the least significant.
    return bytearray((payload >> (8 * (7 - i))) & 0xFF for i in range(8))
```
#### File: app/utils/modular_arithmetic.py
```python
from collections import Counter
def prime_decomposition(n: int) -> (list, list):
    """
    Decompose a number in prime factors.
    BUG FIXES vs the original:
      * '/=' produced floats; '//=' keeps everything an int.
      * the terminal check was 'n != 0', which appended a spurious 1 for
        inputs like perfect powers of primes; 'n > 1' is correct.
      * factors were deduplicated via set() (unordered) while counts came
        from a Counter, so the two lists could be misaligned; Counter keys
        preserve insertion (ascending) order, keeping them aligned.
    :param n: The number to decompose
    :return: List of prime factors (ascending) and list of their multiplicities
    """
    prime_factors = []
    while n % 2 == 0: # Check if divisible by 2
        prime_factors.append(2)
        n //= 2
    factor = 3
    while (factor * factor) <= n:
        if n % factor == 0:
            prime_factors.append(factor)
            n //= factor
        else:
            factor += 2 # Next possible factor
    if n > 1:
        prime_factors.append(n) # Add last prime
    counts = Counter(prime_factors)
    return list(counts.keys()), list(counts.values())
def square_and_multiply(a: int, power: int, modulo: int) -> int:
    """
    Fast modular exponentiation by repeated squaring.
    :param a: base
    :param power: non-negative exponent
    :param modulo: modulus
    :return: a**power mod modulo (returns 1 when power == 0)
    """
    acc = 1
    base = a
    exp = power
    while exp > 0:
        if exp & 1:
            # This bit of the exponent is set: fold the current square in.
            acc = (acc * base) % modulo
        exp >>= 1
        base = (base * base) % modulo
    return acc
def products(el: list) -> list:
    """
    Generates all possible multiplication combinations of given numbers:
    for every subset of `el`, the product of its members (empty subset -> 1).
    :param el: List of numbers
    :return: List of the 2**len(el) subset products, in bitmask order
    """
    combos = []
    for subset_mask in range(1 << len(el)):
        prod = 1
        for idx, value in enumerate(el):
            if subset_mask & (1 << idx):
                # Bit idx set: this element belongs to the current subset.
                prod *= value
        combos.append(int(prod))
    return combos
def gcd_extended(a: int, b: int) -> list:
    """
    Calculates the greatest common divisor of a and b and gives the bezout coefficients.
    Returns [g, x, y]. NOTE(review): spot checks suggest x*a + y*b == g (e.g.
    (3, 2) -> [1, 1, -1]; (3, 7) -> [1, -2, 1]) — confirm before relying on
    the sign convention. The coefficients are tracked unsigned and the sign
    is reapplied at the end based on the iteration parity.
    :param a:
    :param b:
    :return: [gcd, bezout coefficient of a, bezout coefficient of b]
    """
    # Bezout coefficients (magnitudes; signs are resolved on return).
    u_prev = v = 1
    v_prev = u = 0
    round_nb = 1
    while True:
        quotient, remains = divmod(a, b)
        if remains == 0:
            # Parity of the number of Euclidean steps decides which
            # coefficient carries the negative sign.
            if round_nb & 1:
                return [b, -u, v]
            return [b, u, -v]
        # updating coefficients
        u_tmp = u
        v_tmp = v
        u = u * quotient + u_prev
        v = v * quotient + v_prev
        u_prev = u_tmp
        v_prev = v_tmp
        # updating remains
        a = b
        b = remains
        # Increment round number
        round_nb += 1
def gcd(a: int, b: int) -> int:
    """
    Calculates the greatest common divisor of a and b by the Euclidean
    algorithm (b must be non-zero).
    :param a:
    :param b:
    :return: gcd(a, b)
    """
    while True:
        remainder = a % b
        if remainder == 0:
            return b
        a, b = b, remainder
def inverse(a: int, b: int) -> int:
    """
    Calculates the modular inverse of a modulo b via the extended
    Euclidean algorithm.
    :param a:
    :param b:
    :return: the inverse of a, reduced into [0, b)
    """
    _gcd, bezout_a, _bezout_b = gcd_extended(a, b)
    return bezout_a % b
```
|
{
"source": "jcoughlin11/scholarphi",
"score": 2
}
|
#### File: common/commands/locate_entities.py
```python
import glob
import logging
import os.path
import shutil
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from collections import deque
from dataclasses import dataclass
from typing import Any, Callable, Deque, Dict, Iterator, List, Optional, Type
from common import directories, file_utils
from common.colorize_tex import ColorizedTex, ColorizeOptions, colorize_entities
from common.commands.base import ArxivBatchCommand
from common.commands.compile_tex import save_compilation_result
from common.commands.raster_pages import raster_pages
from common.compile import (
compile_tex,
get_last_autotex_compiler,
get_last_colorized_entity_id,
)
from common.diff_images import diff_images_in_raster_dirs
from common.locate_entities import locate_entities
from common.types import (
ArxivId,
ColorizationRecord,
FileContents,
HueLocationInfo,
RelativePath,
SerializableEntity,
)
from common.unpack import unpack
@dataclass(frozen=True)
class LocationTask:
    """One unit of work: locate all detected entities within a single TeX file."""
    arxiv_id: ArxivId
    # Path of the TeX file, relative to the paper's sources directory.
    tex_path: RelativePath
    file_contents: FileContents
    # Entities detected in this TeX file (or with tex_path 'N/A').
    entities: List[SerializableEntity]
ColorizeFunc = Callable[[str, List[SerializableEntity], ColorizeOptions], ColorizedTex]
class LocateEntitiesCommand(ArxivBatchCommand[LocationTask, HueLocationInfo], ABC):
"""
Integrated batch processing for locating entities. Includes adding colorization commands to
the LaTeX, compiling the document, rastering the pages, differencing the pages, and
searching for the colorized entities.
Attempts to be tolerant to colorization faults. The command detects which entity appears
to cause colorization issues (both compilation issues, and formula layout changes), and
isolates those entities to be extracted on their own.
Because of this fault sensitivity, this command merges together many stages of the pipeline
that could theoretically be separated---adding color commands, compiling, rastering pages,
and image processing. They are merged to make it easier to loop back to earlier stages
of the pipeline when errors are detected in later stages.
"""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Explicit two-argument super form, passing self through; kept as
        # written (NOTE(review): plain super().__init__ may suffice — confirm).
        super(LocateEntitiesCommand, LocateEntitiesCommand).__init__(
            self, *args, **kwargs
        )
        entity_name = self.get_entity_name()
        # Output directories are namespaced by entity type so pipelines for
        # different entity types can run over the same paper without clobbering
        # each other's artifacts.
        self.output_base_dirs = {
            "sources": f"sources-with-colorized-{entity_name}",
            "compiled-sources": f"compiled-sources-with-colorized-{entity_name}",
            "paper-images": f"paper-images-with-colorized-{entity_name}",
            "diffed-images": f"diffed-images-with-colorized-{entity_name}",
            "entity-locations": f"{entity_name}-locations",
        }
    @staticmethod
    def init_parser(parser: ArgumentParser) -> None:
        """Register this command's CLI flags on top of the base command's."""
        super(LocateEntitiesCommand, LocateEntitiesCommand).init_parser(parser)
        parser.add_argument(
            "--batch-size",
            type=int,
            default=30,
            help=(
                "Number of entities to detect at a time. This number is limited by the number "
                + "of distinct hues that OpenCV can detect."
            ),
        )
        parser.add_argument(
            "--skip-visual-validation",
            action="store_true",
            help=(
                "Whether to skip visual validation. When visual validation is enabled, the "
                + "paper diff will be checked for black pixels before hues are located. Black "
                + "pixels indicate that the layout of the page changed based on changes made to "
                + "the TeX. If visual validation fails for a diff for a paper, that diff will "
                + "not be processed. Set this flag to skip visual validation and therefore "
                + "process all diffs of all papers regardless of evidence of layout shift."
            ),
        )
    @staticmethod
    @abstractmethod
    def get_entity_name() -> str:
        """
        Get the name of the type of entity that will be batch processed in these commands.
        The name is also used to derive the names of this command's output
        directories (see __init__).
        """
def get_arxiv_ids_dirkey(self) -> str:
return f"detected-{self.get_entity_name()}"
    @staticmethod
    def get_detected_entity_type() -> Type[SerializableEntity]:
        """
        Override this method if you need access to entity data that are present on a subclass of
        'SerializableEntity'. For example, if you need colorization to occur only when equations
        have a specific depth, this function should return the 'Equation' type, so that the 'when'
        colorization callback can have access to the 'depth' property.
        Defaults to the base 'SerializableEntity' type.
        """
        return SerializableEntity
    @staticmethod
    @abstractmethod
    def get_colorize_options() -> ColorizeOptions:
        """
        Override this to set custom options for colorizing entities. One example is that you may
        want to wrap all entities in curly braces before applying colorization commands to them.
        """
    @staticmethod
    @abstractmethod
    def get_colorize_func() -> Optional[ColorizeFunc]:
        """
        Override this when you want to set a custom function for colorizing entities
        (may return None to use the default). One example of when to set this is for
        bibliography items, where colorization commands are to be inserted at the top
        of a TeX file, instead of around the citation commands.
        """
    @staticmethod
    @abstractmethod
    def should_sanity_check_images() -> Optional[bool]:
        """
        Force visual validation of images before locating hues (True), force skipping
        of validation (False), or defer to the command-line flag (None).
        """
def load(self) -> Iterator[LocationTask]:
    """
    Yield one 'LocationTask' per readable TeX file of each requested paper, pairing the
    file's contents with the detected entities that belong to it. Output directories for
    each paper are cleaned before any tasks are yielded for it.
    """
    name = self.get_entity_name()
    entity_type = self.get_detected_entity_type()
    for arxiv_id in self.arxiv_ids:
        # Start each paper from a clean slate in every output directory.
        for base_dir in self.output_base_dirs.values():
            file_utils.clean_directory(directories.arxiv_subdir(base_dir, arxiv_id))

        # A directory of entities may contain files for each of multiple types of entities.
        # One example is that the definition detector detects both terms and definitions.
        # In that case, the colorizer colorizes all entities from all of these files.
        # Earlier entity extractor commands should include enough information in the entity IDs
        # so that the type of entities can be inferred from the entity ID in later commands.
        detected_dir = directories.arxiv_subdir(f"detected-{name}", arxiv_id)
        detected: List[SerializableEntity] = []
        for csv_path in glob.glob(os.path.join(detected_dir, "entities*.csv")):
            detected.extend(file_utils.load_from_csv(csv_path, entity_type))

        sources_dir = directories.arxiv_subdir("sources", arxiv_id)
        for tex_path in file_utils.find_files(sources_dir, [".tex"], relative=True):
            contents = file_utils.read_file_tolerant(
                os.path.join(sources_dir, tex_path)
            )
            if contents is None:
                continue
            # Entities with a tex_path of "N/A" are not tied to a specific file and are
            # included for every file.
            relevant = [e for e in detected if e.tex_path in (tex_path, "N/A")]
            yield LocationTask(arxiv_id, tex_path, contents, relevant)
def process(self, item: LocationTask) -> Iterator[HueLocationInfo]:
    """
    Locate bounding boxes for the entities of one TeX file by colorizing batches of
    entities, recompiling the TeX, rastering the pages to images, and diffing them
    against the originals. Yields one 'HueLocationInfo' per bounding box found.
    When a batch fails (compile error, black pixels, or layout shift), suspect
    entities are re-queued individually and the rest are retried in later batches.
    """
    # Filter out entities that are empty (i.e., have nothing to color)
    # A '-1' in the 'start' or 'end' field indicates that the entity does not occur in a
    # specific place in the TeX, but rather a custom coloring technique based on other
    # entity properties will be used. So entities that have a '-1' for their start and
    # end should still be processed even though they appear to be zero-length.
    entities_filtered = [e for e in item.entities if e.start == -1 or e.end == -1 or e.start != e.end]

    # Sort entities by the order in which they appear in the TeX. This allows the pipeline
    # to keep track of which ones appear first, when trying to recover from errors (i.e., when
    # trying to detect which entity in a batch may have shifted to cause many others to move.)
    entities_ordered = sorted(entities_filtered, key=lambda e: e.start)

    # Construct a queue of entities to detect.
    entities_by_id = {e.id_: e for e in entities_ordered}
    to_process = deque([e.id_ for e in entities_ordered])
    to_process_alone: Deque[str] = deque()

    def next_batch() -> List[str]:
        """
        Get the next batch of entities to process. First tries to sample a batch from
        'to_process', and then attempts to sample individual entities from 'to_process_alone'.
        """
        if len(to_process) > 0:
            return [
                to_process.popleft()
                for _ in range(min(self.args.batch_size, len(to_process)))
            ]
        return [to_process_alone.popleft()]

    batch_index = -1
    while len(to_process) > 0 or len(to_process_alone) > 0:

        # Fetch the next batch of entities to process.
        batch_index += 1
        logging.debug(
            "Locating bounding boxes for batch %d of entities of type %s for paper %s.",
            batch_index,
            self.get_entity_name(),
            item.arxiv_id,
        )
        iteration_id = directories.tex_iteration(item.tex_path, str(batch_index))
        batch = next_batch()
        entities: List[SerializableEntity] = [entities_by_id[id_] for id_ in batch]

        # Colorize the TeX for all the entities.
        custom_colorize_func = self.get_colorize_func()
        if custom_colorize_func is not None:
            colorized_tex = custom_colorize_func(
                item.file_contents.contents, entities, self.get_colorize_options()
            )
            if len(colorized_tex.entity_hues) == 0:
                logging.info(  # pylint: disable=logging-not-lazy
                    "Custom colorization function colored nothing for entity batch %d of "
                    + "paper %s when coloring file %s. The function probably decide there was "
                    + "nothing to do for this file, and will hopefullly colorize these "
                    + "entities in another file. Skipping this batch for this file.",
                    batch_index,
                    item.arxiv_id,
                    item.file_contents.path,
                )
                continue
        else:
            colorized_tex = colorize_entities(
                item.file_contents.contents, entities, self.get_colorize_options()
            )

        # If some entities were skipped during colorization, perhaps because they
        # overlapped with each other, add them back to the work queue.
        if colorized_tex.skipped is not None and len(colorized_tex.skipped) > 0:
            logging.info(  # pylint: disable=logging-not-lazy
                "Entities %s were skipped during colorization batch %d for paper "
                + "%s. They will be processed in a later batch.",
                [e.id_ for e in colorized_tex.skipped],
                batch_index,
                item.arxiv_id,
            )

            # Queue skipped entities in the order that they initially appeared in the batch.
            reprocess_ids = {e.id_ for e in colorized_tex.skipped}
            reprocess_sorted = [id_ for id_ in batch if id_ in reprocess_ids]
            to_process.extendleft(reversed(reprocess_sorted))

            # Remove skipped entities from the current batch.
            for skip in colorized_tex.skipped:
                del batch[batch.index(skip.id_)]

        # Save the colorized TeX to the file system.
        colorized_tex_dir = directories.iteration(
            self.output_base_dirs["sources"], item.arxiv_id, iteration_id
        )
        save_success = save_colorized_tex(
            item.arxiv_id,
            colorized_tex_dir,
            item.tex_path,
            iteration_id,
            colorized_tex.tex,
            item.file_contents.encoding,
            colorized_tex.entity_hues,
        )
        if not save_success:
            logging.error(  # pylint: disable=logging-not-lazy
                "Failed to save colorized TeX files for arXiv paper %s. "
                "This paper will be skipped.",
                item.arxiv_id,
            )
            # NOTE(review): despite the message, execution falls through here and the
            # batch is still compiled — confirm whether a 'continue' was intended.

        # Compile the TeX with the colors.
        compiled_tex_dir = directories.iteration(
            self.output_base_dirs["compiled-sources"], item.arxiv_id, iteration_id,
        )
        shutil.copytree(colorized_tex_dir, compiled_tex_dir)
        compilation_result = compile_tex(compiled_tex_dir)
        save_compilation_result(
            "compiled-sources", item.arxiv_id, compiled_tex_dir, compilation_result
        )
        if not compilation_result.success:

            # If colorizing a specific entity caused the failure, remove the entity that caused
            # the problem from the batch and restart with a new batch, minus this entity.
            last_colorized_entity_id = get_last_colorized_entity(
                item.arxiv_id, compiled_tex_dir
            )
            if last_colorized_entity_id is not None:
                # Suspect both the last entity colorized before the failure and the one
                # immediately after it in the batch.
                problem_ids = [last_colorized_entity_id]
                if batch.index(last_colorized_entity_id) < len(batch) - 1:
                    problem_ids += [batch[batch.index(last_colorized_entity_id) + 1]]

                if len(batch) == 1:
                    logging.warning(  # pylint: disable=logging-not-lazy
                        "Failed to compile paper %s with colorized entity %s, even when it was "
                        + "colorized in isolation. The location of this entity will not be detected.",
                        item.arxiv_id,
                        batch[0]
                    )
                    continue

                logging.warning(  # pylint: disable=logging-not-lazy
                    "Failed to compile paper %s with colorized entities. The culprit may be "
                    + "the colorization command for entity %s. The problematic entities will be "
                    + "colorized on their own, and the rest of the entities will be colorized "
                    + "together in the next batch.",
                    item.arxiv_id,
                    " or ".join(problem_ids),
                )
                for id_ in problem_ids:
                    to_process_alone.append(id_)
                    del batch[batch.index(id_)]
                to_process.extendleft(reversed(batch))
                continue

            # If there was some other reason for the error, remove just the first entity from the batch.
            logging.error(  # pylint: disable=logging-not-lazy
                "Failed to compile paper %s with colorized entities %s. The cause "
                + "is assumed to be in the first colorized entity. The location for the "
                + "first entity %s will not be detected. The remainder of the entities in "
                + "this batch will be processed in another batch.",
                item.arxiv_id,
                batch,
                batch[0],
            )
            # (Equivalent to 'del batch[0]': the brackets form a target list.)
            del [batch[0]]
            to_process.extendleft(reversed(batch))
            continue

        # Raster the pages to images, and compute diffs from the original images.
        output_files = compilation_result.output_files
        raster_output_dir = directories.iteration(
            self.output_base_dirs["paper-images"], item.arxiv_id, iteration_id
        )
        for output_file in output_files:
            raster_success = raster_pages(
                compiled_tex_dir,
                os.path.join(
                    raster_output_dir, directories.escape_slashes(output_file.path)
                ),
                output_file.path,
                output_file.output_type,
            )
            if not raster_success:
                logging.error(  # pylint: disable=logging-not-lazy
                    "Failed to rasterize pages %s iteration %d. The locations for entities "
                    + "with IDs %s with not be detected.",
                    item.arxiv_id,
                    iteration_id,
                    batch,
                )
                # NOTE(review): this 'continue' only skips the current output file,
                # not the whole batch — confirm that is the intent. Also, 'iteration_id'
                # is a string formatted with '%d' above.
                continue

        diffs_output_dir = directories.iteration(
            self.output_base_dirs["diffed-images"], item.arxiv_id, iteration_id
        )
        diff_success = diff_images_in_raster_dirs(
            output_files, raster_output_dir, diffs_output_dir, item.arxiv_id,
        )
        if not diff_success:
            logging.error(  # pylint: disable=logging-not-lazy
                "Failed to difference images of original and colorized versions of "
                + "papers %s in batch processing iteration %d. The locations for entities with IDs "
                + "%s will not be detected.",
                item.arxiv_id,
                iteration_id,
                batch,
            )
            continue

        # Locate the entities in the diffed images.
        entity_hues = colorized_tex.entity_hues
        location_result = locate_entities(
            item.arxiv_id, raster_output_dir, diffs_output_dir, entity_hues
        )
        if location_result is None:
            logging.warning(  # pylint: disable=logging-not-lazy
                "Error occurred when locating entities by hue in diffed images "
                + "for paper %s. None of the entities in batch %s will be detected.",
                item.arxiv_id,
                batch,
            )
            continue

        if self.should_sanity_check_images() and location_result.black_pixels_found:
            logging.warning(  # pylint: disable=logging-not-lazy
                "Ignoring bounding boxes found for paper %s in batch %s due to "
                + "black pixels found in the images. This might indicate that the colorization "
                + "commands introduced subtle shifts of the text.",
                item.arxiv_id,
                batch,
            )
            continue

        # If colorizing entities seemed to cause drift in the document...
        if len(location_result.shifted_entities) > 0:
            logging.warning(  # pylint: disable=logging-not-lazy
                "Some entities shifted position in the colorized TeX for paper %s batch %s: "
                + "%s. Attempting to remove the first shifted entity from the batch.",
                item.arxiv_id,
                batch,
                location_result.shifted_entities,
            )
            # Find the earliest entity (in batch order) that shifted.
            first_shifted_entity_id = None
            for entity_id in batch:
                if entity_id in location_result.shifted_entities:
                    first_shifted_entity_id = entity_id
                    break

            if first_shifted_entity_id is not None:
                if len(batch) > 1:
                    logging.info(  # pylint: disable=logging-not-lazy
                        "Entity %s has been marked as being the potential cause of shifting in "
                        + "the colorized document for paper %s batch %d. It will be processed "
                        + "later on its own. The other shifted entities in %s will be queued to "
                        + "process as a group in an upcoming batch.",
                        first_shifted_entity_id,
                        item.arxiv_id,
                        batch_index,
                        location_result.shifted_entities,
                    )

                    # Get the index of the first entity for which the location has shifted
                    # during colorization.
                    moved_entity_index = batch.index(first_shifted_entity_id)

                    # Mark all other entities that have shifted after the first one one to be processed
                    # in a later batch (instead of on their own). It could be that they won't shift
                    # once the first shifted entity is removed.
                    # (Iterate backwards so deletions don't invalidate indices.)
                    for i in range(len(batch) - 1, moved_entity_index, -1):
                        if batch[i] in location_result.shifted_entities:
                            to_process.appendleft(batch[i])
                            del batch[i]

                    # Mark the first entity that shifted to be reprocessed alone, where its position
                    # might be discoverable, without affecting the positions of other element.
                    del batch[moved_entity_index]
                    to_process_alone.append(first_shifted_entity_id)

                elif len(batch) == 1 and self.should_sanity_check_images():
                    logging.info(  # pylint: disable=logging-not-lazy
                        "Skipping entity %s for paper %s as it caused "
                        + "colorization errors even when colorized in isolation.",
                        first_shifted_entity_id,
                        item.arxiv_id,
                    )
                    continue
                elif len(batch) == 1:
                    logging.info(  # pylint: disable=logging-not-lazy
                        "Entity %s has been marked as the cause of shifting in "
                        + "the colorized document for paper %s. Its location will "
                        + "still be saved (if one was found), though this location should be "
                        + "considered potentially inaccurate.",
                        first_shifted_entity_id,
                        item.arxiv_id,
                    )
            else:
                logging.warning(  # pylint: disable=logging-not-lazy
                    "Could not find a single entity that was likely responsible for shifting in "
                    + "the colorized version of paper %s batch %d. All entities in batch %s will "
                    + "be processed on their own.",
                    item.arxiv_id,
                    batch_index,
                    batch,
                )
                to_process_alone.extend(batch)
                # NOTE(review): 'batch' is not cleared in this branch, so these entities'
                # locations are still yielded below AND the entities are reprocessed alone
                # later — confirm whether duplicate results are intended.

        # The code above is responsible for filter 'batch' to ensure that it doesn't include
        # any entity IDs that shouldn't be save to file, for example if the client has asked that
        # entity IDs that cause colorization errors be omitted from the results.
        for entity_id in batch:
            for box in location_result.locations[entity_id]:
                yield HueLocationInfo(
                    tex_path=item.tex_path,
                    iteration=iteration_id,
                    hue=entity_hues[entity_id],
                    entity_id=entity_id,
                    page=box.page,
                    left=box.left,
                    top=box.top,
                    width=box.width,
                    height=box.height,
                )
def save(self, item: LocationTask, result: HueLocationInfo) -> None:
    """Append a located bounding box to the paper's entity-locations CSV file."""
    logging.debug(
        "Found bounding box for %s entity %s in iteration %s, hue %f",
        item.arxiv_id,
        result.entity_id,
        result.iteration,
        result.hue,
    )
    locations_dir = directories.arxiv_subdir(
        self.output_base_dirs["entity-locations"], item.arxiv_id
    )
    os.makedirs(locations_dir, exist_ok=True)
    file_utils.append_to_csv(
        os.path.join(locations_dir, "entity_locations.csv"), result
    )
# ID of an entity, as assigned by the entity detector.
EntityId = str


def get_last_colorized_entity(
    arxiv_id: ArxivId, compilation_path: RelativePath
) -> Optional[EntityId]:
    """
    Determine the ID of the last entity that was colorized before a compilation failure,
    by comparing the AutoTeX logs of the original compilation with those of the colorized
    compilation at 'compilation_path'. Returns None if either log is missing or cannot
    be interpreted; in that case the caller's entity batching may be less efficient, but
    processing can continue.
    """
    original_compilation_path = directories.arxiv_subdir("compiled-sources", arxiv_id)
    original_autogen_log_path = os.path.join(
        original_compilation_path, "auto_gen_ps.log"
    )

    error_message = None
    if not os.path.exists(original_autogen_log_path):
        # Fix: the second literal previously lacked an 'f' prefix, so '{arxiv_id}'
        # was emitted literally in the log message.
        error_message = (
            f"Could not find auto_gen_ps.log output from AutoTeX at {original_autogen_log_path}. "
            + f"Has the original TeX for paper {arxiv_id} been compiled?"
        )
    new_autogen_log_path = os.path.join(compilation_path, "auto_gen_ps.log")
    if not os.path.exists(new_autogen_log_path):
        # Fix: this message previously interpolated 'original_autogen_log_path',
        # although the file that is missing here is 'new_autogen_log_path'.
        error_message = (
            f"Could not find auto_gen_ps.log output from AutoTeX at {new_autogen_log_path}. "
            + f"There may have been an error running AutoTeX on a colorized copy of paper {arxiv_id}."
        )
    if error_message is not None:
        logging.warning(  # pylint: disable=logging-not-lazy
            error_message
            + "It will not be possible to determine what compiler succeeded at compiling the "
            + "original paper, and therefore to determine which entities may have been "
            + "responsible for compilation failure. Entity batching may be less efficient.",
        )
        return None

    with open(original_autogen_log_path) as file_:
        original_autogen_log = file_.read()
    with open(new_autogen_log_path) as file_:
        new_autogen_log = file_.read()

    # Get the name of the TeX compiler that successfully compiled the original TeX.
    compiler_name = get_last_autotex_compiler(original_autogen_log)
    if compiler_name is None:
        logging.warning(  # pylint: disable=logging-not-lazy
            "Could not find the name of the TeX compiler that compiled the original TeX by "
            + "scanning the logs at %s. It will not be possible to determine what was the last "
            + "entity colorized before the compilation failure. Entity batching may be less efficient.",
            original_autogen_log_path,
        )
        return None

    # Get the ID of the last entity that was colorized before compilation failure.
    last_colorized_entity_id = get_last_colorized_entity_id(
        new_autogen_log, compiler_name
    )
    if last_colorized_entity_id is not None:
        logging.debug(  # pylint: disable=logging-not-lazy
            "Entity '%s' was the last entity colorized before compilation failure in "
            + "directory %s. The colorization of this entity may be responsible for the "
            + "compilation error.",
            last_colorized_entity_id,
            original_autogen_log_path,
        )
    else:
        logging.warning(  # pylint: disable=logging-not-lazy
            "Unable to determine what was the last entity colorized before compilation failure "
            + "in source directory %s from log %s for compiler '%s'. Entity batching may be less efficient.",
            compilation_path,
            new_autogen_log_path,
            compiler_name,
        )
    return last_colorized_entity_id
def save_colorized_tex(
    arxiv_id: ArxivId,
    output_sources_path: RelativePath,
    tex_path: RelativePath,
    iteration: str,
    tex: str,
    encoding: str,
    entity_hues: Dict[str, float],
) -> bool:
    """
    Unpack a fresh copy of the paper's sources into 'output_sources_path', overwrite
    'tex_path' within it with the colorized 'tex', and record the hue assigned to each
    entity in an 'entity_hues.csv' log. Returns False if the sources could not be
    unpacked (the colorized TeX is not saved in that case), True otherwise.
    """
    logging.debug("Outputting colorized TeX to %s.", output_sources_path)

    # Each colorization batch gets a new sources directory.
    unpack_path = unpack(arxiv_id, output_sources_path)
    if unpack_path is None:
        logging.warning("Could not unpack sources into %s.", output_sources_path)
        return False
    # (The original code kept a 'sources_unpacked' flag here; after the early return
    # above it was always True, so the flag and its branch have been removed.)

    # Rewrite the TeX with the colorized TeX.
    colorized_tex_path = os.path.join(output_sources_path, tex_path)
    with open(colorized_tex_path, "w", encoding=encoding) as tex_file:
        tex_file.write(tex)

    # Save a log of which hues were assigned to which entities.
    hues_path = os.path.join(output_sources_path, "entity_hues.csv")
    for entity_id, hue in entity_hues.items():
        file_utils.append_to_csv(
            hues_path,
            ColorizationRecord(
                # NOTE: records the joined (output-directory) path, preserving the
                # original behavior, which shadowed 'tex_path' with the joined path.
                tex_path=colorized_tex_path,
                iteration=str(iteration),
                hue=hue,
                entity_id=entity_id,
            ),
        )
    return True
def make_locate_entities_command(
    entity_name: str,
    DetectedEntityType: Optional[Type[SerializableEntity]] = None,
    # NOTE(review): a single shared ColorizeOptions instance is the default for every
    # call of this factory (default evaluated once) — confirm ColorizeOptions is immutable.
    colorize_options: ColorizeOptions = ColorizeOptions(),
    colorize_func: Optional[ColorizeFunc] = None,
    sanity_check_images: Optional[bool] = None,
) -> Type[LocateEntitiesCommand]:
    """
    Create a command for locating the bounding boxes for entities. Help the command cast
    the entities loaded into the right data type by providing a 'DetectedEntityType'.
    Colorization of entities can be customized, either by providing a unique 'colorize_func',
    or by providing a set of 'colorize_options'. Specify 'sanity_check_images' to force
    visual validation of image differences. Bounding boxes will be omitted for entities
    when unexpected visual artifacts are found in image differences.
    """

    class C(LocateEntitiesCommand):
        # Concrete subclass whose methods simply return the arguments captured
        # from the factory call above.
        @staticmethod
        def get_name() -> str:
            return f"locate-bounding-boxes-for-{entity_name}"

        @staticmethod
        def get_description() -> str:
            return f"Find bounding boxes of {entity_name}."

        @staticmethod
        def get_entity_name() -> str:
            return entity_name

        @staticmethod
        def get_detected_entity_type() -> Type[SerializableEntity]:
            # Fall back to the base class's default entity type when none was given.
            if DetectedEntityType is None:
                return super(C, C).get_detected_entity_type()
            return DetectedEntityType

        @staticmethod
        def get_colorize_options() -> ColorizeOptions:
            return colorize_options

        @staticmethod
        def get_colorize_func() -> Optional[ColorizeFunc]:
            return colorize_func

        @staticmethod
        def should_sanity_check_images() -> Optional[bool]:
            return sanity_check_images

    return C
```
#### File: sentences/commands/extract_contexts.py
```python
import logging
import os.path
from abc import abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterator, List, Optional, Type
from common import directories, file_utils
from common.colorize_tex import wrap_span
from common.commands.base import ArxivBatchCommand
from common.types import ArxivId, RelativePath, SerializableEntity
from ..types import Context, Sentence, TexWrapper
@dataclass(frozen=True)
class Task:
    # One unit of context-extraction work: all detected entities and sentences
    # belonging to a single TeX file of a single arXiv paper.
    arxiv_id: ArxivId
    tex_path: RelativePath
    entities: List[SerializableEntity]
    sentences: List[Sentence]


# ID of a sentence, as assigned by the sentence detector.
SentenceId = str

# Key used to group entities that should be treated as semantically equivalent
# (see 'ExtractContextsCommand.get_key').
EntityKey = Any
class ExtractContextsCommand(ArxivBatchCommand[Task, Context]):
    """
    Extract contexts in which entities appear in the paper. This command is meant to be
    used as a part of the pipelines for collecting descriptive data for other entities.
    """

    @abstractmethod
    def get_entity_name(self) -> str:
        " Get the key for the type of entity for which contexts will be extracted. "

    @staticmethod
    def get_entity_type() -> Type[SerializableEntity]:
        """
        Override this method if you need access to entity data that are present on a subclass of
        'SerializableEntity'. For example, to have access to the MathML property on a symbol during
        comparison of symbols in the 'compare' callback, override this method to return the type
        'SerializableSymbol'.
        """
        return SerializableEntity

    @abstractmethod
    def get_wrapper(
        self, entity: SerializableEntity
    ) -> Optional[TexWrapper]:  # pylint: disable=unused-argument
        """
        Override this method to insert custom TeX before and after each appearance of the entity in
        the TeX, i.e., to add custom styling around the entity where it appears in the TeX.
        Return None to leave appearances unwrapped.
        """

    def get_key(self, entity: SerializableEntity) -> Any:
        """
        Get key that can be used to compare entities for semantic equality. Used when composing
        text snippets to determine whether two entities refer to the same thing. For example,
        a key for symbols might be the symbol's MathML, so that other symbols with the same
        MathML also get highlighted in the snippet. Override this to use a custom key.
        """
        return entity.tex

    def load(self) -> Iterator[Task]:
        """Yield one context-extraction 'Task' per TeX file of each requested paper."""
        for arxiv_id in self.arxiv_ids:
            output_dir = directories.arxiv_subdir(
                f"contexts-for-{self.get_entity_name()}", arxiv_id
            )
            file_utils.clean_directory(output_dir)

            # Load entities from file.
            entities_path = os.path.join(
                directories.arxiv_subdir(
                    f"detected-{self.get_entity_name()}", arxiv_id
                ),
                "entities.csv",
            )
            entities = list(
                file_utils.load_from_csv(entities_path, self.get_entity_type())
            )

            # Load sentences from file.
            sentences_path = os.path.join(
                directories.arxiv_subdir("detected-sentences", arxiv_id), "entities.csv"
            )
            try:
                sentences = list(file_utils.load_from_csv(sentences_path, Sentence))
            except FileNotFoundError:
                logging.warning(  # pylint: disable=logging-not-lazy
                    "No sentences data found for arXiv paper %s. Try re-running the pipeline, "
                    + "this time enabling the processing of sentences. If that doesn't work, "
                    + "there was likely an error in detecting sentences for this paper.",
                    arxiv_id,
                )
                continue

            # Group entities and sentences by TeX file, one task per file.
            tex_paths = {e.tex_path for e in entities}
            for tex_path in tex_paths:
                entities_for_file = [e for e in entities if e.tex_path == tex_path]
                sentences_for_file = [s for s in sentences if s.tex_path == tex_path]
                yield Task(arxiv_id, tex_path, entities_for_file, sentences_for_file)

    def process(self, item: Task) -> Iterator[Context]:
        """
        Group entities by the sentence that contains them (and by semantic key within each
        sentence), then yield one 'Context' per entity, whose snippet is the sentence TeX
        with all equivalent entities wrapped (highlighted).
        """
        sentences_ordered = iter(sorted(item.sentences, key=lambda s: s.start))
        sentences_by_id = {s.id_: s for s in item.sentences}
        entities_ordered = iter(sorted(item.entities, key=lambda e: e.start))

        # Entities, grouped by similarity and sentence.
        sentence_entities: Dict[
            SentenceId, Dict[EntityKey, List[SerializableEntity]]
        ] = {}

        if len(item.sentences) == 0:
            logging.warning(  # pylint: disable=logging-not-lazy
                "No sentences found for file %s for arXiv ID %s. Skipping detection of sentences "
                + "that contain entities.",
                item.tex_path,
                item.arxiv_id,
            )
            return
        if len(item.entities) == 0:
            logging.warning(  # pylint: disable=logging-not-lazy
                "No entities found for file %s for arXiv ID %s for sentence detection task."
                + "Skipping detection of sentences for this entity for this paper.",
                item.tex_path,
                item.arxiv_id,
            )
            return

        # Sweep over sentences and entities in lockstep (both sorted by start offset),
        # assigning each entity to the sentence that fully contains it. The loop ends
        # when either iterator is exhausted.
        sentence = next(sentences_ordered)
        sentence_entities[sentence.id_] = defaultdict(list)
        entity = next(entities_ordered)

        while True:
            try:
                if entity.start < sentence.start:
                    # Entity starts before the current sentence: it falls in a gap
                    # between sentences and cannot be assigned.
                    logging.warning(  # pylint: disable=logging-not-lazy
                        "Could not find sentence for entity from character %d to %d for arXiv "
                        + "paper %s.",
                        entity.start,
                        entity.end,
                        item.arxiv_id,
                    )
                    entity = next(entities_ordered)
                elif entity.start >= sentence.start and entity.end <= sentence.end:
                    # Entity lies entirely within the current sentence.
                    sentence_entities[sentence.id_][self.get_key(entity)].append(entity)
                    entity = next(entities_ordered)
                else:
                    # Entity starts after this sentence ends: advance to the next sentence.
                    sentence = next(sentences_ordered)
                    sentence_entities[sentence.id_] = defaultdict(list)
            except StopIteration:
                break

        for sentence_id in sentence_entities:
            sentence = sentences_by_id[sentence_id]
            for entity_key in sentence_entities[sentence_id]:
                entities = sentence_entities[sentence_id][entity_key]

                # Assemble a snippet for this sentences with entity appearances highlighted.
                # Wrap all repeat appearances of the same entity in a tag that can be used
                # by the KaTeX browser-based LaTeX renderer to style the matches.
                snippet = sentence.tex
                # Wrap from the end of the sentence backwards so earlier offsets stay valid.
                for entity in sorted(entities, key=lambda e: e.start, reverse=True):
                    start_in_snippet = entity.start - sentence.start
                    end_in_snippet = entity.end - sentence.start
                    tex_wrapper = self.get_wrapper(entity)
                    if tex_wrapper is not None:
                        snippet = wrap_span(
                            snippet,
                            start_in_snippet,
                            end_in_snippet,
                            before=tex_wrapper.before,
                            after=tex_wrapper.after,
                            braces=tex_wrapper.braces,
                        )

                for entity in entities:
                    neighbor_ids = [e.id_ for e in entities if e != entity]
                    yield Context(
                        tex_path=entity.tex_path,
                        entity_id=entity.id_,
                        sentence_id=sentence_id,
                        snippet=snippet,
                        neighbor_entity_ids=neighbor_ids,
                    )

    def save(self, item: Task, result: Context) -> None:
        """Append one extracted context to the paper's 'contexts.csv'."""
        output_dir = directories.arxiv_subdir(
            f"contexts-for-{self.get_entity_name()}", item.arxiv_id
        )
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        contexts_path = os.path.join(output_dir, "contexts.csv")
        file_utils.append_to_csv(contexts_path, result)
# Signature of a function that computes a grouping key for an entity.
EntityKeyFunc = Callable[[SerializableEntity], Any]


def make_extract_contexts_command(
    entity_name: str,
    entity_key: Optional[EntityKeyFunc] = None,
    tex_wrapper: Optional[TexWrapper] = None,
    EntityType: Optional[Type[SerializableEntity]] = None,
) -> Type[ExtractContextsCommand]:
    """
    Create a concrete 'ExtractContextsCommand' subclass for entities of type
    'entity_name'. 'entity_key', 'tex_wrapper', and 'EntityType' customize entity
    grouping, snippet highlighting, and CSV deserialization respectively; each falls
    back to the base-class behavior when omitted.
    """

    class C(ExtractContextsCommand):
        @staticmethod
        def get_name() -> str:
            return f"extract-contexts-for-{entity_name}"

        def get_entity_name(self) -> str:
            return entity_name

        @staticmethod
        def get_entity_type() -> Type[SerializableEntity]:
            # Fall back to the base class's default entity type when none was given.
            if EntityType is None:
                return super(C, C).get_entity_type()
            return EntityType

        def get_key(self, entity: SerializableEntity) -> Any:
            # 'super(C, C).get_key' resolves the base implementation as a plain
            # function, hence 'self' is passed explicitly.
            if entity_key is None:
                return super(C, C).get_key(self, entity)
            return entity_key(entity)

        def get_wrapper(self, entity: SerializableEntity) -> Optional[TexWrapper]:
            return tex_wrapper

        @staticmethod
        def get_description() -> str:
            return f"Extract contexts for each appearance of {entity_name}."

        def get_arxiv_ids_dirkey(self) -> str:
            return self.get_detected_entities_dirkey()

        def get_detected_entities_dirkey(self) -> str:
            return f"detected-{entity_name}"

        def get_output_base_dirkey(self) -> str:
            return f"sentences-for-{entity_name}"

    return C
```
#### File: symbols/commands/upload_symbols.py
```python
import logging
import os.path
from collections import defaultdict
from typing import Callable, Dict, Iterator, Optional
from common import directories, file_utils
from common.colorize_tex import wrap_span
from common.commands.database import DatabaseUploadCommand
from common.s2_data import get_s2_id
from common.types import (
BoundingBox,
EntityData,
EntityInformation,
EntityReference,
EntityRelationships,
Match,
Matches,
SymbolId,
SymbolLocation,
)
from common.upload_entities import upload_entities
from entities.sentences.types import Context
from ..types import SymbolData, DefiningFormula
class UploadSymbols(DatabaseUploadCommand[SymbolData, None]):
    """Upload extracted symbol data (locations, contexts, defining formulas, and MathML
    matches) to the database."""

    @staticmethod
    def get_name() -> str:
        return "upload-symbols"

    @staticmethod
    def get_description() -> str:
        return "Upload symbols to the database."

    def get_arxiv_ids_dirkey(self) -> str:
        return "sources"

    def load(self) -> Iterator[SymbolData]:
        """
        Assemble all data needed to upload symbols for each paper. Papers missing
        required inputs (S2 ID, symbols, bounding boxes, or matches) are skipped;
        missing contexts only produce a warning.
        """
        for arxiv_id in self.arxiv_ids:
            s2_id = get_s2_id(arxiv_id)
            if s2_id is None:
                continue

            symbols_with_ids = file_utils.load_symbols(arxiv_id)
            if symbols_with_ids is None:
                continue
            symbols_by_id = {s.symbol_id: s.symbol for s in symbols_with_ids}

            # Load one bounding box per symbol from the symbol-locations CSV.
            boxes: Dict[SymbolId, BoundingBox] = {}
            boxes_path = os.path.join(
                directories.arxiv_subdir("symbol-locations", arxiv_id),
                "symbol_locations.csv",
            )
            if not os.path.exists(boxes_path):
                logging.warning(
                    "Could not find bounding boxes information for %s. Skipping",
                    arxiv_id,
                )
                continue
            for location in file_utils.load_from_csv(boxes_path, SymbolLocation):
                symbol_id = SymbolId(
                    tex_path=location.tex_path,
                    equation_index=location.equation_index,
                    symbol_index=location.symbol_index,
                )
                box = BoundingBox(
                    page=int(location.page),
                    left=location.left,
                    top=location.top,
                    width=location.width,
                    height=location.height,
                )
                boxes[symbol_id] = box

            # Group near-matches by the MathML that was queried.
            matches: Matches = {}
            matches_path = os.path.join(
                directories.arxiv_subdir("symbol-matches", arxiv_id), "matches.csv"
            )
            if not os.path.exists(matches_path):
                logging.warning(
                    "Could not find symbol matches information for %s. Skipping",
                    arxiv_id,
                )
                continue
            for match in file_utils.load_from_csv(matches_path, Match):
                if match.queried_mathml not in matches:
                    matches[match.queried_mathml] = []
                matches[match.queried_mathml].append(match)

            context_data_missing = False
            contexts_path = os.path.join(
                directories.arxiv_subdir("contexts-for-symbols", arxiv_id),
                "contexts.csv",
            )
            if not os.path.exists(contexts_path):
                logging.warning(  # pylint: disable=logging-not-lazy
                    "Contexts have not been found for symbols for arXiv paper %s. "
                    + "Symbol data will be uploaded without contexts.",
                    arxiv_id,
                )
                context_data_missing = True

            # Index contexts both by symbol ID and by the symbol's MathML, so contexts
            # of visually identical symbols can be shared.
            symbol_contexts = {}
            mathml_contexts = defaultdict(list)
            if not context_data_missing:
                for context in file_utils.load_from_csv(contexts_path, Context):
                    tex_path = context.tex_path
                    # Context entity IDs have the form "<equation index>-<symbol index>".
                    equation_index, symbol_index = [
                        int(t) for t in context.entity_id.split("-")
                    ]
                    symbol_id = SymbolId(tex_path, equation_index, symbol_index)
                    symbol_contexts[symbol_id] = context
                    symbol = symbols_by_id[symbol_id]
                    mathml_contexts[symbol.mathml].append(context)

            # Collect defining formulas for symbols marked as definitions, with the
            # defined symbol highlighted inside its equation.
            symbol_formulas = {}
            mathml_formulas = defaultdict(set)
            for id_, symbol in symbols_by_id.items():
                if (
                    symbol.is_definition
                    and symbol.equation is not None
                    and symbol.relative_start is not None
                    and symbol.relative_end is not None
                ):
                    highlighted = wrap_span(
                        symbol.equation,
                        symbol.relative_start,
                        symbol.relative_end,
                        before=r"\htmlClass{match-highlight}{",
                        after="}",
                        braces=True,
                    )
                    formula = DefiningFormula(
                        tex=highlighted,
                        tex_path=id_.tex_path,
                        equation_id=id_.equation_index,
                    )
                    symbol_formulas[id_] = formula
                    mathml_formulas[symbol.mathml].add(formula)

            yield SymbolData(
                arxiv_id,
                s2_id,
                symbols_with_ids,
                boxes,
                symbol_contexts,
                symbol_formulas,
                mathml_contexts,
                mathml_formulas,
                matches,
            )

    def process(self, _: SymbolData) -> Iterator[None]:
        # All the work happens in 'load' and 'save'; nothing to compute here.
        yield None

    def save(self, item: SymbolData, _: None) -> None:
        """Build one 'EntityInformation' record per symbol and upload them all."""
        symbols_with_ids = item.symbols_with_ids
        boxes = item.boxes
        matches = item.matches
        symbol_contexts = item.symbol_contexts
        mathml_contexts = item.mathml_contexts
        symbol_formulas = item.symbol_formulas
        mathml_formulas = item.mathml_formulas

        # Map each symbol object (by object identity) back to its ID, used to
        # resolve a symbol's children to their IDs below.
        symbol_ids_by_symbol_object_ids = {}
        for symbol_with_id in symbols_with_ids:
            symbol_ids_by_symbol_object_ids[
                id(symbol_with_id.symbol)
            ] = symbol_with_id.symbol_id

        entity_infos = []

        for symbol_with_id in symbols_with_ids:
            symbol = symbol_with_id.symbol
            # TODO(andrewhead): move this filtering condition into 'parse_equation'
            if symbol.tex in ["$|$", "|"]:
                continue

            symbol_id = symbol_with_id.symbol_id

            # Get context and formula of the symbol, and other matching ones.
            context = symbol_contexts.get(symbol_id)
            matching_contexts = mathml_contexts.get(symbol.mathml, [])
            other_context_texs = []
            other_context_sentence_ids = []
            for c in matching_contexts:
                matching_sentence_id = f"{c.tex_path}-{c.sentence_id}"
                if (
                    matching_sentence_id not in other_context_sentence_ids
                    # and c.sentence_id != context.sentence_id
                ):
                    other_context_texs.append(c.snippet)
                    other_context_sentence_ids.append(matching_sentence_id)

            formula = symbol_formulas.get(symbol_id)
            matching_formulas = mathml_formulas.get(symbol.mathml, [])
            other_formula_texs = []
            other_formula_ids = []
            for f in matching_formulas:
                equation_id = f"{f.tex_path}-{f.equation_id}"
                if equation_id not in other_formula_ids:
                    # and (
                    # : formula is None or equation_id != formula.equation_id
                    # )
                    other_formula_texs.append(f.tex)
                    other_formula_ids.append(equation_id)

            # Symbols without a located bounding box are not uploaded.
            box = boxes.get(symbol_id)
            if box is None:
                continue

            data: EntityData = {
                "tex": f"${symbol.tex}$",
                "tex_start": symbol.start,
                "tex_end": symbol.end,
                "mathml": symbol.mathml,
                "mathml_near_matches": [
                    m.matching_mathml for m in matches[symbol.mathml]
                ],
                # "snippet": context.snippet,
                "snippets": other_context_texs,
                "defining_formulas": other_formula_texs,
                "is_definition": symbol.is_definition or False,
            }
            # if formula is not None:
            #     data['formula'] = formula.tex

            create_symbol_id_string: Callable[[SymbolId], str] = (
                lambda sid: f"{sid.tex_path}-{sid.equation_index}-{sid.symbol_index}"
            )

            sentence_id = (
                f"{context.tex_path}-{context.sentence_id}"
                if context is not None
                else None
            )

            # Find this symbol's parent by scanning every symbol's child list for it.
            parent_id: Optional[str] = None
            for other_symbol_with_id in symbols_with_ids:
                other_symbol_id = other_symbol_with_id.symbol_id
                other_symbol = other_symbol_with_id.symbol
                try:
                    other_symbol.children.index(symbol)
                    parent_id = create_symbol_id_string(other_symbol_id)
                except ValueError:
                    continue

            child_ids = []
            for child_symbol in symbol.children:
                child_symbol_id = symbol_ids_by_symbol_object_ids[id(child_symbol)]
                string_id = create_symbol_id_string(child_symbol_id)
                child_ids.append(string_id)

            relationships: EntityRelationships = {
                "equation": EntityReference(
                    type_="equation",
                    id_=f"{symbol_id.tex_path}-{symbol_id.equation_index}",
                ),
                "parent": EntityReference(type_="symbol", id_=parent_id),
                "children": [
                    EntityReference(type_="symbol", id_=id_) for id_ in child_ids
                ],
                "sentence": EntityReference(type_="sentence", id_=sentence_id)
                if sentence_id is not None
                else EntityReference(type_="sentence", id_=None),
                "defining_formula_equations": [
                    EntityReference(type_="equation", id_=id_)
                    for id_ in other_formula_ids
                ],
                "snippet_sentences": [
                    EntityReference(type_="sentence", id_=id_)
                    for id_ in other_context_sentence_ids
                ],
                # "snippet_sentence": EntityReference(
                #     type_="sentence", id_=f"{symbol_id.tex_path}-f{context.sentence_id}"
                # )
                # if context is not None
                # else None,
                # "formula_equation": EntityReference(
                #     type_="equation",
                #     id_=f"{symbol_id.tex_path}-f{formula.equation_id}"
                #     if formula is not None
                #     else None,
                # ),
            }

            entity_information = EntityInformation(
                id_=f"{symbol_id.tex_path}-{symbol_id.equation_index}-{symbol_id.symbol_index}",
                type_="symbol",
                bounding_boxes=[box],
                data=data,
                relationships=relationships,
            )
            entity_infos.append(entity_information)

        upload_entities(item.s2_id, item.arxiv_id, entity_infos, self.args.data_version)
```
#### File: data-processing/tests/test_colorize_sentences.py
```python
from entities.sentences.colorize import adjust_color_positions
from entities.sentences.types import Sentence
def create_sentence(tex: str, start: int) -> Sentence:
    """Build a minimal Sentence spanning *tex* at character offset *start*.

    Every field that is irrelevant to the colorization tests is filled with
    a fixed placeholder value.
    """
    placeholder_fields = dict(
        tex_path="main.tex",
        id_=0,
        context_tex="<extracted tex>",
        text="<plaintext>",
        text_journal=None,
        sanitized="<sanitized>",
        sanitized_journal=None,
        validity_guess=True,
        section_name="<current section>",
        in_figure=False,
        in_table=False,
        in_itemize=False,
        label=[],
        ref=[],
        cite=[],
        url=[],
        others=[],
    )
    return Sentence(start=start, end=start + len(tex), tex=tex, **placeholder_fields)
def test_colorize_at_sentence_bounds():
    """Coloring a plain sentence should span exactly its own characters."""
    tex = "This is a sentence."
    positions = adjust_color_positions(create_sentence(tex, start=100))
    assert (positions.start, positions.end) == (100, 100 + len(tex))
def test_adjust_color_start_to_within_equation():
    """A leading equation forces the color start to move *inside* the math."""
    sentence = create_sentence(
        "$x$ is an equation at the start of the sentence.", start=0
    )
    positions = adjust_color_positions(sentence)
    # Rather than coloring from position 0 (the '$' that opens the equation),
    # the start must be nudged to fall within the equation.
    assert positions.start != 0
    assert (positions.start, positions.end) == (1, 48)
def test_adjust_color_end_to_within_equation():
    """A trailing equation forces the color end to move inside the math."""
    # The sentence deliberately lacks a final period: when a sentence ends in
    # a period, placing the coloring commands after the period is fine; the
    # problematic position is directly after an equation.
    sentence = create_sentence("This sentence ends with the equation $x$", start=0)
    positions = adjust_color_positions(sentence)
    assert positions.end != 40
    assert (positions.start, positions.end) == (0, 39)
def test_no_adjust_color_positions_if_equation_within_text():
    """Math in the middle of a sentence needs no boundary adjustment."""
    tex = "This sentence has the equation $x$ in the middle."
    positions = adjust_color_positions(create_sentence(tex, start=0))
    assert (positions.start, positions.end) == (0, len(tex))
def test_adjust_color_to_within_equations_even_if_sentence_starts_or_ends_with_space():
    """Boundary adjustment still applies when whitespace pads the equations."""
    tex = (
        " $x$ is the equation that opens the text, even though it appears after a space, and"
        + "this sentence ends with an equation followed by space $x$ "
    )
    positions = adjust_color_positions(create_sentence(tex, start=0))
    assert (positions.start, positions.end) == (4, 141)
```
#### File: data-processing/tests/test_extract_abbreviations.py
```python
import pytest
from entities.abbreviations.extractor import AbbreviationExtractor
extractor = None
@pytest.fixture(scope="module", autouse=True)
def setup_extractor():
    """
    Create the shared extractor exactly once for the whole module.

    Constructing an AbbreviationExtractor initializes an NLP model, which is
    time-consuming, so all tests share one instance via the module global.
    """
    global extractor  # pylint: disable=global-statement
    extractor = AbbreviationExtractor()
def assert_abbreviation(abb, id_, text, start, end, expansion, tex):
    """Assert that abbreviation instance *abb* carries exactly the given values."""
    checks = (
        (abb.id_, id_, "ID Incorrect"),
        (abb.text, text, "Abbreviation Short Form Incorrect"),
        (abb.start, start, "Abbreviation Start Position Incorrect"),
        (abb.end, end, "Abbreviation End Position Incorrect"),
        (abb.expansion, expansion, "Abbreviation Expansion Incorrect"),
        (abb.tex, tex, "Abbreviation Instance Incorrect"),
    )
    for actual, expected, message in checks:
        assert actual == expected, message
def test_basic():
    """Most basic plaintext case: two expansions and one abbreviated use of each."""
    abbs = list(
        extractor.parse(
            "",
            "Natural Language Processing (NLP) is a sub-field of artificial "
            + "intelligence (AI).",
        )
    )
    # this ensures if you have a different yield order it still works
    abbs.sort(key=lambda x: x.start)
    assert len(abbs) == 4, "Incorrect Number of Abbreviations Detected"
    # Expansion spans and abbreviated uses are checked in document order;
    # start/end are character offsets into the plaintext above.
    assert_abbreviation(
        abbs[0],
        "expansion-0",
        "NLP",
        0,
        27,
        "Natural Language Processing",
        "Natural Language Processing",
    )
    assert_abbreviation(
        abbs[1],
        "abbreviation-0-0",
        "NLP",
        29,
        32,
        "Natural Language Processing",
        "NLP",
    )
    assert_abbreviation(
        abbs[2],
        "expansion-1",
        "AI",
        52,
        75,
        "artificial intelligence",
        "artificial intelligence",
    )
    assert_abbreviation(
        abbs[3], "abbreviation-1-0", "AI", 77, 79, "artificial intelligence", "AI",
    )
def test_multiple():
    """Fail-safe example: repeated short forms across two sentences."""
    passage = (
        "Natural Language Processing (NLP) is a sub-field of artificial intelligence (AI).\n"
        + " AI can also be applied to other areas that are not NLP."
    )
    abbs = sorted(extractor.parse("", passage), key=lambda abb: abb.start)
    assert len(abbs) == 6
    # Spot-check the second occurrences of each short form.
    assert_abbreviation(
        abbs[4], "abbreviation-1-1", "AI", 83, 85, "artificial intelligence", "AI"
    )
    assert_abbreviation(
        abbs[5],
        "abbreviation-0-1",
        "NLP",
        134,
        137,
        "Natural Language Processing",
        "NLP",
    )
def test_latex():
    """More realistic example: the input contains LaTeX formatting that the
    extractor must see through when computing character offsets."""
    abbs = list(
        extractor.parse(
            "",
            "Natural Language Processing \\textbf{(NLP)} is a sub-field of artificial intelligence (AI).\n"
            + " Here's a random equation: $E=mc^2$ to throw you off. AI can also be applied to \\textbf{c}omputer \\textbf{v}ision (CV).",
        )
    )
    abbs.sort(key=lambda x: x.start)
    assert len(abbs) == 7
    assert_abbreviation(
        abbs[0],
        "expansion-0",
        "NLP",
        0,
        27,
        "Natural Language Processing",
        "Natural Language Processing",
    )
    assert_abbreviation(
        abbs[1],
        "abbreviation-0-0",
        "NLP",
        37,
        40,
        "Natural Language Processing",
        "NLP",
    )
    assert_abbreviation(
        abbs[2],
        "expansion-1",
        "AI",
        61,
        84,
        "artificial intelligence",
        "artificial intelligence",
    )
    assert_abbreviation(
        abbs[3], "abbreviation-1-0", "AI", 86, 88, "artificial intelligence", "AI"
    )
    # NOTE(review): abbs[4] (presumably the second "AI" occurrence) is left
    # unchecked here -- confirm against the extractor output.
    assert_abbreviation(
        abbs[5],
        "expansion-2",
        "CV",
        179,
        204,
        "computer vision",
        "c}omputer \\textbf{v}ision",
    )
    assert_abbreviation(
        abbs[6], "abbreviation-2-0", "CV", 206, 208, "computer vision", "CV"
    )
```
|
{
"source": "jcoughlin11/yt",
"score": 2
}
|
#### File: jcoughlin11/yt/conftest.py
```python
import os
import shutil
import tempfile
from importlib.util import find_spec
from pathlib import Path
import matplotlib
import pytest
import yaml
from packaging.version import Version
from yt.config import ytcfg
from yt.utilities.answer_testing.testing_utilities import (
_compare_raw_arrays,
_hash_results,
_save_raw_arrays,
_save_result,
_streamline_for_io,
data_dir_load,
)
MPL_VERSION = Version(matplotlib.__version__)
def pytest_addoption(parser):
    """
    Register the answer-testing command-line and ini-file options.
    """
    parser.addoption(
        "--with-answer-testing",
        action="store_true",
        help="Run the answer tests.",
    )
    parser.addoption(
        "--answer-store",
        action="store_true",
        help="Save hashed answers instead of comparing against them.",
    )
    # This option is queried by pytest_collection_modifyitems; without this
    # registration, config.getoption("--answer-big-data") raises ValueError.
    parser.addoption(
        "--answer-big-data",
        action="store_true",
        help="Run answer tests that require large data files.",
    )
    parser.addoption(
        "--answer-raw-arrays",
        action="store_true",
        help="Also save or compare the raw (unhashed) answer arrays.",
    )
    parser.addoption(
        "--raw-answer-store",
        action="store_true",
        help="Save raw answer arrays instead of comparing against them.",
    )
    parser.addoption(
        "--force-overwrite",
        action="store_true",
        help="Allow overwriting existing answer files.",
    )
    parser.addoption(
        "--no-hash",
        action="store_true",
        help="Do not hash the test results.",
    )
    parser.addoption("--local-dir", default=None, help="Where answers are saved.")
    # Tell pytest about the local-dir option in the ini files. This
    # option is used for creating the answer directory on CI
    parser.addini(
        "local-dir",
        default=str(Path(__file__).parent / "answer-store"),
        help="answer directory.",
    )
    parser.addini(
        "test_data_dir",
        default=ytcfg.get("yt", "test_data_dir"),
        help="Directory where data for tests is stored.",
    )
def pytest_configure(config):
    r"""
    Configure pytest for the yt test suite.

    Registers the custom ``answer_test`` and ``big_data`` marks and installs
    the suite's warning filters: most warnings are promoted to errors, with
    a curated ignore list whose entries depend on the installed matplotlib
    version and on which optional packages (astropy, cartopy) are available.
    """
    ytcfg["yt", "internals", "within_pytest"] = True
    # Register custom marks for answer tests and big data
    config.addinivalue_line("markers", "answer_test: Run the answer tests.")
    config.addinivalue_line(
        "markers", "big_data: Run answer tests that require large data files."
    )
    for value in (
        # treat most warnings as errors
        "error",
        # >>> internal deprecation warnings with no obvious solution
        # see https://github.com/yt-project/yt/issues/3381
        (
            r"ignore:The requested field name 'pd?[xyz]' is ambiguous and corresponds "
            "to any one of the following field types.*:yt._maintenance.deprecation.VisibleDeprecationWarning"
        ),
        # >>> warnings emitted by testing frameworks, or in testing contexts
        # we still have some yield-based tests, awaiting for transition into pytest
        "ignore::pytest.PytestCollectionWarning",
        # imp is used in nosetest
        "ignore:the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses:DeprecationWarning",
        # the deprecation warning message for imp changed in Python 3.10, so we ignore both versions
        "ignore:the imp module is deprecated in favour of importlib and slated for removal in Python 3.12; see the module's documentation for alternative uses:DeprecationWarning",
        # matplotlib warnings related to the Agg backend which is used in CI, not much we can do about it
        "ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning",
        "ignore:tight_layout . falling back to Agg renderer:UserWarning",
        #
        # >>> warnings from wrong values passed to numpy
        # these should normally be curated out of the test suite but they are too numerous
        # to deal with in a reasonable time at the moment.
        "ignore:invalid value encountered in log10:RuntimeWarning",
        "ignore:divide by zero encountered in log10:RuntimeWarning",
        "ignore:invalid value encountered in true_divide:RuntimeWarning",
        #
        # >>> there are many places in yt (most notably at the frontend level)
        # where we open files but never explicitly close them
        # Although this is in general bad practice, it can be intentional and
        # justified in contexts where reading speeds should be optimized.
        # It is not clear at the time of writing how to approach this,
        # so I'm going to ignore this class of warnings altogether for now.
        "ignore:unclosed file.*:ResourceWarning",
    ):
        config.addinivalue_line("filterwarnings", value)
    if MPL_VERSION < Version("3.0.0"):
        config.addinivalue_line(
            "filterwarnings",
            (
                "ignore:Using or importing the ABCs from 'collections' instead of from 'collections.abc' "
                "is deprecated since Python 3.3,and in 3.9 it will stop working:DeprecationWarning"
            ),
        )
    # at the time of writing, astropy's wheels are behind numpy's latest
    # version but this doesn't cause actual problems in our test suite, so
    # we allow this warning to pass.
    # last checked with astropy 4.2.1
    config.addinivalue_line(
        "filterwarnings",
        (
            "ignore:numpy.ndarray size changed, may indicate binary incompatibility. "
            "Expected 80 from C header, got 88 from PyObject:RuntimeWarning"
        ),
    )
    if find_spec("astropy") is not None:
        # astropy triggers this warning from itself, there's not much we can do on our side
        # last checked with astropy 4.2.1
        config.addinivalue_line(
            "filterwarnings", "ignore::astropy.wcs.wcs.FITSFixedWarning"
        )
    if find_spec("cartopy") is not None:
        # cartopy still triggers this numpy warning
        # last checked with cartopy 0.19.0
        config.addinivalue_line(
            "filterwarnings",
            (
                "ignore:`np.float` is a deprecated alias for the builtin `float`. "
                "To silence this warning, use `float` by itself. "
                "Doing this will not modify any behavior and is safe. "
                "If you specifically wanted the numpy scalar type, use `np.float64` here."
                ":DeprecationWarning: "
            ),
        )
        # this warning *still* shows up on cartopy 0.19 so we'll ignore it
        config.addinivalue_line(
            "filterwarnings",
            (
                r"ignore:The default value for the \*approx\* keyword argument to "
                r"\w+ will change from True to False after 0\.18\.:UserWarning"
            ),
        )
        # this one could be resolved by upgrading PROJ on Jenkins,
        # but there's isn't much else that can be done about it.
        config.addinivalue_line(
            "filterwarnings",
            (
                "ignore:The Stereographic projection in Proj older than 5.0.0 incorrectly "
                "transforms points when central_latitude=0. Use this projection with caution.:UserWarning"
            ),
        )
def pytest_collection_modifyitems(config, items):
    r"""
    Decide which collected tests to skip based on command-line options.

    - answer tests run only when ``--with-answer-testing`` is given
    - big-data answer tests additionally require ``--answer-big-data``
    - unit tests are skipped when answer tests are being run
    """
    # Set up the skip marks
    skip_answer = pytest.mark.skip(reason="--with-answer-testing not set.")
    skip_unit = pytest.mark.skip(reason="Running answer tests, so skipping unit tests.")
    skip_big = pytest.mark.skip(reason="--answer-big-data not set.")
    answer_testing = config.getoption("--with-answer-testing")
    # ``--answer-big-data`` may not be registered by pytest_addoption; passing
    # a default prevents config.getoption from raising ValueError in that case.
    big_data = config.getoption("--answer-big-data", default=False)
    # Loop over every collected test function
    for item in items:
        is_answer_test = "answer_test" in item.keywords
        # If it's an answer test and the appropriate CL option hasn't
        # been set, skip it
        if is_answer_test and not answer_testing:
            item.add_marker(skip_answer)
        # If it's an answer test that requires big data and the CL
        # option hasn't been set, skip it
        if "big_data" in item.keywords and not answer_testing and not big_data:
            item.add_marker(skip_big)
        # When running answer tests, skip the plain unit tests
        if not is_answer_test and answer_testing:
            item.add_marker(skip_unit)
def _param_list(request):
    r"""
    Collect the non-ds, non-fixture arguments of the current test for the
    answer file.

    pytest treats parametrized arguments as fixtures, so there is no clean
    way to tell them apart from real fixtures; the known fixture names are
    therefore excluded explicitly.
    """
    excluded = frozenset(
        [
            "hashing",
            "answer_file",
            "request",
            "answer_compare",
            "temp_dir",
            "orbit_traj",
            "etc_traj",
        ]
    )
    test_params = {
        # For plotwindow, the callback arg is a tuple whose second element
        # contains a memory address; keep only the first element (the name).
        key: str(val[0] if key == "callback" else val)
        for key, val in request.node.funcargs.items()
        if key not in excluded
    }
    # Convert python-specific data objects (such as tuples) to a more
    # io-friendly format so the answer yaml file carries no python-specific
    # anchors
    return _streamline_for_io(test_params)
def _get_answer_files(request):
    """
    Get the paths where the hashed and raw answers are saved.

    The directory comes from ``--local-dir`` when given on the command line,
    otherwise from the ``local-dir`` ini option. Raises FileExistsError when
    storing would clobber an existing answer file without ``--force-overwrite``;
    with ``--force-overwrite``, the stale file is removed up front.
    """
    answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.yaml"
    raw_answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.h5"
    # A command-line value overrides the ini file value
    local_dir = request.config.getoption("--local-dir")
    if local_dir is None:
        local_dir = request.config.getini("local-dir")
    local_dir = os.path.expanduser(local_dir)
    answer_file = os.path.join(local_dir, answer_file)
    raw_answer_file = os.path.join(local_dir, raw_answer_file)
    # Make sure we don't overwrite unless we mean to
    overwrite = request.config.getoption("--force-overwrite")
    storing = request.config.getoption("--answer-store")
    raw_storing = request.config.getoption("--raw-answer-store")
    raw = request.config.getoption("--answer-raw-arrays")
    if os.path.exists(answer_file) and storing:
        if not overwrite:
            raise FileExistsError(
                "Use `--force-overwrite` to overwrite an existing answer file."
            )
        os.remove(answer_file)
    if os.path.exists(raw_answer_file) and raw_storing and raw:
        if not overwrite:
            raise FileExistsError(
                "Use `--force-overwrite` to overwrite an existing raw answer file."
            )
        os.remove(raw_answer_file)
    # NOTE: a stray debug print of the answer file path was removed here.
    return answer_file, raw_answer_file
@pytest.fixture(scope="function")
def hashing(request):
    r"""
    Handles initialization, generation, and saving of answer test
    result hashes.

    Setup: resolves the answer-file paths once per class and, when comparing
    hashed answers, loads the previously saved hashes. Teardown: hashes the
    values the test stored in ``request.cls.hashes`` and, depending on the
    command-line options, saves or compares the hashed and/or raw results.
    """
    no_hash = request.config.getoption("--no-hash")
    store_hash = request.config.getoption("--answer-store")
    raw = request.config.getoption("--answer-raw-arrays")
    raw_store = request.config.getoption("--raw-answer-store")
    # This check is so that, when checking if the answer file exists in
    # _get_answer_files, we don't continuously fail. With this check,
    # _get_answer_files is called once per class, despite this having function
    # scope
    if request.cls.answer_file is None:
        request.cls.answer_file, request.cls.raw_answer_file = _get_answer_files(
            request
        )
    if not no_hash and not store_hash and request.cls.saved_hashes is None:
        try:
            with open(request.cls.answer_file) as fd:
                request.cls.saved_hashes = yaml.safe_load(fd)
        except FileNotFoundError:
            # Record the missing class in a per-process file so the answers
            # can be (re)generated afterwards, then fail the test.
            module_filename = f"{request.function.__module__.replace('.', os.sep)}.py"
            with open(f"generate_test_{os.getpid()}.txt", "a") as fp:
                fp.write(f"{module_filename}::{request.cls.__name__}\n")
            pytest.fail(msg="Answer file not found.", pytrace=False)
    request.cls.hashes = {}
    # Load the saved answers if we're comparing. We don't do this for the raw
    # answers because those are huge
    yield
    # Get arguments and their values passed to the test (e.g., axis, field, etc.)
    params = _param_list(request)
    # Hash the test results. Don't save to request.cls.hashes so we still have
    # raw data, in case we want to work with that
    hashes = _hash_results(request.cls.hashes)
    # Add the other test parameters
    hashes.update(params)
    # Add the function name as the "master" key to the hashes dict
    hashes = {request.node.name: hashes}
    # Save hashes
    if not no_hash and store_hash:
        _save_result(hashes, request.cls.answer_file)
    # Compare hashes
    elif not no_hash and not store_hash:
        try:
            for test_name, test_hash in hashes.items():
                assert test_name in request.cls.saved_hashes
                assert test_hash == request.cls.saved_hashes[test_name]
        except AssertionError:
            pytest.fail(f"Comparison failure: {request.node.name}", pytrace=False)
    # Save raw data
    if raw and raw_store:
        _save_raw_arrays(
            request.cls.hashes, request.cls.raw_answer_file, request.node.name
        )
    # Compare raw data. This is done one test at a time because the
    # arrays can get quite large and storing everything in memory would
    # be bad
    if raw and not raw_store:
        _compare_raw_arrays(
            request.cls.hashes, request.cls.raw_answer_file, request.node.name
        )
@pytest.fixture(scope="function")
def temp_dir():
    r"""
    Run the test from a scratch directory.

    When the GENERATE_YTDATA environment variable is set, the current working
    directory is used (and preserved); otherwise a temporary directory is
    created, handed to the test, and removed afterwards.
    """
    original_dir = os.getcwd()
    keep_in_cwd = bool(int(os.environ.get("GENERATE_YTDATA", 0)))
    work_dir = original_dir if keep_in_cwd else tempfile.mkdtemp()
    os.chdir(work_dir)
    yield work_dir
    os.chdir(original_dir)
    if work_dir != original_dir:
        shutil.rmtree(work_dir)
@pytest.fixture(scope="class")
def ds(request):
    """Load the dataset named by the (indirect) fixture parameter.

    The parameter is either a filename string, or a [filename, options] pair
    where the options dict may carry "cls", "args", and "kwargs" to forward
    to data_dir_load. Skips the test when the data file is absent.
    """
    if isinstance(request.param, str):
        ds_fn, opts = request.param, {}
    else:
        ds_fn, opts = request.param
    try:
        return data_dir_load(
            ds_fn, cls=opts.get("cls"), args=opts.get("args"), kwargs=opts.get("kwargs")
        )
    except FileNotFoundError:
        return pytest.skip(f"Data file: `{request.param}` not found.")
@pytest.fixture(scope="class")
def field(request):
    """Pass through the parametrized field.

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
@pytest.fixture(scope="class")
def dobj(request):
    """Pass through the parametrized data object (ds_obj).

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
@pytest.fixture(scope="class")
def axis(request):
    """Pass through the parametrized axis.

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
@pytest.fixture(scope="class")
def weight(request):
    """Pass through the parametrized weight_field.

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
@pytest.fixture(scope="class")
def ds_repr(request):
    """Pass through the parametrized string representation of a dataset.

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
@pytest.fixture(scope="class")
def Npart(request):
    """Pass through the parametrized particle count of a dataset.

    Exists because the datasets are loaded with indirect=True, which requires
    every parametrized argument to have a fixture of its own.
    """
    return request.param
```
#### File: yt/_maintenance/backports.py
```python
import sys
if sys.version_info < (3, 8):
    # Backport of functools.cached_property, which was added in Python 3.8.
    # The implementation mirrors CPython's; it is deliberately kept
    # line-for-line rather than "improved", so behavior matches the stdlib.
    from _thread import RLock
    from typing import List

    GenericAlias = type(List[int])
    # Sentinel distinguishing "not cached yet" from a cached None.
    _NOT_FOUND = object()

    class cached_property:
        """Descriptor caching the wrapped method's result per instance.

        The computed value is stored in the instance ``__dict__`` under the
        attribute name, so subsequent attribute lookups bypass the
        descriptor entirely.
        """

        def __init__(self, func):
            self.func = func
            self.attrname = None
            self.__doc__ = func.__doc__
            # Serializes the first computation across threads.
            self.lock = RLock()

        def __set_name__(self, owner, name):
            if self.attrname is None:
                self.attrname = name
            elif name != self.attrname:
                raise TypeError(
                    "Cannot assign the same cached_property to two different names "
                    f"({self.attrname!r} and {name!r})."
                )

        def __get__(self, instance, owner=None):
            if instance is None:
                return self
            if self.attrname is None:
                raise TypeError(
                    "Cannot use cached_property instance without calling __set_name__ on it."
                )
            try:
                cache = instance.__dict__
            except AttributeError:  # not all objects have __dict__ (e.g. class defines slots)
                msg = (
                    f"No '__dict__' attribute on {type(instance).__name__!r} "
                    f"instance to cache {self.attrname!r} property."
                )
                raise TypeError(msg) from None
            val = cache.get(self.attrname, _NOT_FOUND)
            if val is _NOT_FOUND:
                with self.lock:
                    # check if another thread filled cache while we awaited lock
                    val = cache.get(self.attrname, _NOT_FOUND)
                    if val is _NOT_FOUND:
                        val = self.func(instance)
                        try:
                            cache[self.attrname] = val
                        except TypeError:
                            msg = (
                                f"The '__dict__' attribute on {type(instance).__name__!r} instance "
                                f"does not support item assignment for caching {self.attrname!r} property."
                            )
                            raise TypeError(msg) from None
            return val

        __class_getitem__ = classmethod(GenericAlias)
```
#### File: yt/tests/test_load_sample.py
```python
import logging
import os
import re
import sys
import textwrap
import numpy as np
import pytest
from yt.config import ytcfg
from yt.loaders import load_sample
from yt.sample_data.api import get_data_registry_table
from yt.testing import requires_module_pytest
from yt.utilities.logger import ytLogger
@pytest.fixture()
def tmp_data_dir(tmp_path):
    """Temporarily point yt's test_data_dir at a pytest-managed tmp path."""
    saved_data_dir = ytcfg["yt", "test_data_dir"]
    ytcfg.set("yt", "test_data_dir", str(tmp_path))
    yield tmp_path
    ytcfg.set("yt", "test_data_dir", saved_data_dir)
@pytest.fixture()
def capturable_logger(caplog):
    """
    Make yt's logger visible to pytest's caplog fixture.

    yt's logger does not propagate to the root logger, so caplog would
    otherwise record nothing; propagation is restored on teardown.
    """
    saved_propagate = ytLogger.propagate
    ytLogger.propagate = True
    with caplog.at_level(logging.INFO):
        yield
    ytLogger.propagate = saved_propagate
@requires_module_pytest("pandas", "pooch")
@pytest.mark.usefixtures("capturable_logger")
@pytest.mark.parametrize(
    "fn, archive, exact_loc, class_",
    [
        (
            "ToroShockTube",
            "ToroShockTube.tar.gz",
            "ToroShockTube/DD0001/data0001",
            "EnzoDataset",
        ),
        (
            "KeplerianDisk",
            "KeplerianDisk.tar.gz",
            "KeplerianDisk/disk.out1.00000.athdf",
            "AthenaPPDataset",
        ),
        # trying with an exact name as well
        (
            "KeplerianDisk/disk.out1.00000.athdf",
            "KeplerianDisk.tar.gz",
            "KeplerianDisk/disk.out1.00000.athdf",
            "AthenaPPDataset",
        ),
        # check this special case because it relies on implementations
        # details in the AMRVAC frontend (using parfiles)
        # and could easily fail to load. See GH PR #3343
        (
            "rmi_dust_2d",
            "rmi_dust_2d.tar.gz",
            "rmi_dust_2d/output0001.dat",
            "AMRVACDataset",
        ),
    ],
)
def test_load_sample_small_dataset(
    fn, archive, exact_loc, class_: str, tmp_data_dir, caplog
):
    """Download a small sample dataset, checking both the dataset class and
    the log messages emitted on a cold load versus a cached reload."""
    ds = load_sample(fn, progressbar=False, timeout=30)
    assert type(ds).__name__ == class_
    # The first (cold) load should log a lookup, a download, and an untar step.
    text = textwrap.dedent(
        f"""
        '{fn.replace('/', os.path.sep)}' is not available locally. Looking up online.
        Downloading from https://yt-project.org/data/{archive}
        Untaring downloaded file to '{str(tmp_data_dir)}'
        """
    ).strip("\n")
    expected = [("yt", 20, message) for message in text.split("\n")]
    assert caplog.record_tuples[:3] == expected
    caplog.clear()
    # loading a second time should not result in a download request
    ds2 = load_sample(fn)
    assert type(ds2).__name__ == class_
    assert caplog.record_tuples[0] == (
        "yt",
        20,
        f"Sample dataset found in '{os.path.join(str(tmp_data_dir), *exact_loc.split('/'))}'",
    )
@requires_module_pytest("pandas", "pooch")
@pytest.mark.usefixtures("capturable_logger")
def test_load_sample_timeout(tmp_data_dir, caplog):
    """An absurdly small timeout should surface a requests timeout error."""
    # requests is a direct dependency of pooch, so it doesn't need its own
    # decorator mark here.
    from requests.exceptions import ConnectTimeout, ReadTimeout

    with pytest.raises((ConnectTimeout, ReadTimeout)):
        load_sample("IsolatedGalaxy", progressbar=False, timeout=0.00001)
@requires_module_pytest("pandas", "requests")
@pytest.mark.xfail(reason="Registry is currently incomplete.")
def test_registry_integrity():
    """The data registry should contain no missing (NaN) entries."""
    reg = get_data_registry_table()
    # ``any(reg.isna())`` would iterate over *column labels* (always truthy
    # strings), never inspecting the values; reduce over the whole frame
    # with axis=None instead.
    assert not reg.isna().any(axis=None)
@pytest.fixture()
def sound_subreg():
    """Restrict the registry to the rows whose sizes can actually be parsed.

    The full dataset registry is incomplete, so only entries with a non-null
    "size" are usable for the size-related tests.
    """
    reg = get_data_registry_table()
    usable = reg["size"].notna()
    return reg[["size", "byte_size"]][usable]
@requires_module_pytest("pandas", "requests")
def test_registry_byte_size_dtype(sound_subreg):
    """byte_size must use pandas' nullable 64-bit integer dtype."""
    from pandas import Int64Dtype

    assert sound_subreg["byte_size"].dtype == Int64Dtype()
@requires_module_pytest("pandas", "requests")
def test_registry_byte_size_sign(sound_subreg):
    """Every recorded byte size must be strictly positive."""
    np.testing.assert_array_less(0, sound_subreg["byte_size"])
@requires_module_pytest("pandas", "requests")
def test_unknown_filename():
    """A name absent from the registry raises an explanatory ValueError."""
    fake_name = "these_are_not_the_files_your_looking_for"
    expected = f"'{fake_name}' is not an available dataset."
    with pytest.raises(ValueError, match=expected):
        load_sample(fake_name)
@requires_module_pytest("pandas", "requests")
def test_typo_filename():
    """A near-miss name raises a ValueError suggesting the closest match."""
    wrong_name = "Isolatedgalaxy"
    expected = re.escape(
        f"'{wrong_name}' is not an available dataset. Did you mean 'IsolatedGalaxy' ?"
    )
    with pytest.raises(ValueError, match=expected):
        load_sample(wrong_name, timeout=1)
@pytest.fixture()
def fake_data_dir_in_a_vaccum(tmp_path):
    """Point yt at a non-existent data dir and run from a scratch cwd.

    The misspelled fixture name ("vaccum") is kept as-is: tests reference it
    by string via pytest.mark.usefixtures.
    """
    saved_data_dir = ytcfg["yt", "test_data_dir"]
    saved_cwd = os.getcwd()
    ytcfg.set("yt", "test_data_dir", "/does/not/exist")
    os.chdir(tmp_path)
    yield
    ytcfg.set("yt", "test_data_dir", saved_data_dir)
    os.chdir(saved_cwd)
@pytest.mark.skipif(
    sys.platform.startswith("win"),
    reason="can't figure out how to match the warning message in a cross-platform way",
)
@requires_module_pytest("pandas", "pooch")
@pytest.mark.usefixtures("fake_data_dir_in_a_vaccum")
def test_data_dir_broken():
    """load_sample falls back to the cwd when test_data_dir is unusable.

    With the storage directory misconfigured, the download should land in the
    current working directory and a warning should be issued.
    """
    expected_warning = (
        r"Storage directory from yt config doesn't exist "
        r"\(currently set to '/does/not/exist'\)\. "
        r"Current working directory will be used instead\."
    )
    with pytest.warns(UserWarning, match=expected_warning):
        load_sample("ToroShockTube")
```
#### File: visualization/tests/test_line_annotation_unit.py
```python
import numpy as np
from yt.loaders import load_uniform_grid
from yt.visualization.plot_window import ProjectionPlot
def test_ds_arr_invariance_under_projection_plot(tmp_path):
    """annotate_line must not mutate the unit arrays passed to it."""
    shape = (10, 10, 10)
    field_values = np.random.random(shape)
    bbox = np.array([[-100, 100], [-100, 100], [-100, 100]])
    ds = load_uniform_grid(
        {("gas", "density"): (field_values, "g*cm**(-3)")},
        shape,
        length_unit="kpc",
        bbox=bbox,
    )
    start = ds.arr(np.array((0, 0, -0.5)), "unitary")
    end = ds.arr(np.array((0, 0, 0.5)), "unitary")
    start_before = start.copy()
    end_before = end.copy()
    plot = ProjectionPlot(ds, 0, "number_density")
    plot.annotate_line(start, end)
    plot.save(tmp_path)
    # for lack of a unyt.testing.assert_unit_array_equal function
    for before, after in ((start_before, start), (end_before, end)):
        np.testing.assert_array_equal(before, after)
        assert before.units == after.units
```
|
{
"source": "jcourt562/aries-cloudagent-python",
"score": 2
}
|
#### File: v1_0/models/credential_exchange.py
```python
from typing import Any
from marshmallow import fields, validate
from .....config.injection_context import InjectionContext
from .....messaging.models.base_record import BaseExchangeRecord, BaseExchangeSchema
from .....messaging.valid import INDY_CRED_DEF_ID, INDY_SCHEMA_ID, UUIDFour
class V10CredentialExchange(BaseExchangeRecord):
    """Represents an Aries#0036 credential exchange."""
    class Meta:
        """CredentialExchange metadata."""
        schema_class = "V10CredentialExchangeSchema"
    RECORD_TYPE = "credential_exchange_v10"
    RECORD_ID_NAME = "credential_exchange_id"
    WEBHOOK_TOPIC = "issue_credential"
    TAG_NAMES = {"thread_id"}
    # Which side started the exchange
    INITIATOR_SELF = "self"
    INITIATOR_EXTERNAL = "external"
    # This agent's role in the exchange
    ROLE_ISSUER = "issuer"
    ROLE_HOLDER = "holder"
    # Exchange states, listed in protocol order
    STATE_PROPOSAL_SENT = "proposal_sent"
    STATE_PROPOSAL_RECEIVED = "proposal_received"
    STATE_OFFER_SENT = "offer_sent"
    STATE_OFFER_RECEIVED = "offer_received"
    STATE_REQUEST_SENT = "request_sent"
    STATE_REQUEST_RECEIVED = "request_received"
    STATE_ISSUED = "credential_issued"
    STATE_CREDENTIAL_RECEIVED = "credential_received"
    STATE_ACKED = "credential_acked"
    def __init__(
        self,
        *,
        credential_exchange_id: str = None,
        connection_id: str = None,
        thread_id: str = None,
        parent_thread_id: str = None,
        initiator: str = None,
        role: str = None,
        state: str = None,
        credential_definition_id: str = None,
        schema_id: str = None,
        credential_proposal_dict: dict = None,  # serialized credential proposal message
        credential_offer: dict = None,  # indy credential offer
        credential_request: dict = None,  # indy credential request
        credential_request_metadata: dict = None,
        credential_id: str = None,
        raw_credential: dict = None,  # indy credential as received
        credential: dict = None,  # indy credential as stored
        revoc_reg_id: str = None,
        revocation_id: str = None,
        auto_offer: bool = False,
        auto_issue: bool = False,
        auto_remove: bool = True,
        error_msg: str = None,
        trace: bool = False,
        **kwargs,
    ):
        """Initialize a new V10CredentialExchange.

        All arguments are keyword-only; unset values default to None/False so
        a record can be created at any point in the exchange.
        """
        super().__init__(credential_exchange_id, state, trace=trace, **kwargs)
        self._id = credential_exchange_id
        self.connection_id = connection_id
        self.thread_id = thread_id
        self.parent_thread_id = parent_thread_id
        self.initiator = initiator
        self.role = role
        self.state = state
        self.credential_definition_id = credential_definition_id
        self.schema_id = schema_id
        self.credential_proposal_dict = credential_proposal_dict
        self.credential_offer = credential_offer
        self.credential_request = credential_request
        self.credential_request_metadata = credential_request_metadata
        self.credential_id = credential_id
        self.raw_credential = raw_credential
        self.credential = credential
        self.revoc_reg_id = revoc_reg_id
        self.revocation_id = revocation_id
        self.auto_offer = auto_offer
        self.auto_issue = auto_issue
        self.auto_remove = auto_remove
        self.error_msg = error_msg
        self.trace = trace
    @property
    def credential_exchange_id(self) -> str:
        """Accessor for the ID associated with this exchange."""
        return self._id
    @property
    def record_value(self) -> dict:
        """Accessor for the JSON record value generated for this credential exchange."""
        return {
            prop: getattr(self, prop)
            for prop in (
                "connection_id",
                "credential_proposal_dict",
                "credential_offer",
                "credential_request",
                "credential_request_metadata",
                "error_msg",
                "auto_offer",
                "auto_issue",
                "auto_remove",
                "raw_credential",
                "credential",
                "parent_thread_id",
                "initiator",
                "credential_definition_id",
                "schema_id",
                "credential_id",
                "revoc_reg_id",
                "revocation_id",
                "role",
                "state",
                "trace",
            )
        }
    @classmethod
    async def retrieve_by_connection_and_thread(
        cls, context: InjectionContext, connection_id: str, thread_id: str
    ) -> "V10CredentialExchange":
        """Retrieve a credential exchange record by connection and thread ID.

        A cache maps (connection_id, thread_id) to the record ID, so repeat
        lookups can fetch by ID instead of scanning by tag filter.
        """
        cache_key = f"credential_exchange_ctidx::{connection_id}::{thread_id}"
        record_id = await cls.get_cached_key(context, cache_key)
        if record_id:
            record = await cls.retrieve_by_id(context, record_id)
        else:
            record = await cls.retrieve_by_tag_filter(
                context, {"thread_id": thread_id}, {"connection_id": connection_id}
            )
            # Populate the cache for the next lookup of this pair
            await cls.set_cached_key(context, cache_key, record.credential_exchange_id)
        return record
    def __eq__(self, other: Any) -> bool:
        """Comparison between records (delegates to BaseExchangeRecord)."""
        return super().__eq__(other)
class V10CredentialExchangeSchema(BaseExchangeSchema):
    """Schema to allow serialization/deserialization of credential exchange records."""

    class Meta:
        """V10CredentialExchangeSchema metadata."""

        model_class = V10CredentialExchange

    # Identifiers for the exchange record and its messaging context
    credential_exchange_id = fields.Str(
        required=False,
        description="Credential exchange identifier",
        example=UUIDFour.EXAMPLE,
    )
    connection_id = fields.Str(
        required=False, description="Connection identifier", example=UUIDFour.EXAMPLE
    )
    thread_id = fields.Str(
        required=False, description="Thread identifier", example=UUIDFour.EXAMPLE
    )
    parent_thread_id = fields.Str(
        required=False, description="Parent thread identifier", example=UUIDFour.EXAMPLE
    )
    # Exchange metadata: who started it, this agent's role, current state
    initiator = fields.Str(
        required=False,
        description="Issue-credential exchange initiator: self or external",
        example=V10CredentialExchange.INITIATOR_SELF,
        validate=validate.OneOf(["self", "external"]),
    )
    role = fields.Str(
        required=False,
        description="Issue-credential exchange role: holder or issuer",
        example=V10CredentialExchange.ROLE_ISSUER,
        validate=validate.OneOf(["holder", "issuer"]),
    )
    state = fields.Str(
        required=False,
        description="Issue-credential exchange state",
        example=V10CredentialExchange.STATE_ACKED,
    )
    # Ledger artifacts the exchange refers to
    credential_definition_id = fields.Str(
        required=False,
        description="Credential definition identifier",
        **INDY_CRED_DEF_ID,
    )
    schema_id = fields.Str(
        required=False, description="Schema identifier", **INDY_SCHEMA_ID
    )
    # Protocol message payloads captured at each step of the exchange
    credential_proposal_dict = fields.Dict(
        required=False, description="Serialized credential proposal message"
    )
    credential_offer = fields.Dict(
        required=False, description="(Indy) credential offer"
    )
    credential_request = fields.Dict(
        required=False, description="(Indy) credential request"
    )
    credential_request_metadata = fields.Dict(
        required=False, description="(Indy) credential request metadata"
    )
    credential_id = fields.Str(
        required=False, description="Credential identifier", example=UUIDFour.EXAMPLE
    )
    raw_credential = fields.Dict(
        required=False,
        description="Credential as received, prior to storage in holder wallet",
    )
    credential = fields.Dict(required=False, description="Credential as stored")
    # Automation flags controlling how the agent advances the exchange
    auto_offer = fields.Bool(
        required=False,
        description="Holder choice to accept offer in this credential exchange",
        example=False,
    )
    auto_issue = fields.Bool(
        required=False,
        description="Issuer choice to issue to request in this credential exchange",
        example=False,
    )
    auto_remove = fields.Bool(
        required=False,
        default=True,
        description=(
            "Issuer choice to remove this credential exchange record when complete"
        ),
        example=False,
    )
    error_msg = fields.Str(
        required=False,
        description="Error message",
        example="credential definition identifier is not set in proposal",
    )
    # Revocation bookkeeping for the issued credential
    revoc_reg_id = fields.Str(
        required=False, description="Revocation registry identifier"
    )
    revocation_id = fields.Str(
        required=False, description="Credential identifier within revocation registry"
    )
```
#### File: v1_0/models/route_record.py
```python
from marshmallow import fields
from aries_cloudagent.messaging.models.base import BaseModel, BaseModelSchema
class RouteRecord(BaseModel):
    """Class representing stored route information."""

    class Meta:
        """RouteRecord metadata."""

        schema_class = "RouteRecordSchema"

    def __init__(
        self,
        *,
        record_id: str = None,
        connection_id: str = None,
        recipient_key: str = None,
        created_at: str = None,
        updated_at: str = None,
        **kwargs
    ):
        """
        Initialize a RouteRecord instance.

        Args:
            record_id: optional identifier of the stored record
            connection_id: identifier of the connection the route belongs to
            recipient_key: the recipient verkey of the route
            created_at: creation timestamp
            updated_at: last-update timestamp

        """
        super().__init__(**kwargs)
        self.record_id = record_id
        self.connection_id = connection_id
        self.recipient_key = recipient_key
        self.created_at = created_at
        self.updated_at = updated_at
class RouteRecordSchema(BaseModelSchema):
    """RouteRecord schema."""

    class Meta:
        """RouteRecordSchema metadata."""

        model_class = "RouteRecord"

    # record_id is optional on input (presumably assigned when stored — confirm)
    record_id = fields.Str(required=False)
    connection_id = fields.Str(required=True)
    recipient_key = fields.Str(required=True)
    # Timestamps are optional; maintained alongside the record
    created_at = fields.Str(required=False)
    updated_at = fields.Str(required=False)
```
#### File: v1_0/tests/test_routes.py
```python
import json
import pytest
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .. import routes as test_module
class TestHolderRoutes(AsyncTestCase):
    """Unit tests for the trust-ping admin routes, with all I/O mocked out."""

    def setUp(self):
        # Minimal aiohttp-style app dict that the route handlers read from
        self.context = async_mock.MagicMock()
        self.app = {
            "request_context": self.context,
            "outbound_message_router": async_mock.CoroutineMock(),
        }

    async def test_connections_send_ping(self):
        """A ping on a ready connection returns the ping message's thread id."""
        request = async_mock.MagicMock(
            app=self.app,
            json=async_mock.CoroutineMock(return_value={"comment": "some comment"}),
            match_info={"conn_id": "dummy"},
        )
        with async_mock.patch.object(
            test_module.ConnectionRecord, "retrieve_by_id", async_mock.CoroutineMock()
        ) as mock_retrieve, async_mock.patch.object(
            test_module, "Ping", async_mock.MagicMock()
        ) as mock_ping, async_mock.patch.object(
            test_module.web, "json_response", async_mock.MagicMock()
        ) as json_response:
            mock_ping.return_value = async_mock.MagicMock(_thread_id="dummy")
            mock_retrieve.return_value = async_mock.MagicMock(is_ready=True)
            result = await test_module.connections_send_ping(request)
            # The handler must respond with the outbound ping's thread id
            json_response.assert_called_once_with({"thread_id": "dummy"})
            assert result is json_response.return_value

    async def test_connections_send_ping_no_conn(self):
        """An unknown connection id results in HTTP 404."""
        request = async_mock.MagicMock(
            app=self.app,
            json=async_mock.CoroutineMock(return_value={"comment": "some comment"}),
            match_info={"conn_id": "dummy"},
        )
        with async_mock.patch.object(
            test_module.ConnectionRecord, "retrieve_by_id", async_mock.CoroutineMock()
        ) as mock_retrieve, async_mock.patch.object(
            test_module.web, "json_response", async_mock.MagicMock()
        ) as json_response:
            # Simulate record-not-found from storage
            mock_retrieve.side_effect = test_module.StorageNotFoundError()
            with self.assertRaises(test_module.web.HTTPNotFound):
                await test_module.connections_send_ping(request)

    async def test_connections_send_ping_not_ready(self):
        """A connection that is not yet ready results in HTTP 400."""
        request = async_mock.MagicMock(
            app=self.app,
            json=async_mock.CoroutineMock(return_value={"comment": "some comment"}),
            match_info={"conn_id": "dummy"},
        )
        with async_mock.patch.object(
            test_module.ConnectionRecord, "retrieve_by_id", async_mock.CoroutineMock()
        ) as mock_retrieve, async_mock.patch.object(
            test_module.web, "json_response", async_mock.MagicMock()
        ) as json_response:
            mock_retrieve.return_value = async_mock.MagicMock(is_ready=False)
            with self.assertRaises(test_module.web.HTTPBadRequest):
                await test_module.connections_send_ping(request)

    async def test_register(self):
        """register() adds the module's routes to the web app."""
        mock_app = async_mock.MagicMock()
        mock_app.add_routes = async_mock.MagicMock()
        await test_module.register(mock_app)
        mock_app.add_routes.assert_called_once()
```
|
{
"source": "jcox-dev/django-localflavor",
"score": 3
}
|
#### File: localflavor/is_/forms.py
```python
from __future__ import unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField
from django.forms.widgets import Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.generic.forms import DeprecatedPhoneNumberFormFieldMixin
from .is_postalcodes import IS_POSTALCODES
class ISIdNumberField(RegexField):
    """
    Icelandic identification number (kennitala).

    This is a number every citizen of Iceland has.
    """

    default_error_messages = {
        'invalid': _('Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.'),
        'checksum': _('The Icelandic identification number is not valid.'),
    }

    def __init__(self, max_length=11, min_length=10, *args, **kwargs):
        super(ISIdNumberField, self).__init__(r'^\d{6}(-| )?\d{4}$',
                                              max_length, min_length, *args, **kwargs)

    def clean(self, value):
        value = super(ISIdNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        canonical = self._canonify(value)
        # Guard clause: reject on checksum failure, otherwise format for display
        if not self._validate(canonical):
            raise ValidationError(self.error_messages['checksum'])
        return self._format(canonical)

    def _canonify(self, value):
        """Return the value reduced to digits only (separators stripped)."""
        return value.replace('-', '').replace(' ', '')

    def _validate(self, value):
        """
        Check the verifier digit of a canonical (digits-only) value.

        Uses the modulo 11 method.
        """
        weights = (3, 2, 7, 6, 5, 4, 3, 2, 1, 0)
        total = sum(int(digit) * weight for digit, weight in zip(value, weights))
        return total % 11 == 0

    def _format(self, value):
        """Return a canonical value in the common XXXXXX-XXXX display format."""
        return force_text('%s-%s' % (value[:6], value[6:]))
class ISPhoneNumberField(RegexField, DeprecatedPhoneNumberFormFieldMixin):
    """
    Icelandic phone number field.

    Accepts seven digits with an optional hyphen or space after the
    first three digits.
    """

    def __init__(self, max_length=8, min_length=7, *args, **kwargs):
        super(ISPhoneNumberField, self).__init__(r'^\d{3}(-| )?\d{4}$',
                                                 max_length, min_length, *args, **kwargs)

    def clean(self, value):
        cleaned = super(ISPhoneNumberField, self).clean(value)
        if cleaned in EMPTY_VALUES:
            return ''
        # Strip the optional separator so only digits remain
        return cleaned.replace('-', '').replace(' ', '')
class ISPostalCodeSelect(Select):
    """A Select widget whose choices are the Icelandic postal codes."""

    def __init__(self, attrs=None):
        super(ISPostalCodeSelect, self).__init__(attrs, choices=IS_POSTALCODES)
```
|
{
"source": "jcpayne/aerial_survey_ai",
"score": 2
}
|
#### File: dev_packages/trident_dev/model.py
```python
__all__ = ['build_augmentation', 'RandomCorruption', 'TridentDatasetMapper', 'TileDatasetMapper', 'IterableTileDataset',
'build_train_loader', 'JTFMEvaluator', 'Trainer', 'JP_RepeatFactorTrainingSampler']
# Cell
import math
import random
import itertools
import numpy as np
import torch
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms as transforms
import imgaug as ia
from imgaug import augmenters as iaa
import imagecorruptions
from adabound import AdaBound
from typing import List, Optional, Union
from collections import defaultdict
import math
import logging
#Detectron imports
import detectron2
from detectron2.config import configurable
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.data import DatasetCatalog, MetadataCatalog, DatasetMapper,build_detection_test_loader
from detectron2.data import DatasetFromList, MapDataset, detection_utils as utils, get_detection_dataset_dicts
from detectron2.data import transforms as T
from detectron2.data.samplers import InferenceSampler, RepeatFactorTrainingSampler
from detectron2.data.build import build_detection_test_loader, build_batch_data_loader, trivial_batch_collator
from detectron2.data.transforms import TransformGen
from fvcore.transforms.transform import (BlendTransform,NoOpTransform,Transform)
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import DatasetEvaluator,DatasetEvaluators,COCOEvaluator
from detectron2.solver.build import *
#from detectron2.solver import build_optimizer
import detectron2.utils.comm as comm
# Cell
def build_augmentation(cfg, is_train):
    """Build the augmentation list, adding random image corruption when training.

    Overrides the default helper in ``detection_utils`` by appending a
    :class:`RandomCorruption` transform to the default augmentations.
    """
    augs = utils.build_augmentation(cfg, is_train)  # default Detectron2 augmentations
    if is_train:
        # cfg.INPUT.corruption is the probability ([0, 1]) of corrupting an image
        augs.append(RandomCorruption(cfg.INPUT.corruption))
        print("Random corruption augmentation used in training")
    print(augs)
    return augs
# Cell
class RandomCorruption(TransformGen):
    """
    Randomly transforms image corruption using the 'imgaug' package
    (which is only guaranteed to work for uint8 images).

    Returns an Numpy ndarray.
    """

    def __init__(self, p):
        """
        Args:
            p probability of applying corruption (p is on [0,1])
        """
        super().__init__()
        self._init(locals())  # TransformGen helper: registers ctor args as attributes
        self.p = p  # NOTE(review): presumably redundant with _init(locals()) — confirm

    def get_transform(self, img):
        # With probability p, corrupt the image; otherwise leave it untouched
        r = random.random()
        if(r <= self.p):
            #A selection of effects from imgaug
            #ia.seed(None)
            # Severity for the imgcorruptlike effects is drawn fresh per call
            severity = random.randint(1,5)
            augmenter_list = [
                iaa.BlendAlphaSimplexNoise(
                    foreground=iaa.EdgeDetect(alpha=(0.5, 0.9)),
                    background=iaa.LinearContrast((0.5, 0.2)),
                    per_channel=0.5),
                iaa.CoarseDropout(p=0.25, size_px=8),
                iaa.imgcorruptlike.GaussianBlur(severity),
                iaa.imgcorruptlike.SpeckleNoise(severity),
                iaa.Cutout(fill_mode="gaussian", fill_per_channel=True,nb_iterations=(1, 5), size=0.2, squared=False),
                iaa.imgcorruptlike.Spatter(severity)]
            #Blend noise with the source image
            # One effect is chosen uniformly at random and fully replaces the source
            # (src_weight=1, dst_weight=0 in the blend)
            augmenter = random.choice(augmenter_list)
            blended_img = augmenter.augment_image(img)
            return BlendTransform(src_image=blended_img, src_weight=1, dst_weight=0)
        else:
            return(NoOpTransform())
# Cell
class TridentDatasetMapper(DatasetMapper):
    """
    A customized version of DatasetMapper.

    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by the model.
    """

    #The only change I made is to switch build_augmentation for utils.build_augmentation
    @classmethod
    def from_config(cls, cfg, is_train: bool = True):
        """Build the mapper's constructor kwargs from a config node.

        Identical to the base-class implementation except that it uses this
        module's build_augmentation (which can add random corruption).
        """
        augs = build_augmentation(cfg, is_train) #changed here
        if cfg.INPUT.CROP.ENABLED and is_train:
            augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
            # Cropping can invalidate boxes, so recompute them from masks if available
            recompute_boxes = cfg.MODEL.MASK_ON
        else:
            recompute_boxes = False
        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "use_instance_mask": cfg.MODEL.MASK_ON,
            "instance_mask_format": cfg.INPUT.MASK_FORMAT,
            "use_keypoint": cfg.MODEL.KEYPOINT_ON,
            "recompute_boxes": recompute_boxes,
        }
        if cfg.MODEL.KEYPOINT_ON:
            ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        if cfg.MODEL.LOAD_PROPOSALS:
            ret["precomputed_proposal_topk"] = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        return ret
# Cell
class TileDatasetMapper(DatasetMapper):
    """
    Just like DatasetMapper except instead of opening the file, it is passed a dict
    containing an already-opened file.

    Note: the __init__ class is decorated with @configurable so you can pass
    a cfgNode object and it will use the from_config() method for initiation.

    1. Accept an opened image as a Pytorch array
    2. Potentially applies cropping/geometric transforms to the image and annotations
    """

    @configurable
    def __init__(
        self,
        is_train: bool,
        *,
        augmentations: List[Union[T.Augmentation, T.Transform]],
        image_format: str,
        use_instance_mask: bool = False,
        use_keypoint: bool = False,
        instance_mask_format: str = "polygon",
        keypoint_hflip_indices: Optional[np.ndarray] = None,
        precomputed_proposal_topk: Optional[int] = None,
        recompute_boxes: bool = False
    ):
        """Store mapper configuration; see DatasetMapper for argument meanings."""
        if recompute_boxes:
            assert use_instance_mask, "recompute_boxes requires instance masks"
        # fmt: off
        self.is_train = is_train
        self.augmentations = augmentations
        self.image_format = image_format
        self.use_instance_mask = use_instance_mask
        self.instance_mask_format = instance_mask_format
        self.use_keypoint = use_keypoint
        self.keypoint_hflip_indices = keypoint_hflip_indices
        self.proposal_topk = precomputed_proposal_topk
        self.recompute_boxes = recompute_boxes
        # fmt: on
        logger = logging.getLogger(__name__)
        logger.info("Augmentations used in training: " + str(augmentations))

    @classmethod
    def from_config(cls, cfg, is_train: bool = False):
        """Build ctor kwargs from config.

        NOTE: unlike the base class, is_train defaults to False here — this
        mapper is intended for inference.
        """
        augs = utils.build_augmentation(cfg, is_train) #returns T.ResizeShortestEdge, plus optionally others
        if cfg.INPUT.CROP.ENABLED and is_train:
            augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
            recompute_boxes = cfg.MODEL.MASK_ON
        else:
            recompute_boxes = False
        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "use_instance_mask": cfg.MODEL.MASK_ON,
            "instance_mask_format": cfg.INPUT.MASK_FORMAT,
            "use_keypoint": cfg.MODEL.KEYPOINT_ON,
            "recompute_boxes": recompute_boxes,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept

        NOTE(review): when is_train is True this falls through and implicitly
        returns None — confirm whether training use is intended at all.
        """
        # dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        # # USER: Write your own image loading if it's not from a file
        # # We just use the image that is passed in, instead of opening the file and converting
        # #The returned dict has 2 things in it: file_name and image (which contains a tensor). That's all.
        # image = dataset_dict["image"]
        # image_shape = image.shape[1:]  # h, w
        if not self.is_train:
            return dataset_dict
# Cell
class IterableTileDataset(torch.utils.data.IterableDataset):
    """
    An IterableDataset for passing arbitrary-length data to a Dataloader.

    The ``__iter__`` method ensures that data is not duplicated when there is
    more than one worker, by splitting the data into disjoint contiguous
    pieces and giving each worker exactly one piece to iterate.
    """

    def __init__(self, tilelist):
        """
        Args:
            tilelist: indexable sequence of tiles to serve.
        """
        # Bug fix: was `super(IterableTileDataset).__init__()`, an unbound
        # super proxy that never invoked the parent initializer.
        super().__init__()
        self.data = tilelist
        self.start = 0
        self.end = len(self.data)

    def reset_dataset(self, newdataset):
        """Replace the underlying data and reset the iteration bounds."""
        self.data = newdataset
        self.start = 0
        self.end = len(newdataset)

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            # Single-process data loading: iterate the full range.
            # Bug fix: worker_info.id was previously printed before this None
            # check, raising AttributeError in single-process loading.
            iter_start, iter_end = self.start, self.end
        else:
            # In a worker process: split the workload so no item is yielded
            # by more than one worker.
            per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
            iter_start = self.start + worker_info.id * per_worker
            iter_end = min(iter_start + per_worker, self.end)
        return iter([self.data[i] for i in range(iter_start, iter_end)])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
# Cell
# class IterableTileDataset(torch.utils.data.IterableDataset):
# """
# An IterableDataset for passing arbitrary-length data to a Dataloader. The
# iter() method ensures that data is not duplicated if there is more than one worker, by splitting
# the data into separate pieces and only passing each worker one piece to iterate.
# """
# def __init__(self, tilelist):
# super(IterableTileDataset).__init__() #The init method of IterableDataset
# self.data = tilelist
# self.start = 0
# self.end = len(self.data)
# def reset_dataset(self, newdataset):
# self.data = newdataset
# self.start = 0
# self.end = len(newdataset)
# def __iter__(self):
# indexes = range(self.start,self.end)
# itemlist = [self.data[t] for t in indexes]
# return iter(itemlist)
# #return iter(range(self.start, self.end))
# def __len__(self):
# return len(self.data)
# def __getitem__(self,idx):
# return self.data[idx]
# #NOTE: This function must live outside the dataset class (not here!)
# def worker_init_fn(worker_id):
# worker_info = torch.utils.data.get_worker_info()
# print("worker_info.id",worker_info.id,"worker_info.num_workers",worker_info.num_workers)
# dataset = worker_info.dataset # the dataset copy in this worker process
# overall_start = dataset.start
# overall_end = dataset.end
# # configure the dataset to only process the split workload
# per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
# worker_id = worker_info.id
# dataset.start = overall_start + worker_id * per_worker
# dataset.end = min(dataset.start + per_worker, overall_end)
# Cell
def build_train_loader(cfg, mapper=None):
    """
    Build the training data loader.

    This is an edited version of build_detection_train_loader; it adds
    JP_RepeatFactorTrainingSampler, which fixes a bug in
    RepeatFactorTrainingSampler that caused it to crash when it encountered
    empty images.

    A data loader is created by the following steps:

    1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
    2. Coordinate a random shuffle order shared among all processes (all GPUs)
    3. Each process spawn another few workers to process the dicts. Each worker will:
        * Map each metadata dict into another format to be consumed by the model.
        * Batch them by simply putting dicts into a list.

    The batched ``list[mapped_dict]`` is what this dataloader will yield.

    Args:
        cfg (CfgNode): the config
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            By default it will be ``DatasetMapper(cfg, True)``.

    Returns:
        an infinite iterator of training data
    """
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if mapper is None:
        mapper = DatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        # Imported locally: TrainingSampler is not among this module's
        # top-level sampler imports, so a bare reference would raise NameError.
        from detectron2.data.samplers import TrainingSampler
        sampler = TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
        sampler = RepeatFactorTrainingSampler(repeat_factors)
    elif sampler_name == "JP_RepeatFactorTrainingSampler":
        repeat_factors = JP_RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
        # Consistency fix: previously instantiated the base
        # RepeatFactorTrainingSampler in this branch.
        sampler = JP_RepeatFactorTrainingSampler(repeat_factors)
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    return build_batch_data_loader(
        dataset,
        sampler,
        cfg.SOLVER.IMS_PER_BATCH,
        aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
    )
# Cell
class JTFMEvaluator(DatasetEvaluator):
    """
    A dead-simple, Just The Facts, Ma'am evaluator. Just compiles and returns results.

    The `process()` method accumulates each iteration's predictions; the
    `evaluate()` method gathers and returns them at the end.
    """

    def __init__(self, distributed, output_dir=None):
        """
        Arguments:
            distributed (T/F): if True, will collect results from all ranks and run
                evaluation in the main process. Otherwise, will evaluate the
                results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset [not implemented at present]
        """
        self._tasks = ("bbox",)
        self._distributed = distributed
        self._output_dir = output_dir
        # Predictions are moved off-GPU so they can be gathered/pickled
        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)
        print("Evaluator: JTFMEvaluator")

    def reset(self):
        """Clear accumulated predictions before a new evaluation run."""
        self._predictions = []

    def process(self, inputs, outputs):
        """
        Accumulate one batch of predictions.

        Args:
            inputs: the inputs to a model (e.g., GeneralizedRCNN).
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            # Carry tile coordinates through so results map back to the source image
            prediction = {"image_id": input["image_id"], "trow": input["trow"], "tcol": input["tcol"]}
            if "instances" in output:
                prediction["instances"] = output["instances"].to(self._cpu_device)
            else:
                prediction["instances"] = []
            self._predictions.append(prediction)

    def evaluate(self):
        """Gather predictions (across ranks if distributed) and return them."""
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            # Flatten the per-rank lists into a single list
            predictions = list(itertools.chain(*predictions))
            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions
        if len(predictions) == 0:
            # Bug fix: warning previously carried a copy-pasted "[COCOEvaluator]" tag
            self._logger.warning("[JTFMEvaluator] Did not receive valid predictions.")
            return {}
        return {"Predictions": predictions}

    def _eval_predictions(self, tasks, predictions):
        """
        Evaluate predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        pass
# Cell
class Trainer(DefaultTrainer):
    """Customized version of DefaultTrainer, which enables augmentation to be added via
    a custom DatasetMapper during training. It also optionally uses the Adabound optimizer,
    and it calls build_train_loader, which adds JP_RepeatFactorTrainingSampler.
    """

    #For training we add image augmentation (=corruption of several kinds)
    @classmethod
    def build_train_loader(cls, cfg):
        """Build the training loader with corruption augmentation enabled."""
        return build_train_loader(cfg, mapper=TridentDatasetMapper(cfg, True))

    @classmethod
    def build_test_loader(cls, cfg, dataset_name, dataset_dicts, mapper):
        """Build the inference loader (no augmentation; see train_net.py for TTA).

        NOTE(review): ``build_inference_loader`` is not defined in this module
        (a similarly-named helper is commented out above) — confirm it is
        provided elsewhere before calling this method.
        """
        return build_inference_loader(cfg, dataset_name, dataset_dicts, mapper=TileDatasetMapper)

    @classmethod
    def build_optimizer(cls, cfg, model):
        """
        Build either a default SGD optimizer or an Adabound optimizer,
        depending on what is specified in configs (cfg.SOLVER.OPTIMIZER_TYPE).

        Returns:
            torch.optim.Optimizer:
        """
        if cfg.SOLVER.OPTIMIZER_TYPE == 'Adabound':
            print("Building Adabound optimizer")
            # Bug fix: was called as the bare name build_adabound_optimizer(...)
            # which raised NameError at runtime (class-scope names are not
            # visible in method bodies); dispatch through cls instead.
            return cls.build_adabound_optimizer(cfg, model)
        else:
            print("Building default SGD optimizer")
            return build_optimizer(cfg, model)  # detectron2's default builder

    @classmethod
    def build_adabound_optimizer(cls, cfg, model):
        """
        Build an AdaBound optimizer from config. Mirrors the parameter grouping
        of `detectron2.solver.build_optimizer`, but returns AdaBound instead of SGD.

        Bug fix: declared @classmethod (it already took ``cls`` but lacked the
        decorator, so it could not be called the way build_optimizer calls it).
        """
        # Normalization layers get their own weight-decay setting
        norm_module_types = (
            nn.BatchNorm1d,
            nn.BatchNorm2d,
            nn.BatchNorm3d,
            nn.SyncBatchNorm,
            # NaiveSyncBatchNorm inherits from BatchNorm2d
            nn.GroupNorm,
            nn.InstanceNorm1d,
            nn.InstanceNorm2d,
            nn.InstanceNorm3d,
            nn.LayerNorm,
            nn.LocalResponseNorm,
        )
        params = []  # per-parameter groups with lr/weight-decay overrides
        memo = set()  # guards against duplicated (shared) parameters
        for module in model.modules():
            for key, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)
                lr = cfg.SOLVER.BASE_LR
                weight_decay = cfg.SOLVER.WEIGHT_DECAY
                if isinstance(module, norm_module_types):
                    weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM
                elif key == "bias":
                    # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0
                    # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer
                    # hyperparameters are by default exactly the same as for regular
                    # weights.
                    lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
                    weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
                params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
        optimizer = AdaBound(params, lr=cfg.SOLVER.BASE_LR, final_lr=0.05)
        optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
# Cell
class JP_RepeatFactorTrainingSampler(RepeatFactorTrainingSampler):
    """RepeatFactorTrainingSampler that also handles images with no annotations."""

    #This is a bug fix (the base class didn't handle empty images correctly)
    @staticmethod
    def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
        """
        Compute (fractional) per-image repeat factors based on category frequency.

        The repeat factor for an image is a function of the frequency of the
        rarest category labeled in that image. The "frequency of category c"
        in [0, 1] is the fraction of images in the training set (without
        repeats) in which category c appears.

        See :paper:`lvis` (>= v2) Appendix B.2.

        Args:
            dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
            repeat_thresh (float): frequency threshold below which data is
                repeated. If the frequency is half of `repeat_thresh`, the
                image will be repeated twice.

        Returns:
            torch.Tensor: the i-th element is the repeat factor for the
            dataset image at index i.
        """
        num_images = len(dataset_dicts)
        # 1. For each category c, the fraction of images containing it: f(c)
        image_counts = defaultdict(int)
        for record in dataset_dicts:
            for cat_id in {ann["category_id"] for ann in record["annotations"]}:
                image_counts[cat_id] += 1
        # 2. Category-level repeat factor: r(c) = max(1, sqrt(t / f(c)))
        category_rep = {
            cat_id: max(1.0, math.sqrt(repeat_thresh / (count / num_images)))
            for cat_id, count in image_counts.items()
        }
        # 3. Image-level repeat factor: r(I) = max_{c in I} r(c).
        # If an image includes a rare category, it is re-sampled more often
        # even when it also contains common categories.
        rep_factors = []
        for record in dataset_dicts:
            annotations = record["annotations"]
            if not annotations:
                # Empty images are assumed plentiful: never oversample them
                rep_factors.append(1)
            else:
                rep_factors.append(
                    max(category_rep[ann["category_id"]] for ann in annotations)
                )
        return torch.tensor(rep_factors, dtype=torch.float32)
```
#### File: aerial_survey_ai/trident_project/distrib_run_and_inference.py
```python
import sys
sys.path.insert(1, '/') #The container root
print('Loading imports')
import argparse
import os
import time
import cv2
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import copy
from PIL import Image
import random
import logging
from pathlib import Path
import pandas as pd
import datetime
import logging
from collections import OrderedDict,Counter
from contextlib import contextmanager
#Detectron2 imports
from detectron2.utils.comm import get_world_size, is_main_process
from detectron2.utils.logger import log_every_n_seconds
from detectron2.evaluation import inference_context
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.build import build_detection_test_loader, trivial_batch_collator
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.utils.logger import setup_logger
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine import default_setup, launch
from detectron2.evaluation import inference_on_dataset,DatasetEvaluators
from detectron2.evaluation import DatasetEvaluator,COCOEvaluator
from detectron2.structures import Instances, Boxes
import detectron2.utils.comm as comm
from detectron2.utils.comm import get_world_size, get_rank, get_local_rank, is_main_process
#My code
from dev_packages.TridentNet.tridentnet import add_tridentnet_config
from trident_dev.detectron2_utilities import *
from trident_dev.tiling import *
from trident_dev.model import *
from trident_dev.utilities import *
#Azureml SDK
from azureml.core.run import Run
print("All imports passed")
print("Rank:",comm.get_rank())
logger = logging.getLogger("detectron2")
#WARNING: if you mis-format a value, it will be silently ignored (i.e., use 'key:value' instead of 'key=value')
def setup_model_configuration(rootdir, output_dir, CLASS_NAMES):
    """Build and freeze the Detectron2/TridentNet config for training and inference.

    Args:
        rootdir: Root data folder; ``model_final.pth`` is expected directly under it.
        output_dir: Folder where checkpoints/logs are written (becomes cfg.OUTPUT_DIR).
        CLASS_NAMES: List of class names; its length sets ROI_HEADS.NUM_CLASSES.

    Returns:
        An immutable (frozen) detectron2 CfgNode.
    """
    cfg = get_cfg()
    add_tridentnet_config(cfg) #Loads a few values that distinguish TridentNet from Detectron2
    #MODEL
    cfg.MODEL.WEIGHTS = str(Path(rootdir)/"model_final.pth") #In blobstore
    cfg.MODEL.MASK_ON = False
    cfg.MODEL.RESNETS.DEPTH = 101
    cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
    cfg.MODEL.BACKBONE.FREEZE_AT = 0 #There are 4 conv stages. 0 means unfreeze (train the whole backbone).
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 #(default is 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(CLASS_NAMES) # number of foreground classes
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
    cfg.MODEL.DEVICE='cuda' #THIS IS FOR INFERENCE! Options: 'cuda' or 'cpu'
    # Specify the test branch index TridentNet Fast inference:
    #   - use -1 to aggregate results of all branches during inference.
    #   - otherwise, only using specified branch for fast inference. Recommended setting is
    #     to use the middle branch.
    cfg.MODEL.TRIDENT.TEST_BRANCH_IDX = 1
    #SOLVER
    #KEEP. Default Solver specs. (Note; 3x seems to require 3 times as many steps as 1x)
    #Base-TridentNet-Fast-C4.yaml        60,000-80,000 steps and 90,000 max_iter.
    #tridentnet_fast_R_50_C4_1x.yaml     SAME
    #tridentnet_fast_R_50_C4_3x.yaml     210,000 - 250,000 steps and 270,000 max_iter.
    #tridentnet_fast_R_101_C4_3x.yaml    SAME
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" #Options: 'WarmupCosineLR'
    #NOTE: SOLVER.STEPS are the iterations at which the "WarmupMultiStepLR" scheduler drops the learning rate by GAMMA.
    #V3 TRAINING: A 4-GPU machine takes 1.41 s/img = ~25 mins/1000 tiles or 4 hours/10,000 tiles
    #V3 INFERENCE a 4-GPU machine takes 0.05s/img x 54 tiles/img = ~47 minutes/1000 images.
    cfg.SOLVER.MAX_ITER = 12000 # 270000 for trident by default
    cfg.SOLVER.STEPS = (2000,3000,4000,5000,10000)#If a single value, then trailing comma is required (must be iterable) #(210000, 250000) for trident
    cfg.SOLVER.GAMMA = 0.5
    cfg.SOLVER.WARMUP_ITERS = 1000 #1000 is default
    cfg.SOLVER.IMS_PER_BATCH = 3 #16 if using 4 GPUs
    cfg.SOLVER.CHECKPOINT_PERIOD = 5000 #How often the model is written
    #Learning Rates
    #From Goyal et al. 2017: Linear Scaling Rule: When the minibatch size is multiplied by k,
    #multiply the learning rate by k. i.e., as you increase the batch size because of using
    #additional GPUs, increase the learning rate too. Works up to very large batches (8,000 images)
    #See auto_scale_workers() in Detectron2 (very important!)
    cfg.SOLVER.BASE_LR = 0.002 #LR is .001 in defaults.py but .02 in tridentnet, but they trained on 8 GPUs
    cfg.SOLVER.OPTIMIZER_TYPE = "Default" # 'Default' or 'Adabound'
    #INPUT
    #Pixel means are from 19261 500x500 tiles on Aug 15 2020
    cfg.INPUT.FORMAT = "RGB"
    cfg.MODEL.PIXEL_MEAN = [143.078, 140.690, 120.606]
    cfg.MODEL.PIXEL_STD = [34.139, 29.849, 31.695]
    #Augmentation. Add corruption to images with probability p
    cfg.INPUT.corruption = 0.1
    cfg.INPUT.MIN_SIZE_TRAIN = (640, 672, 704, 736, 768, 800)
    cfg.INPUT.MIN_SIZE_TEST = 800
    #DATASETS
    cfg.DATASETS.TRAIN = ("survey_train",) #Trailing comma because multiple training datasets are allowed (must be a tuple)
    cfg.DATASETS.TEST = ("survey_valid",)
    #DATALOADER
    cfg.DATALOADER.SAMPLER_TRAIN = "JP_RepeatFactorTrainingSampler"
    cfg.DATALOADER.REPEAT_THRESHOLD = 0.02 #Goal is 400 images per category given 20,000 images total.
    cfg.DATALOADER.NUM_WORKERS = 4 #Set to equal the number of GPUs.
    # if True, the dataloader will filter out images that have no associated
    # annotations at train time.
    cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False
    cfg.OUTPUT_DIR = output_dir
    #WARNING. #freeze() makes the config immutable; hence it must come last
    cfg.freeze()
    return cfg
def jp_inference_on_dataset(model, data_loader, evaluator):
    """
    Run model on the data_loader and evaluate the metrics with evaluator.
    Also benchmark the inference speed of `model.forward` accurately.
    The model will be used in eval mode.
    Args:
        model (nn.Module): a module which accepts an object from
            `data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
            If you wish to evaluate a model in `training` mode instead, you can
            wrap the given model and override its behavior of `.eval()` and `.train()`.
        data_loader: an iterable object with a length.
            The elements it generates will be the inputs to the model.
        evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
            to benchmark, but don't want to do any evaluation.
    Returns:
        The return value of `evaluator.evaluate()`
    """
    num_devices = get_world_size()
    # NOTE(review): this local intentionally shadows the module-level `logger`.
    logger = logging.getLogger(__name__)
    logger.info("Start inference on {} images".format(len(data_loader)))
    total = len(data_loader)  # inference data loader must have a fixed length
    if evaluator is None:
        # create a no-op evaluator
        evaluator = DatasetEvaluators([])
    evaluator.reset()
    # The first few batches are treated as warmup and excluded from timing.
    num_warmup = min(5, total - 1)
    start_time = time.perf_counter()
    total_compute_time = 0
    with inference_context(model), torch.no_grad():
        for idx, inputs in enumerate(data_loader):
            print("rank",comm.get_rank(),"is processing batch",idx)
            if idx == num_warmup:
                # Warmup finished: restart the clocks so timings exclude it.
                start_time = time.perf_counter()
                total_compute_time = 0
            start_compute_time = time.perf_counter()
            outputs = model(inputs)  # Run the model on this batch.
            if torch.cuda.is_available():
                # Wait for async CUDA kernels so the timing is accurate.
                torch.cuda.synchronize()
            total_compute_time += time.perf_counter() - start_compute_time
            evaluator.process(inputs, outputs)
            # Number of timed iterations (excludes warmup once past it).
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            if idx >= num_warmup * 2 or seconds_per_img > 5:
                total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
                eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
                log_every_n_seconds(
                    logging.INFO,
                    "Inference done {}/{}. {:.4f} s / img. ETA={}".format(
                        idx + 1, total, seconds_per_img, str(eta)
                    ),
                    n=5,
                )
    # Measure the time only for this worker (before the synchronization barrier)
    total_time = time.perf_counter() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    # NOTE this format is parsed by grep
    logger.info(
        "Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
            total_time_str, total_time / (total - num_warmup), num_devices
        )
    )
    total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
    logger.info(
        "Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)".format(
            total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
        )
    )
    results = evaluator.evaluate()
    # An evaluator may return None when not in main process.
    # Replace it by an empty dict instead to make it easier for downstream code to handle
    if results is None:
        results = {}
    return results
def main(args):
    """Entry point run (possibly per-process under `launch`) for training or evaluation.

    Registers the survey datasets, builds the frozen config, then either:
      * ``args.eval_only``: loads a checkpoint and runs tiled inference over the
        images in ``<data_folder>/inferencetest``, reassembling per-tile
        predictions into full-image annotations; or
      * otherwise trains a ``Trainer`` (optionally evaluating afterwards).

    Args:
        args: Parsed command-line namespace (see the argparse setup in __main__).

    Returns:
        Evaluation results dict on the main process when evaluating, training
        results when training, or None on non-main evaluation workers.
    """
    #You need this line to get the 'run' object so you can log to it
    run = Run.get_context()
    run.log('Data folder', args.data_folder,)
    run.log('Output folder', args.output_dir,'')
    #run.log('test', 100, 'test logging a single value') #Log a value
    #run.log_list('test_list', [1,2,3], description='test logging a list') #Log a list
    #Set paths
    rootdir = Path(str(args.data_folder))
    imagepath = Path(rootdir/'tiled_images')
    annotationpath = Path(rootdir/'tiled_annotations')
    inferencepath = Path(rootdir/'inferencetest')
    output_dir = args.output_dir
    run.log("rootdir",rootdir,)
    run.log("imagepath",imagepath,)
    run.log("annotationpath",annotationpath,)
    run.log("inferencepath",inferencepath,)
    run.log("output_dir",output_dir)
    #Define allowed file types
    imagefile_extensions = ['.jpg','.JPG']
    annotationfile_extensions = ['.xml']
    #Get annotation files
    annotation_files = get_files(annotationpath,annotationfile_extensions,recursive=False)
    #Log a count of the original classes
    classcounts = count_classes(annotation_files) #Returns a list of tuples (class, count)
    orig_class_names = [tup[0] for tup in classcounts] #Get a list of classes
    run.log_list('Original class names',orig_class_names,'Class names found in annotation files.')
    #Set list of permitted class names.
    CLASS_NAMES = [
        'buffalo',
        'building',
        'charcoal mound',
        'charcoal sack',
        'cow',
        'donkey',
        'eland',
        'elephant',
        'gazelle',
        'giraffe',
        'hartebeest',
        'human',
        'impala',
        'kudu',
        'oryx',
        'other_animal',
        'shoats',
        'warthog',
        'wildebeest',
        'zebra']
    omit_classes = ['boma'] #Classes to be dropped
    rename_classes = {} #The format of this dict is oldname:newname if required.
    #Load datasets and register them with Detectron
    trainfile = rootdir/'train.txt'
    validfile = rootdir/'valid.txt'
    register_dataset_from_file(imagepath,annotationpath,trainfile,True, 'survey_train', CLASS_NAMES, rename_classes, omit_classes)
    register_dataset_from_file(imagepath,annotationpath,validfile,False, 'survey_valid', CLASS_NAMES,rename_classes,omit_classes)
    sval = str(MetadataCatalog.get("survey_valid"))
    run.log('survey_valid',sval,)
    #Create the config (cfg) object. This is my function (above)
    cfg = setup_model_configuration(rootdir, output_dir, CLASS_NAMES)
    # default_setup performs some basic common setups at the beginning of a job, including:
    # 1. Set up the detectron2 logger
    # 2. Log basic information about environment, cmdline arguments, and config
    # 3. Backup the config to the output directory
    default_setup(cfg, args) ##in detectron2/engine/defaults.py.
    #If you are doing inference (=evaluation), build the model first
    if args.eval_only:
        print("Building model")
        #NOTE: we are NOT creating a new Trainer here; we are only using its class method 'build_model'
        #Therefore it doesn't do some of the other tricks like wrapping the model and calling build_train_loader
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        #Wrap the model in DistributedDataParallel module
        distributed = comm.get_world_size() > 1
        # NOTE(review): `rank` is only bound inside this branch, but it is read
        # unconditionally by the DistributedSampler below — a single-process
        # (world_size == 1) eval run would hit a NameError. Confirm and fix.
        if distributed:
            world_size = comm.get_world_size()
            rank = comm.get_local_rank()
            model = DistributedDataParallel(model, device_ids=[rank], broadcast_buffers=False)
            print("This is rank",rank)
        #Run the evaluation (build one evaluator; then for each image file, rebuild the test loader with new tiles.)
        print("Building evaluator")
        image_files = get_files(inferencepath,imagefile_extensions,recursive=False)
        #Build evaluator (doesn't need to be in the for loop). Note: distributed MUST BE True if total number of GPUs is > 1.
        evaluators = DatasetEvaluators([JTFMEvaluator(distributed=True, output_dir=cfg.OUTPUT_DIR)])
        mapper = TileDatasetMapper(cfg, False)
        results = []
        for f in image_files:
            file_results = []
            pil_img = Image.open(f)
            pil_img = np.asarray(pil_img)
            pil_img = copy.deepcopy(pil_img) #necessary step
            #Convert to tensor
            img = torch.from_numpy(pil_img)
            img = img.permute(2,0,1)
            fullimage_size = img.shape[1:]
            #Get tiles
            tile_size = (800, 800) #H, W in pixels
            tile_overlap = 100
            tiles = get_tiles(img,tile_size,tile_overlap)
            #Now put in a list and make sure the tiles are contiguous (they weren't)
            nrows, ncols = tiles.shape[1],tiles.shape[2]
            tilelist = []
            tilelist = [tiles[0][ir][ic].contiguous() for ir in range(nrows) for ic in range(ncols) ]
            #Create a set of tile positions (note: order must match that of tilelist!)
            tile_positions = [{"trow":ir,"tcol":ic} for ir in range(nrows) for ic in range(ncols)]
            #Create a list of dicts (one per image) for Detectron2 input (note: list comprehension zips names and positions)
            datasetdicts = [{"image_id":str(f), "trow":tile_pos["trow"], "tcol":tile_pos["tcol"],\
                "image":tile, "width":tile_size[1],"height":tile_size[0]} \
                for tile_pos,tile in zip(tile_positions,tilelist)]
            inference_dataset = DatasetFromList(datasetdicts)
            # NOTE(review): num_replicas is normally the world size (number of
            # participating processes), not DATALOADER.NUM_WORKERS — they only
            # coincide here because NUM_WORKERS is "set to equal the number of
            # GPUs" in the config. Confirm against torch DistributedSampler docs.
            inferencesampler = torch.utils.data.distributed.DistributedSampler(
                inference_dataset,
                num_replicas=cfg.DATALOADER.NUM_WORKERS,
                rank=rank
            )
            testloader = torch.utils.data.DataLoader(
                dataset=inference_dataset,
                batch_size=cfg.SOLVER.IMS_PER_BATCH,
                shuffle=False,
                num_workers=cfg.DATALOADER.NUM_WORKERS,
                pin_memory=True,
                collate_fn=trivial_batch_collator,
                sampler=inferencesampler)
            # NOTE(review): './n' in this message looks like a typo for '\n'.
            print(f'Running {f}: {len(datasetdicts)} tiles./n')
            if comm.is_main_process():
                tic = time.perf_counter()
            tile_results = []
            tile_results = jp_inference_on_dataset(model, testloader, evaluators)
            if comm.is_main_process():
                print("Inference on full-size image done in {:.2f}s".format(time.perf_counter() - tic))
            #Recombine tile annotations into a full-image annotation
            print("Reassembling tile annotations")
            if 'Predictions' in tile_results:
                all_instances = combine_tile_annotations(tile_results,tile_size,tile_overlap,fullimage_size)
                print("all_instances after combine_tile_annotations",all_instances)
                #Create the dict for this file
                file_results = {'file_name':f}
                file_results['height'] = fullimage_size[0]
                file_results['width'] = fullimage_size[1]
                file_results['instances'] = all_instances['instances']
                results.append(file_results)
            else:
                # NOTE(review): bare `next` is a no-op expression (the builtin is
                # evaluated and discarded); `continue` was presumably intended,
                # though at the end of the loop body it is harmless either way.
                next
        if comm.is_main_process():
            #Write the results dict to disk: one xml annotation file per image plus a summary (class_counts.csv)
            write_results_to_xml(results,output_dir,CLASS_NAMES)
            if args.write_annotated_images==True:
                print("n_image_files",len(image_files))
                print("n_results",len(results))
                print(CLASS_NAMES)
                save_annotated_images(image_files,results,CLASS_NAMES,output_dir,'ann')
            return results #if doing evaluation
        print("rank",comm.get_local_rank(),"returning")
        return
    #Else train (build the Trainer and return it)
    else:
        print("Creating or loading a trainer")
        #Note: __init__ calls self.build_train_loader(cfg) and wraps the model in DistributedDataParallel
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        print("Training")
        if args.eval_after_training==True:
            trainer.train()
            #Run the evaluation (build a test loader, an evaluator, and run)
            print("Building data loader and evaluator")
            val_loader = build_detection_test_loader(cfg, "survey_valid")
            evaluators = DatasetEvaluators([COCOEvaluator("survey_valid", cfg, distributed=True, output_dir=cfg.OUTPUT_DIR)])
            #Note: 'results' holds the summary table values. Predictions are written to "instances_predictions.pth"
            print("Doing inference")
            results = inference_on_dataset(trainer.model, val_loader, evaluators)
            return results
        else:
            return trainer.train()
if __name__ == '__main__':
    # __spec__ = None #fixes an error that pops up when debugging with pdb from commandline
    print("Testing if I can parse arguments")
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_folder', type=str, dest='data_folder', help='Root folder for input data')
    parser.add_argument('--eval_only', dest='eval_only', action='store_true')
    parser.set_defaults(eval_only=False)
    parser.add_argument('--eval_after_training', dest='eval_after_training', action='store_true')
    # BUG FIX: this previously re-set eval_only=False (copy-paste error);
    # the intended default is for 'eval_after_training' itself.
    parser.set_defaults(eval_after_training=False)
    parser.add_argument('--write_annotated_images', dest='write_annotated_images', action='store_true')
    # BUG FIX: likewise, default the correct flag instead of 'eval_only'.
    parser.set_defaults(write_annotated_images=False)
    parser.add_argument('--resume', dest='resume', action='store_true')
    parser.set_defaults(resume=False)
    #Note: <NAME> says --resume must come before --output_dir (https://github.com/facebookresearch/detectron2/issues/148)
    parser.add_argument('--output_dir', type=str, dest='output_dir', help='Root folder for output')
    parser.add_argument('--num_gpus_per_machine', type=int, dest='num_gpus_per_machine')
    parser.set_defaults(num_gpus_per_machine=1)
    parser.add_argument('--num_machines', type=int, dest='num_machines')
    parser.set_defaults(num_machines=1)
    parser.add_argument('--machine_rank', type=int, dest='machine_rank')
    parser.set_defaults(machine_rank=0)
    parser.add_argument('--dist_url', type=str, dest='dist_url')
    parser.set_defaults(dist_url="auto")
    #Call the parser
    args = parser.parse_args()
    print("Command Line Args:", args)
    #main(args)
    # Spawn one process per GPU (and machine) and run `main` in each.
    launch(
        main,
        args.num_gpus_per_machine,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
```
|
{
"source": "jcp/django-names",
"score": 2
}
|
#### File: django-names/names/mixins.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from nameparser import HumanName
from nameparser.config import CONSTANTS
from .conf import settings
class NameModelMixin(models.Model):
    """
    An abstract base class model mixin that contains a full name
    and its individual components.

    On save, the full name is parsed with ``nameparser.HumanName`` into its
    components (title, first, middle, last, nickname, suffix), and the full
    name is re-rendered from those components using the configured
    ``STRING_FORMAT``.
    """
    # Shared kwargs for all component fields; `full` is the only required field.
    DEFAULTS = {"max_length": settings.MAX_LENGTH, "blank": True}
    full = models.CharField(verbose_name=_("Full name"), max_length=settings.MAX_LENGTH)
    title = models.CharField(verbose_name=_("Title"), **DEFAULTS)
    first = models.CharField(verbose_name=_("First name"), **DEFAULTS)
    last = models.CharField(verbose_name=_("Last name"), **DEFAULTS)
    middle = models.CharField(verbose_name=_("Middle name"), **DEFAULTS)
    nickname = models.CharField(verbose_name=_("Nickname"), **DEFAULTS)
    suffix = models.CharField(verbose_name=_("Suffix"), **DEFAULTS)
    # Stores previous value of `full`
    _initial = None
    class Meta:
        abstract = True
    def __init__(self, *args, **kwargs):
        """
        Stores `self.full` for comparison in `save` method.
        """
        super().__init__(*args, **kwargs)
        self._initial = self.full
    def __str__(self):
        return self.full
    def save(self, *args, **kwargs):
        """
        Parses and saves a full name and its individual components.
        """
        # Use name components to create a full name. This ensures
        # modifications to individual name components are reflected
        # in the full name.
        name = CONSTANTS.string_format.format(**self.__dict__)
        # If this is a new instance, or `self.full` has been modified,
        # use `self.full` for the full name (the user-supplied string wins
        # over the component-derived one).
        if not self.pk or self._initial != self.full:
            name = self.full
        instance = self._parse(name=name)
        # Copy every parsed component (title, first, ...) onto this model.
        for attr, val in instance.as_dict().items():
            setattr(self, attr, val)
        self.full = instance.full_name
        super().save(*args, **kwargs)
    @classmethod
    def _parse(cls, name):
        """
        Parses and returns a `HumanName` instance.
        """
        instance = HumanName(
            full_name=name,
            constants=settings.OPTIONS,
            string_format=settings.STRING_FORMAT,
        )
        return instance
```
|
{
"source": "JCPedroza/algorithms-and-data-structures-py",
"score": 4
}
|
#### File: numeric/factorial/factorial_recursive.py
```python
def factorial(num: int) -> int:
    """Return ``num!`` computed recursively.

    Args:
        num: A non-negative integer.

    Returns:
        The factorial of ``num`` (with ``0! == 1! == 1``).

    Raises:
        ValueError: If ``num`` is negative. (Previously negative input
            silently returned 1, which is mathematically wrong.)
    """
    if num < 0:
        raise ValueError('factorial is undefined for negative numbers')
    if num < 2:
        return 1
    return num * factorial(num - 1)
algorithm = factorial
name = 'recursive if'
```
#### File: numeric/factorial/test_factorial.py
```python
from . import factorial_forloop
from . import factorial_native
from . import factorial_recursive
from . import factorial_reduce
from . import factorial_match
from . import factorial_tail
test_subjects = [
factorial_forloop,
factorial_native,
factorial_recursive,
factorial_reduce,
factorial_match,
factorial_tail
]
def test_factorial():
    """Check every implementation against the small reference factorials."""
    expected = {0: 1, 1: 1, 2: 2, 3: 6, 4: 24, 5: 120}
    for subject in test_subjects:
        for arg, result in expected.items():
            assert subject.algorithm(arg) == result
```
#### File: sorting/bubble_sort/bubble_sort_naive.py
```python
def bubble_sort(nums: list[float]) -> list[float]:
    """Sort ``nums`` in place with naive bubble sort and return it.

    Always performs ``len(nums)`` full passes, even over an already-sorted
    list, so every input costs O(n^2) comparisons.
    """
    size = len(nums)
    for _ in range(size):
        for left in range(size - 1):
            right = left + 1
            if nums[left] > nums[right]:
                nums[left], nums[right] = nums[right], nums[left]
    return nums
algorithm = bubble_sort
name = 'naive'
```
#### File: sorting/bubble_sort/test_bubble_sort.py
```python
from . import bubble_sort_naive
from . import bubble_sort_optimized
test_subjects = [
bubble_sort_naive,
bubble_sort_optimized
]
def test_bubble_sort(test_sorting_algorithm):
    # `test_sorting_algorithm` is presumably a shared pytest fixture (defined in
    # a conftest elsewhere) that runs the common sorting checks over every
    # module in `test_subjects` — confirm against the project's conftest.
    test_sorting_algorithm(test_subjects)
```
#### File: sorting/insertion_sort/insertion_sort_naive.py
```python
def insertion_sort(nums: list[float]) -> list[float]:
    """Sorts a list in-place using the insertion sort approach.

    This version swaps the new element backwards one slot at a time, so it
    compares more and moves more data than a shift-based variant would.

    Time complexity: O(n) best O(n^2) worst O(n^2) average.
    Space complexity: O(n) total O(1) auxiliary.

    Args:
        nums: A list of numbers.

    Returns:
        The sorted list.
    """
    for end in range(1, len(nums)):
        pos = end
        # Bubble nums[pos] backwards until it is no smaller than its neighbour.
        while pos and nums[pos] < nums[pos - 1]:
            nums[pos], nums[pos - 1] = nums[pos - 1], nums[pos]
            pos -= 1
    return nums
algorithm = insertion_sort
name = 'in-place naive'
```
#### File: project_euler/e001_multiples_of_3_or_5/mul_of_3_or_5_filter.py
```python
def mul_of_3_or_5(limit: int) -> int:
    """Computes the sum of all the multiples of 3 or 5 below the given limit,
    using filter and range.

    :param limit: Limit of the values to sum (exclusive).
    :return: Sum of all the multiples of 3 or 5 below the given limit.
    """
    def is_multiple(value: int) -> bool:
        # Keep only values divisible by 3 or by 5.
        return value % 3 == 0 or value % 5 == 0

    return sum(filter(is_multiple, range(3, limit)))


solution = mul_of_3_or_5
name = 'filter'
```
|
{
"source": "JCPedroza/freeCodeCamp-coding-interview-prep-python",
"score": 4
}
|
#### File: algorithms/implement_bubble_sort/implement_bubble_sort_flag_while.py
```python
def bubble_sort(nums: list[float]) -> list[float]:
    """Sort ``nums`` in place with flag-based bubble sort and return it.

    A pass that performs no swaps proves the list is sorted, so the outer
    loop stops early — O(n) best case on already-sorted input.
    """
    done = False
    while not done:
        done = True
        for right in range(1, len(nums)):
            left = right - 1
            if nums[left] > nums[right]:
                nums[left], nums[right] = nums[right], nums[left]
                done = False
    return nums
```
#### File: algorithms/implement_insertion_sort/implement_insertion_sort.py
```python
def insertion_sort(nums: list[float]) -> list[float]:
    """Sort ``nums`` in place with insertion sort and return it.

    Each new element is swapped backwards until it sits after a smaller or
    equal element: O(n^2) worst-case time, O(1) auxiliary space.
    """
    for start in range(1, len(nums)):
        index = start
        # BUG FIX: the bounds guard now comes first. The original evaluated
        # `nums[index] < nums[index - 1]` before `index > 0`, so once index
        # reached 0 it compared against nums[-1] (the LAST element) via
        # Python's negative indexing — accidentally harmless here, but a
        # wasted, wrong comparison and an out-of-bounds read in most languages.
        while index > 0 and nums[index] < nums[index - 1]:
            nums[index], nums[index - 1] = nums[index - 1], nums[index]
            index -= 1
    return nums
```
#### File: data_structures/create_a_stack_class/create_a_stack_class_test.py
```python
from create_a_stack_class import Stack
def assert_stack_state(stack, expected_list):
    """Assert that every public view of `stack` matches `expected_list`.

    Checks the backing list, emptiness, both size accessors, and `peek`
    (which is expected to return None for an empty stack).
    """
    assert stack.list == expected_list
    assert stack.is_empty() == (not expected_list)
    assert len(stack) == len(expected_list)
    assert stack.size() == len(expected_list)
    assert stack.peek() == (expected_list[-1] if expected_list else None)
def test_basic_stack_operations():
    """Exercise push/pop/peek/clear on a Stack, including the empty-stack edge cases."""
    stack = Stack()
    assert_stack_state(stack, [])
    # Popping an empty stack returns None rather than raising.
    assert stack.pop() is None
    stack.push(0)
    assert_stack_state(stack, [0])
    # push returns the stack itself, allowing chained calls.
    stack.push(1).push(2).push(3)
    assert_stack_state(stack, [0, 1, 2, 3])
    assert stack.pop() == 3
    assert stack.pop() == 2
    assert_stack_state(stack, [0, 1])
    stack.clear()
    assert_stack_state(stack, [])
```
#### File: project_euler/001_multiples_of_3_and_5/multiples_of_3_and_5.py
```python
def num_filter(num: int) -> bool:
    """Return True when ``num`` is divisible by 3 or by 5."""
    return not (num % 3 and num % 5)
def multiples_of_3_and_5(limit: int) -> int:
    """Return the sum of every multiple of 3 or 5 strictly below ``limit``."""
    total = 0
    for candidate in range(1, limit):
        if num_filter(candidate):
            total += candidate
    return total
```
#### File: project_euler/004_largest_palindrome_product/largest_palindrome_product.py
```python
def is_num_palindrome(num):
    """Return True when ``num`` reads the same forwards and backwards."""
    digits = str(num)
    return digits == digits[::-1]
def ndigit_range(ndigit):
    """Return the range of all integers with exactly ``ndigit`` digits."""
    lower = 10 ** (ndigit - 1)
    return range(lower, lower * 10)
def largest_palindrome_product(ndigit):
    """Return the largest palindrome that is a product of two ndigit numbers."""
    candidates = ndigit_range(ndigit)
    # Pair each number only with factors >= itself to skip mirrored products.
    return max(
        first * second
        for first in candidates
        for second in range(first, candidates[-1] + 1)
        if is_num_palindrome(first * second)
    )
```
|
{
"source": "J-CPelletier/WebComicToCBZ",
"score": 2
}
|
#### File: download/tests/test_comic_pipeline.py
```python
import os
from scrapy.exceptions import DropItem
import pytest
from webcomix.scrapy.download.comic_pipeline import ComicPipeline
from webcomix.scrapy.download.comic_page import ComicPage
from webcomix.supported_comics import supported_comics
# Shared fixtures: a representative comic plus the image URL/paths the
# mocked Comic.save_image_* helpers are pinned to return.
first_comic = list(sorted(supported_comics.values()))[0]
expected_url_image = "http://imgs.xkcd.com/comics/barrel_cropped_(1).jpg"
expected_image_location = "test/1.jpg"
expected_image_filename = "1.jpg"
def test_get_media_requests_returns_good_request_when_file_not_present(mocker):
    """When the image is not on disk, a download request is built with the target filename in its meta."""
    mocker.patch("os.path.isfile", return_value=False)
    mock_spider_info = mocker.patch("scrapy.pipelines.media.MediaPipeline.SpiderInfo")
    mocker.patch(
        "webcomix.comic.Comic.save_image_location", return_value=expected_image_location
    )
    mocker.patch(
        "webcomix.comic.Comic.save_image_filename", return_value=expected_image_filename
    )
    pipeline = ComicPipeline(store_uri="foo")
    elements = list(
        pipeline.get_media_requests(
            ComicPage(url=expected_url_image, page=1, title=False, alt_text=None),
            mock_spider_info,
        )
    )
    request = elements[0]
    assert request.url == expected_url_image
    assert request.meta["image_file_name"] == expected_image_filename
    # ComicPipeline(store_uri="foo") creates the "foo" directory; clean it up.
    os.rmdir("foo")
def test_get_media_requests_drops_item_when_file_present(mocker):
    """When the image already exists on disk, the item is dropped instead of re-downloaded."""
    mocker.patch("os.path.isfile", return_value=True)
    mock_spider_info = mocker.patch("scrapy.pipelines.media.MediaPipeline.SpiderInfo")
    mocker.patch(
        "webcomix.comic.Comic.save_image_location", return_value=expected_image_location
    )
    pipeline = ComicPipeline(store_uri="foo")
    with pytest.raises(DropItem):
        list(
            pipeline.get_media_requests(
                ComicPage(url=expected_url_image, page=1, title=False, alt_text=None),
                mock_spider_info,
            )
        )
    os.rmdir("foo")
def test_get_media_requests_drops_item_when_file_present_in_zip(mocker):
    """When the image already exists inside the cbz archive, the item is dropped as well."""
    mocker.patch("os.path.isfile", side_effect=[False])
    mocker.patch(
        "webcomix.scrapy.download.comic_pipeline.ComicPipeline.image_in_zipfile",
        return_value=True,
    )
    mock_spider_info = mocker.patch("scrapy.pipelines.media.MediaPipeline.SpiderInfo")
    mocker.patch(
        "webcomix.comic.Comic.save_image_location", return_value=expected_image_location
    )
    pipeline = ComicPipeline(store_uri="foo")
    with pytest.raises(DropItem):
        list(
            pipeline.get_media_requests(
                ComicPage(url=expected_url_image, page=1, title=False, alt_text=None),
                mock_spider_info,
            )
        )
    os.rmdir("foo")
def test_item_completed_returns_item_when_file_downloaded(mocker):
    """A successful download result passes the item through unchanged."""
    results = [(True, {"path": expected_image_location})]
    item = ComicPage()
    pipeline = ComicPipeline(store_uri="foo")
    result = pipeline.item_completed(results, item, mocker.ANY)
    assert result == item
    os.rmdir("foo")
def test_item_completed_returns_drops_when_file_not_downloaded(mocker):
    """A failed download result causes the item to be dropped."""
    results = [(False, {})]
    item = ComicPage()
    pipeline = ComicPipeline(store_uri="foo")
    with pytest.raises(DropItem):
        pipeline.item_completed(results, item, mocker.ANY)
    os.rmdir("foo")
def test_file_path_is_image_path(mocker):
    """file_path returns exactly the image_file_name carried in the request meta."""
    mock_request = mocker.patch("scrapy.http.Request")
    mock_request.meta = {"image_file_name": expected_image_location}
    pipeline = ComicPipeline(store_uri="foo")
    file_path = pipeline.file_path(mock_request)
    assert file_path == expected_image_location
    os.rmdir("foo")
def test_image_not_in_zip_if_zip_does_not_exist(mocker):
    """image_in_zipfile is False when the archive file itself is missing."""
    mocker.patch("os.path.isfile", return_value=False)
    assert ComicPipeline.image_in_zipfile(mocker.ANY, mocker.ANY) == False
```
#### File: webcomix/scrapy/request_factory.py
```python
from scrapy import Request
from scrapy_splash import SplashRequest
class RequestFactory:
    """Build Scrapy requests, optionally routed through Splash so JavaScript runs."""

    def __init__(self, javascript):
        # When truthy, pages are fetched via Splash (JS-rendered); otherwise plain Scrapy.
        self.javascript = javascript

    def create(self, url, next_page):
        """Return a request for `url` carrying the page number in its meta."""
        meta = {"page": next_page}
        if not self.javascript:
            return Request(url, meta=meta)
        return SplashRequest(url, args={"wait": 0.5}, meta=meta)
```
#### File: scrapy/tests/test_request_factory.py
```python
from scrapy import Request
from scrapy_splash import SplashRequest
from webcomix.scrapy.request_factory import RequestFactory
AN_URL = "https://xkcd.com"
def test_factory_creates_request_without_javascript(mocker):
    """Without JavaScript, the factory produces a plain Scrapy Request."""
    request_factory = RequestFactory(False)
    request = request_factory.create(AN_URL, mocker.ANY)
    # NOTE(review): isinstance() would be the idiomatic check here.
    assert type(request) == Request
def test_factory_creates_splash_request_with_javascript(mocker):
    """With JavaScript enabled, the factory produces a SplashRequest instead."""
    request_factory = RequestFactory(True)
    request = request_factory.create(AN_URL, mocker.ANY)
    assert type(request) == SplashRequest
```
#### File: webcomix/tests/test_docker.py
```python
import docker
import pytest
from webcomix.docker import DockerManager, CONTAINER_NAME
@pytest.fixture
def cleanup_container():
    """After the test, kill any running container whose image is CONTAINER_NAME."""
    yield None
    client = docker.from_env()
    for container in client.containers.list():
        if container.attrs["Config"]["Image"] == CONTAINER_NAME:
            container.kill()
def test_no_javascript_spawns_no_container(cleanup_container):
    """Entering a non-JavaScript DockerManager starts no container."""
    manager = DockerManager(False)
    manager.__enter__()
    # The non-JS path leaves manager.client unset, so provide one for the lookup.
    manager.client = docker.from_env()
    assert manager._get_container() is None
def test_javascript_spawns_container(cleanup_container):
    """Entering a JavaScript DockerManager starts the Splash container."""
    manager = DockerManager(True)
    manager.__enter__()
    assert manager._get_container() is not None
    manager.__exit__(None, None, None)
def test_javascript_exit_removes_container(cleanup_container):
    """Exiting the manager tears the container back down."""
    manager = DockerManager(True)
    manager.__enter__()
    manager.__exit__(None, None, None)
    assert manager._get_container() is None
```
#### File: webcomix/tests/test_search.py
```python
from webcomix.comic import Comic
from webcomix.search import discovery
from webcomix.tests.fake_websites.fixture import (
one_webpage_searchable_uri,
three_webpages_uri,
three_webpages_classes_uri,
)
def test_search_searchable_website(mocker, three_webpages_classes_uri):
    """With the search space narrowed to one image/next candidate each, discovery
    finds the class-based selectors and walks all three fake pages."""
    expected = Comic(
        "Blindsprings",
        three_webpages_classes_uri,
        "//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'comic')]//@src",
        "//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'next')]//@href",
    )
    # Shrink the candidate xpath vocabulary so only one combination is tried.
    mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
    mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
    mocker.patch("webcomix.search.possible_tags_image", ["*"])
    mocker.patch("webcomix.search.possible_tags_next", ["*"])
    mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
    mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
    mocker.patch("webcomix.util.check_first_pages")
    comic, result = discovery("Blindsprings", three_webpages_classes_uri)
    three_webpages_classes_folder = three_webpages_classes_uri.strip("1.html")
    assert result == [
        {
            "page": 1,
            "url": three_webpages_classes_uri,
            "image_urls": [three_webpages_classes_folder + "1.jpeg"],
            "alt_text": None,
        },
        {
            "page": 2,
            "url": three_webpages_classes_folder + "2.html",
            "image_urls": [three_webpages_classes_folder + "2.jpeg"],
            "alt_text": None,
        },
        {
            "page": 3,
            "url": three_webpages_classes_folder + "3.html",
            "image_urls": [three_webpages_classes_folder + "3.jpeg"],
            "alt_text": None,
        },
    ]
    assert comic.start_url == expected.start_url
    assert comic.next_page_selector == expected.next_page_selector
    assert comic.comic_image_selector == expected.comic_image_selector
def test_search_unsearchable_website(mocker, three_webpages_uri):
    """When no candidate selector matches the site, discovery returns (None, None)."""
    mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
    mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
    mocker.patch("webcomix.search.possible_tags_image", ["*"])
    mocker.patch("webcomix.search.possible_tags_next", ["*"])
    mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
    mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
    assert discovery("test", three_webpages_uri) == (None, None)
def test_can_stop_searching(mocker, three_webpages_classes_uri):
    """A KeyboardInterrupt during verification aborts the search via sys.exit."""
    mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
    mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
    mocker.patch("webcomix.search.possible_tags_image", ["div"])
    mocker.patch("webcomix.search.possible_tags_next", ["div"])
    mocker.patch("webcomix.search.possible_attributes_image", ["@rel"])
    mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
    exit_called = mocker.patch("sys.exit")
    mocker.patch("webcomix.comic.Comic.verify_xpath", side_effect=KeyboardInterrupt)
    result = discovery("test", three_webpages_classes_uri)
    assert exit_called.call_count == 1
    assert result == (None, None)
def test_can_find_single_page_correctly_while_searching(
    mocker, one_webpage_searchable_uri
):
    """In single-page mode, discovery returns one entry matching verify_xpath."""
    mocker.patch("webcomix.search.possible_image_xpath", ["image"])
    mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
    mocker.patch("webcomix.search.possible_tags_image", ["*"])
    mocker.patch("webcomix.search.possible_tags_next", ["*"])
    mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
    mocker.patch("webcomix.search.possible_attributes_next", ["."])
    comic, result = discovery("test", one_webpage_searchable_uri, single_page=True)
    validation = comic.verify_xpath()
    assert len(result) == 1
    assert result == validation
    assert len(result[0]["image_urls"]) == 2
```
|
{
"source": "jcpeterson/Dallinger",
"score": 2
}
|
#### File: dallinger/heroku/messages.py
```python
import logging
import six
import smtplib
from cached_property import cached_property
from datetime import datetime
from email.mime.text import MIMEText
logger = logging.getLogger(__file__)
def get_email_server(host):
    """Open an SMTP connection to ``host``.

    The connection attempt is abandoned after 8 seconds.
    """
    connect_timeout = 8
    return smtplib.SMTP(host, timeout=connect_timeout)
resubmit_whimsical = """Dearest Friend,
I am writing to let you know that at {when},
during my regular (and thoroughly enjoyable) perousal of the most charming
participant data table, I happened to notice that assignment {assignment_id}
has been taking longer than we were expecting. I recall you had suggested
{duration} minutes as an upper limit for what was an acceptable length of time
for each assignement, however this assignment had been underway for a shocking
{minutes_so_far} minutes, a full {minutes_excess} minutes over your allowance.
I immediately dispatched a telegram to our mutual friends at AWS and they were
able to assure me that although the notification had failed to be correctly
processed, the assignment had in fact been completed. Rather than trouble you,
I dealt with this myself and I can assure you there is no immediate cause for
concern. Nonetheless, for my own peace of mind, I would appreciate you taking
the time to look into this matter at your earliest convenience.
I remain your faithful and obedient servant,
<NAME>
P.S. Please do not respond to this message, I am busy with other matters.
"""
resubmit = """Dear experimenter,
This is an automated email from
Dallinger. You are receiving this email because the Dallinger platform has
discovered evidence that a notification from Amazon Web Services failed to
arrive at the server. Dallinger has automatically contacted AWS and has
determined the dropped notification was a submitted notification (i.e. the
participant has finished the experiment). This is a non-fatal error and so
Dallinger has auto-corrected the problem. Nonetheless you may wish to check the
database.
Best,
The Dallinger dev. team.
Error details:
Assignment: {assignment_id}
Allowed time: {duration}
Time since participant started: {minutes_so_far}
"""
cancelled_hit_whimsical = """Dearest Friend,
I am afraid I write to you with most grave tidings. At {when},
during a routine check of the usually most delightful participant data table,
I happened to notice that assignment {assignment_id} has been taking longer
than we were expecting. I recall you had suggested {duration} minutes as an
upper limit for what was an acceptable length of time for each assignment,
however this assignment had been underway for a shocking {minutes_so_far}
minutes, a full {minutes_excess} minutes over your allowance. I immediately
dispatched a telegram to our mutual friends at AWS and they infact informed me
that they had already sent us a notification which we must have failed to
process, implying that the assignment had not been successfully completed. Of
course when the seriousness of this scenario dawned on me I had to depend on
my trusting walking stick for support: without the notification I didn't know
to remove the old assignment's data from the tables and AWS will have already
sent their replacement, meaning that the tables may already be in a most
unsound state!
I am sorry to trouble you with this, however, I do not know how to proceed so
rather than trying to remedy the scenario myself, I have instead temporarily
ceased operations by expiring the HIT with the fellows at AWS and have
refrained from posting any further invitations myself. Once you see fit I
would be most appreciative if you could attend to this issue with the caution,
sensitivity and intelligence for which I know you so well.
I remain your faithful and
obedient servant,
<NAME>
P.S. Please do not respond to this
message, I am busy with other matters.
"""
cancelled_hit = """Dear experimenter,
This is an automated email from
Dallinger. You are receiving this email because the Dallinger platform has
discovered evidence that a notification from Amazon Web Services failed to
arrive at the server. Dallinger has automatically contacted AWS and has
determined the dropped notification was an abandoned/returned notification
(i.e. the participant had returned the experiment or had run out of time).
This is a serious error and so Dallinger has paused the experiment - expiring
the HIT on MTurk and setting auto_recruit to false. Participants currently
playing will be able to finish, however no further participants will be
recruited until you do so manually. We strongly suggest you use the details
below to check the database to make sure the missing notification has not caused
additional problems before resuming.
If you are receiving a lot of these
emails this suggests something is wrong with your experiment code.
Best,
The Dallinger dev. team.
Error details:
Assignment: {assignment_id}
Allowed time: {duration}
Time since participant started: {minutes_so_far}
"""
idle_template = """Dear experimenter,
This is an automated email from Dallinger. You are receiving this email because
your dyno has been running for over {minutes_so_far} minutes.
The application id is: {app_id}
To see the logs, use the command "dallinger logs --app {app_id}"
To pause the app, use the command "dallinger hibernate --app {app_id}"
To destroy the app, use the command "dallinger destroy --app {app_id}"
The Dallinger dev. team.
"""
hit_error_template = """Dear experimenter,
This is an automated email from Dallinger. You are receiving this email because
a recruited participant has been unable to complete the experiment due to
a bug.
The application id is: {app_id}
The information about the failed HIT is recorded in the database in the
Notification table, with assignment_id {assignment_id}.
To see the logs, use the command "dallinger logs --app {app_id}"
To pause the app, use the command "dallinger hibernate --app {app_id}"
To destroy the app, use the command "dallinger destroy --app {app_id}"
The Dallinger dev. team.
"""
class MessengerError(Exception):
    """A message could not be relayed.

    Raised by messengers when delivery of a notification fails (e.g. an
    SMTP error while emailing the experiment owner).
    """
class HITSummary(object):
    """Timing summary for a HIT, normalized to whole minutes.

    :param assignment_id: the MTurk assignment ID
    :param duration: time allotted for the assignment, in seconds
    :param time_active: time the participant has been active, in seconds
    :param app_id: the Dallinger application ID
    :param when: timestamp for the report; defaults to the time of creation
    """

    def __init__(self, assignment_id, duration, time_active, app_id, when=None):
        # BUG FIX: the previous default of ``when=datetime.now()`` was
        # evaluated once at import time, so every summary created without an
        # explicit ``when`` shared the same stale timestamp. Use a None
        # sentinel and evaluate at call time instead.
        self.when = datetime.now() if when is None else when
        self.assignment_id = assignment_id
        self.duration = int(round(duration / 60))
        self.minutes_so_far = int(round(time_active / 60))
        self.minutes_excess = int(round((time_active - duration) / 60))
        self.app_id = app_id
# Value used in config templates to mark a setting the user must fill in.
CONFIG_PLACEHOLDER = '???'


class EmailConfig(object):
    """Extracts and validates email-related values from a Configuration.

    Values are read with empty-string fallbacks so a partially configured
    deployment can still be inspected; :meth:`validate` reports what is
    missing or still a placeholder.
    """

    # Maps our attribute names to the configuration keys they come from, so
    # validation errors can name the actual config setting.
    # BUG FIX: the 'password' entry previously held the mangled placeholder
    # text '<PASSWORD>' instead of the real config key 'smtp_password'
    # (the key actually read in __init__), so validation errors reported a
    # bogus key name.
    _map = {
        'username': 'smtp_username',
        'toaddr': 'contact_email_on_error',
        'fromaddr': 'dallinger_email_address',
        'password': 'smtp_password',
    }

    def __init__(self, config):
        self.host = config.get('smtp_host', '')
        self.username = config.get('smtp_username', '')
        self.toaddr = config.get('contact_email_on_error', '')
        self.password = config.get('smtp_password', '')
        self.fromaddr = config.get('dallinger_email_address', '')
        self.whimsical = config.get('whimsical', False)

    def validate(self):
        """Could this config be used to send a real email?

        Returns an error string naming the missing/placeholder config keys,
        or ``None`` if everything required is present.
        """
        missing = []
        for attr_name, config_key in self._map.items():
            value = getattr(self, attr_name, False)
            if not value or value == CONFIG_PLACEHOLDER:
                missing.append(config_key)
        if missing:
            return "Missing or invalid config values: {}".format(
                ', '.join(sorted(missing))
            )
class HITMessages(object):
    """Builds subject/message pairs describing HIT lifecycle events."""

    @staticmethod
    def by_flavor(hit_info, whimsical):
        """Return the message builder matching the ``whimsical`` flag."""
        flavor = WhimsicalHITMessages if whimsical else HITMessages
        return flavor(hit_info)

    _templates = {
        'resubmitted': {
            'subject': 'Dallinger automated email - minor error.',
            'template': resubmit,
        },
        'cancelled': {
            'subject': 'Dallinger automated email - major error.',
            'template': cancelled_hit,
        },
        'idle': {
            'subject': 'Idle Experiment.',
            'template': idle_template,
        },
        'hit_error': {
            'subject': 'Error during HIT.',
            'template': hit_error_template,
        },
    }

    def __init__(self, hit_info):
        # Copy the timing fields onto the instance so templates can be
        # formatted directly from ``self.__dict__``.
        self.when = hit_info.when
        self.assignment_id = hit_info.assignment_id
        self.duration = hit_info.duration
        self.minutes_so_far = hit_info.minutes_so_far
        self.minutes_excess = hit_info.minutes_excess
        self.app_id = hit_info.app_id

    def resubmitted_msg(self):
        return self._build('resubmitted')

    def hit_cancelled_msg(self):
        return self._build('cancelled')

    def idle_experiment_msg(self):
        return self._build('idle')

    def hit_error_msg(self):
        return self._build('hit_error')

    def _build(self, category):
        spec = self._templates[category]
        return {
            'message': spec['template'].format(**self.__dict__),
            'subject': spec['subject'],
        }
class WhimsicalHITMessages(HITMessages):
    """HITMessages variant with light-hearted resubmit/cancel templates.

    Only the 'resubmitted' and 'cancelled' entries differ from the parent;
    'idle' and 'hit_error' reuse the standard templates.
    """

    _templates = {
        'resubmitted': {
            'subject': 'A matter of minor concern.',
            'template': resubmit_whimsical,
        },
        'cancelled': {
            'subject': 'Most troubling news.',
            'template': cancelled_hit_whimsical,
        },
        'idle': {
            'subject': 'Idle Experiment.',
            'template': idle_template,
        },
        'hit_error': {
            'subject': 'Error during HIT.',
            'template': hit_error_template,
        },
    }
class BaseHITMessenger(object):
    """Common behavior for HIT notification messengers.

    Builds the appropriate messages for a HIT event and delivers them via
    the subclass's ``_send`` implementation. Each ``send_*`` method returns
    the message dict it sent, for inspection by callers.
    """

    def __init__(self, hit_info, email_settings):
        self.messages = HITMessages.by_flavor(hit_info, email_settings.whimsical)
        self.app_id = hit_info.app_id
        self.host = email_settings.host
        self.username = email_settings.username
        self.fromaddr = email_settings.fromaddr
        self.toaddr = email_settings.toaddr
        # BUG FIX: this line previously read ``<PASSWORD>_settings.password``
        # (mangled placeholder text), which is a syntax error; it should
        # simply copy the password from the email settings.
        self.password = email_settings.password

    def send_resubmitted_msg(self):
        data = self.messages.resubmitted_msg()
        self._send(data)
        return data

    def send_hit_cancelled_msg(self):
        data = self.messages.hit_cancelled_msg()
        self._send(data)
        return data

    def send_idle_experiment_msg(self):
        data = self.messages.idle_experiment_msg()
        self._send(data)
        return data

    def send_hit_error_msg(self):
        data = self.messages.hit_error_msg()
        self._send(data)
        return data
class EmailingHITMessenger(BaseHITMessenger):
    """Actually sends an email message to the experiment owner.
    """

    @cached_property
    def server(self):
        # Lazily create (and cache) the SMTP connection on first use.
        return get_email_server(self.host)

    def _send(self, data):
        # Send ``data`` ('subject'/'message') as an email over TLS.
        # Any failure is wrapped in MessengerError, chaining the original
        # exception as the cause (via six for py2/py3 compatibility).
        msg = MIMEText(data['message'])
        msg['Subject'] = data['subject']
        try:
            self.server.starttls()
            self.server.login(self.username, self.password)
            self.server.sendmail(self.fromaddr, self.toaddr, msg.as_string())
            self.server.quit()
        except smtplib.SMTPException as ex:
            six.raise_from(
                MessengerError('SMTP error sending HIT error email.'),
                ex
            )
        except Exception as ex:
            six.raise_from(
                MessengerError("Unknown error sending HIT error email."),
                ex
            )
class DebugHITMessenger(BaseHITMessenger):
    """Used in debug mode.

    Prints the message contents to the log instead of sending an email.
    """

    def _send(self, data):
        # Prefix with the concrete class name so log readers can tell
        # which messenger produced the entry.
        class_name = type(self).__name__
        logger.info("{}:\n{}".format(class_name, data['message']))
def get_messenger(hit_info, config):
    """Return an appropriate Messenger.

    If we're in debug mode, or email settings aren't set, return a debug
    version which logs the message instead of attempting to send a real
    email.
    """
    email_settings = EmailConfig(config)
    if config.get("mode") == "debug":
        return DebugHITMessenger(hit_info, email_settings)
    problems = email_settings.validate()
    if problems is None:
        return EmailingHITMessenger(hit_info, email_settings)
    logger.info(problems + " Will log errors instead of emailing them.")
    return DebugHITMessenger(hit_info, email_settings)
```
#### File: Dallinger/tests/test_command_line.py
```python
import mock
import os
import pexpect
import re
import six
import subprocess
import sys
import tempfile
from six.moves import configparser
from time import sleep
from uuid import UUID
from click.testing import CliRunner
import pytest
from pytest import raises
import dallinger.command_line
from dallinger.command_line import new_webbrowser_profile
from dallinger.command_line import verify_package
from dallinger.command_line import report_idle_after
from dallinger.config import get_config
from dallinger import recruiters
import dallinger.version
def found_in(name, path):
    """Return True if a file or directory ``name`` exists under ``path``."""
    candidate = os.path.join(path, name)
    return os.path.exists(candidate)
@pytest.fixture
def output():
    """Stand-in for the CLI Output object with mocked log/error/blather
    channels, so tests can assert on emitted messages."""
    class Output(object):
        def __init__(self):
            self.log = mock.Mock()
            self.error = mock.Mock()
            self.blather = mock.Mock()

    return Output()
@pytest.fixture
def sleepless():
    # Use this fixture to ignore sleep() calls, for speed.
    with mock.patch('dallinger.command_line.time.sleep'):
        yield
@pytest.fixture
def browser():
    """Patch webbrowser so no real browser is launched.

    is_command is forced False so the fallback (module-level) browser path
    is selected.
    """
    with mock.patch('dallinger.command_line.is_command') as mock_is_command:
        mock_is_command.return_value = False
        with mock.patch('dallinger.command_line.webbrowser') as mock_browser:
            yield mock_browser
@pytest.fixture
def heroku():
    """Replace the HerokuApp class in command_line with a spec'd Mock and
    yield the mock instance it will return."""
    from dallinger.heroku.tools import HerokuApp
    instance = mock.Mock(spec=HerokuApp)
    with mock.patch('dallinger.command_line.HerokuApp') as mock_app_class:
        mock_app_class.return_value = instance
        yield instance
@pytest.fixture
def data():
    """Patch the data module with canned backup/restore URLs and a fake
    S3 bucket lookup."""
    with mock.patch('dallinger.command_line.data') as mock_data:
        mock_data.backup.return_value = 'fake backup url'
        mock_bucket = mock.Mock()
        mock_key = mock.Mock()
        mock_key.generate_url.return_value = 'fake restore url'
        mock_bucket.lookup.return_value = mock_key
        mock_data.user_s3_bucket.return_value = mock_bucket
        yield mock_data
@pytest.fixture
def mturk():
    """Patch MTurkService; its instance reports two HITs, one of which
    carries an experiment-id annotation."""
    with mock.patch('dallinger.command_line.MTurkService') as mock_mturk:
        mock_instance = mock.Mock()
        mock_instance.get_hits.return_value = [
            {'id': 'hit-id-1'},
            {'id': 'hit-id-2', 'annotation': 'exp-id-2'}
        ]
        mock_mturk.return_value = mock_instance
        yield mock_mturk
class TestIsolatedWebbrowser(object):
    """new_webbrowser_profile() should pick an isolated profile for whichever
    browser command is available, falling back to the webbrowser module."""

    def test_chrome_isolation(self):
        import webbrowser
        with mock.patch('dallinger.command_line.is_command') as is_command:
            # Pretend only google-chrome is installed.
            is_command.side_effect = lambda s: s == 'google-chrome'
            isolated = new_webbrowser_profile()
        assert isinstance(isolated, webbrowser.Chrome)
        assert isolated.remote_args[:2] == [r'%action', r'%s']
        # Profile directory must live under the system temp dir.
        assert isolated.remote_args[-1].startswith(
            '--user-data-dir="{}'.format(tempfile.gettempdir())
        )

    def test_firefox_isolation(self):
        import webbrowser
        with mock.patch('dallinger.command_line.is_command') as is_command:
            is_command.side_effect = lambda s: s == 'firefox'
            isolated = new_webbrowser_profile()
        assert isinstance(isolated, webbrowser.Mozilla)
        assert isolated.remote_args[0] == '-profile'
        assert isolated.remote_args[1].startswith(tempfile.gettempdir())
        assert isolated.remote_args[2:] == ['-new-instance', '-no-remote', '-url', r'%s']

    def test_fallback_isolation(self):
        import webbrowser
        with mock.patch('dallinger.command_line.is_command') as is_command:
            # No known browser command: fall back to the plain module.
            is_command.return_value = False
            isolated = new_webbrowser_profile()
        assert isolated == webbrowser
@pytest.mark.usefixtures('bartlett_dir')
class TestVerify(object):
    """Smoke test: ``dallinger verify`` succeeds in a demo experiment dir."""

    def test_verify(self):
        subprocess.check_call(["dallinger", "verify"])
class TestCommandLine(object):
    """Smoke tests exercising the ``dallinger`` CLI via subprocess."""

    def test_dallinger_no_args(self):
        output = subprocess.check_output(["dallinger"])
        assert(b"Usage: dallinger [OPTIONS] COMMAND [ARGS]" in output)

    def test_log_empty(self):
        # NOTE(review): ``assert ValueError, <expr>`` is always true, and the
        # message expression is only evaluated when the assert fails — so the
        # CLI call after the comma never runs. These tests assert nothing;
        # they likely intended ``pytest.raises`` or a return-code check.
        id = "dlgr-3b9c2aeb"
        assert ValueError, subprocess.call(["dallinger", "logs", "--app", id])

    def test_log_no_flag(self):
        # NOTE(review): same always-true assert pattern as above.
        assert TypeError, subprocess.call(["dallinger", "logs"])

    def test_deploy_empty(self):
        id = "dlgr-3b9c2aeb"
        assert ValueError, subprocess.call(["dallinger", "deploy", "--app", id])

    def test_sandbox_empty(self):
        id = "dlgr-3b9c2aeb"
        assert ValueError, subprocess.call(["dallinger", "sandbox", "--app", id])

    def test_verify_id_short_fails(self):
        # NOTE(review): ``dallinger.commandline`` looks like a typo for
        # ``dallinger.command_line``; it goes unnoticed because the
        # expression is never evaluated (see always-true assert above).
        id = "dlgr-3b9c2aeb"
        assert ValueError, dallinger.commandline.verify_id(id)

    def test_empty_id_fails_verification(self):
        assert ValueError, dallinger.commandline.verify_id(None)

    def test_new_uuid(self):
        output = subprocess.check_output(["dallinger", "uuid"])
        assert isinstance(UUID(output.strip().decode('utf8'), version=4), UUID)

    def test_dallinger_help(self):
        output = subprocess.check_output(["dallinger", "--help"])
        assert(b"Commands:" in output)

    def test_setup(self):
        # Run twice: the second invocation exercises the already-set-up path.
        subprocess.check_call(["dallinger", "setup"])
        subprocess.check_call(["dallinger", "setup"])
class TestReportAfterIdleDecorator(object):
    """The report_idle_after decorator should message the experimenter when
    the wrapped call exceeds its time limit."""

    def test_reports_timeout(self, active_config):
        @report_idle_after(1)
        def will_time_out():
            sleep(5)

        with mock.patch('dallinger.command_line.get_messenger') as messenger:
            will_time_out()
        messenger.assert_called_once()
@pytest.mark.usefixtures('bartlett_dir', 'active_config')
class TestSetupExperiment(object):
    """setup_experiment() should clone the experiment into a temp deployment
    directory with all required assets and a sanitized config."""

    def test_setup_creates_new_experiment(self):
        from dallinger.command_line import setup_experiment
        # Baseline
        exp_dir = os.getcwd()
        assert found_in('experiment.py', exp_dir)
        assert not found_in('experiment_id.txt', exp_dir)
        assert not found_in('Procfile', exp_dir)
        assert not found_in('launch.py', exp_dir)
        assert not found_in('worker.py', exp_dir)
        assert not found_in('clock.py', exp_dir)
        exp_id, dst = setup_experiment()
        # dst should be a temp dir with a cloned experiment for deployment
        assert(exp_dir != dst)
        assert('/tmp' in dst)
        assert found_in('experiment_id.txt', dst)
        assert found_in('experiment.py', dst)
        assert found_in('models.py', dst)
        assert found_in('Procfile', dst)
        assert found_in('launch.py', dst)
        assert found_in('worker.py', dst)
        assert found_in('clock.py', dst)
        assert found_in(os.path.join("static", "css", "dallinger.css"), dst)
        assert found_in(os.path.join("static", "scripts", "dallinger2.js"), dst)
        assert found_in(os.path.join("static", "scripts", "reconnecting-websocket.js"), dst)
        assert found_in(os.path.join("static", "scripts", "reqwest.min.js"), dst)
        assert found_in(os.path.join("static", "scripts", "spin.min.js"), dst)
        assert found_in(os.path.join("static", "scripts", "store+json2.min.js"), dst)
        assert found_in(os.path.join("static", "robots.txt"), dst)
        assert found_in(os.path.join("templates", "error.html"), dst)
        assert found_in(os.path.join("templates", "error-complete.html"), dst)
        assert found_in(os.path.join("templates", "launch.html"), dst)
        assert found_in(os.path.join("templates", "complete.html"), dst)

    def test_setup_with_custom_dict_config(self):
        from dallinger.command_line import setup_experiment
        config = get_config()
        assert config.get('num_dynos_web') == 1
        exp_id, dst = setup_experiment(exp_config={'num_dynos_web': 2})
        # Config is updated
        assert config.get('num_dynos_web') == 2
        # Code snapshot is saved
        # NOTE(review): the result of os.path.exists is not asserted, so
        # this line verifies nothing.
        os.path.exists(os.path.join('snapshots', exp_id + '-code.zip'))
        # There should be a modified configuration in the temp dir
        # NOTE(review): SafeConfigParser is deprecated; ConfigParser is the
        # modern equivalent.
        deploy_config = configparser.SafeConfigParser()
        deploy_config.read(os.path.join(dst, 'config.txt'))
        assert int(deploy_config.get('Parameters', 'num_dynos_web')) == 2

    def test_setup_excludes_sensitive_config(self):
        from dallinger.command_line import setup_experiment
        config = get_config()
        # Auto detected as sensitive
        config.register('a_password', six.text_type)
        # Manually registered as sensitive
        config.register('something_sensitive', six.text_type, sensitive=True)
        # Not sensitive at all
        config.register('something_normal', six.text_type)
        config.extend({'a_password': u'<PASSWORD>',
                       'something_sensitive': u'hide this',
                       'something_normal': u'show this'})
        exp_id, dst = setup_experiment()
        # The temp dir should have a config with the sensitive variables missing
        deploy_config = configparser.SafeConfigParser()
        deploy_config.read(os.path.join(dst, 'config.txt'))
        assert(deploy_config.get(
            'Parameters', 'something_normal') == 'show this'
        )
        with raises(configparser.NoOptionError):
            deploy_config.get('Parameters', 'a_password')
        with raises(configparser.NoOptionError):
            deploy_config.get('Parameters', 'something_sensitive')

    def test_payment_type(self):
        # base_payment must be a float, not an int.
        config = get_config()
        with raises(TypeError):
            config['base_payment'] = 12

    def test_large_float_payment(self):
        # More than two decimal places is rejected by verification.
        config = get_config()
        config['base_payment'] = 1.2342
        assert verify_package() is False

    def test_negative_payment(self):
        config = get_config()
        config['base_payment'] = -1.99
        assert verify_package() is False
@pytest.mark.usefixtures('in_tempdir')
class TestGitClient(object):
    """GitClient wrapper: init/add/commit/push behavior and output routing."""

    @pytest.fixture
    def git(self):
        from dallinger.utils import GitClient
        git = GitClient()
        return git

    def test_client(self, git, stub_config):
        stub_config.write()
        config = {'user.name': 'Test User', 'user.email': '<EMAIL>'}
        git.init(config=config)
        git.add("--all")
        git.commit("Test Repo")
        assert b"Test Repo" in subprocess.check_output(['git', 'log'])

    def test_includes_details_in_exceptions(self, git):
        # Pushing outside a repo should surface git's own error text.
        with pytest.raises(Exception) as ex_info:
            git.push('foo', 'bar')
        assert ex_info.match('Not a git repository')

    def test_can_use_alternate_output(self, git):
        import tempfile
        git.out = tempfile.NamedTemporaryFile()
        git.encoding = 'utf8'
        git.init()
        git.out.seek(0)
        assert b"git init" in git.out.read()
@pytest.fixture
def faster(tempdir):
    """Skip real experiment setup and time delays for speed; yields the
    dict of patched mocks."""
    with mock.patch.multiple('dallinger.command_line',
                             time=mock.DEFAULT,
                             setup_experiment=mock.DEFAULT) as mocks:
        mocks['setup_experiment'].return_value = ('fake-uid', tempdir)
        yield mocks
@pytest.fixture
def launch():
    """Stub _handle_launch_data with a canned recruitment message."""
    with mock.patch('dallinger.command_line._handle_launch_data') as hld:
        hld.return_value = {'recruitment_msg': 'fake\nrecruitment\nlist'}
        yield hld
@pytest.fixture
def fake_git():
    """Patch GitClient so no real git operations run."""
    with mock.patch('dallinger.command_line.GitClient') as git:
        yield git
@pytest.fixture
def herokuapp():
    # Patch addon since we're using a free app which doesn't support them:
    from dallinger.heroku.tools import HerokuApp
    instance = HerokuApp('fake-uid', output=None, team=None)
    instance.addon = mock.Mock()
    with mock.patch('dallinger.command_line.HerokuApp') as mock_app_class:
        mock_app_class.return_value = instance
        yield instance
        # Tear down the real Heroku app created for the test.
        instance.destroy()
@pytest.fixture
def heroku_mock():
    # Patch addon since we're using a free app which doesn't support them:
    from dallinger.heroku.tools import HerokuApp
    instance = mock.Mock(spec=HerokuApp)
    # NOTE(review): redis_url of '\n' is presumably a deliberately inert
    # value for the connection check — confirm against the caller.
    instance.redis_url = '\n'
    instance.name = u'dlgr-fake-uid'
    instance.url = u'fake-url'
    instance.db_url = u'fake-url'
    with mock.patch('dallinger.command_line.heroku') as heroku_module:
        heroku_module.auth_token.return_value = u'fake token'
        with mock.patch('dallinger.command_line.HerokuApp') as mock_app_class:
            mock_app_class.return_value = instance
            yield instance
@pytest.mark.usefixtures('active_config', 'launch', 'fake_git', 'faster')
class TestDeploySandboxSharedSetupNoExternalCalls(object):
    """deploy_sandbox_shared_setup with every external service mocked out:
    verifies the sequence of Heroku operations and the returned summary."""

    @pytest.fixture
    def dsss(self):
        from dallinger.command_line import deploy_sandbox_shared_setup
        return deploy_sandbox_shared_setup

    def test_result(self, dsss, heroku_mock):
        result = dsss()
        assert result == {
            'app_home': u'fake-url',
            'app_name': u'dlgr-fake-uid',
            'recruitment_msg': 'fake\nrecruitment\nlist'
        }

    def test_bootstraps_heroku(self, dsss, heroku_mock):
        dsss()
        heroku_mock.bootstrap.assert_called_once()

    def test_installs_phantomjs(self, dsss, heroku_mock):
        dsss()
        heroku_mock.buildpack.assert_called_once_with(
            'https://github.com/stomita/heroku-buildpack-phantomjs'
        )

    def test_installs_addons(self, dsss, heroku_mock):
        dsss()
        heroku_mock.addon.assert_has_calls([
            mock.call('heroku-postgresql:standard-0'),
            mock.call('heroku-redis:premium-0'),
            mock.call('papertrail'),
            mock.call('sentry')
        ])

    def test_sets_app_properties(self, dsss, heroku_mock):
        dsss()
        heroku_mock.set.assert_has_calls([
            mock.call('auto_recruit', True),
            mock.call('aws_access_key_id', u'fake aws key'),
            mock.call('aws_region', u'us-east-1'),
            mock.call('aws_secret_access_key', u'fake aws secret'),
            mock.call('smtp_password', u'<PASSWORD>'),
            mock.call('smtp_username', u'fake email username'),
            mock.call('whimsical', True),
        ])

    def test_scales_dynos(self, dsss, heroku_mock):
        dsss()
        heroku_mock.scale_up_dyno.assert_has_calls([
            mock.call('web', 1, u'free'),
            mock.call('worker', 1, u'free'),
            mock.call('clock', 1, u'free')
        ])
# NOTE(review): ``pytest.config`` is deprecated/removed in modern pytest;
# this gate should use a fixture or ``request.config`` instead.
@pytest.mark.skipif(not pytest.config.getvalue("heroku"),
                    reason="--heroku was not specified")
@pytest.mark.usefixtures('bartlett_dir', 'active_config', 'launch', 'herokuapp')
class TestDeploySandboxSharedSetupFullSystem(object):
    """End-to-end deployment against a real (free) Heroku app; only runs
    when --heroku is passed."""

    @pytest.fixture
    def dsss(self):
        from dallinger.command_line import deploy_sandbox_shared_setup
        return deploy_sandbox_shared_setup

    def test_full_deployment(self, dsss):
        no_clock = {'clock_on': False}  # can't run clock on free dyno
        result = dsss(exp_config=no_clock)  # can't run clock on free dyno
        app_name = result.get('app_name')
        assert app_name.startswith('dlgr')
@pytest.mark.usefixtures('bartlett_dir')
class Test_handle_launch_data(object):
    """_handle_launch_data: success payload passthrough, retry/backoff on
    HTTP failure, and error reporting for non-JSON responses."""

    @pytest.fixture
    def handler(self):
        from dallinger.command_line import _handle_launch_data
        return _handle_launch_data

    def test_success(self, handler):
        log = mock.Mock()
        with mock.patch('dallinger.command_line.requests.post') as mock_post:
            result = mock.Mock(
                ok=True,
                json=mock.Mock(return_value={'message': u'msg!'}),
            )
            mock_post.return_value = result
            assert handler('/some-launch-url', error=log) == {'message': u'msg!'}

    def test_failure(self, handler):
        from requests.exceptions import HTTPError
        log = mock.Mock()
        with mock.patch('dallinger.command_line.requests.post') as mock_post:
            mock_post.return_value = mock.Mock(
                ok=False,
                json=mock.Mock(return_value={'message': u'msg!'}),
                raise_for_status=mock.Mock(side_effect=HTTPError)
            )
            with pytest.raises(HTTPError):
                handler('/some-launch-url', error=log, delay=0.05, remaining=5)

        # Exponential backoff doubles the delay on each retry.
        log.assert_has_calls([
            mock.call('Experiment launch failed, retrying in 0.1 seconds ...'),
            mock.call('Experiment launch failed, retrying in 0.2 seconds ...'),
            mock.call('Experiment launch failed, retrying in 0.4 seconds ...'),
            mock.call('Experiment launch failed, retrying in 0.8 seconds ...'),
            mock.call('Experiment launch failed, check web dyno logs for details.'),
            mock.call(u'msg!')
        ])

    def test_non_json_response_error(self, handler):
        log = mock.Mock()
        with mock.patch('dallinger.command_line.requests.post') as mock_post:
            mock_post.return_value = mock.Mock(
                json=mock.Mock(side_effect=ValueError),
                text='Big, unexpected problem.'
            )
            with pytest.raises(ValueError):
                handler('/some-launch-url', error=log)

        log.assert_called_once_with(
            'Error parsing response from /launch, check web dyno logs for details: '
            'Big, unexpected problem.'
        )
@pytest.mark.usefixtures('bartlett_dir', 'clear_workers', 'env')
class TestDebugServer(object):
    """DebugSessionRunner: local debug server lifecycle and its reactions
    to log lines (new recruits, recruitment closed)."""

    @pytest.fixture
    def debugger_unpatched(self, output):
        from dallinger.command_line import DebugSessionRunner
        debugger = DebugSessionRunner(
            output, verbose=True, bot=False, proxy_port=None, exp_config={}
        )
        yield debugger
        # Make sure the background status thread is reaped.
        if debugger.status_thread:
            debugger.status_thread.join()

    @pytest.fixture
    def debugger(self, debugger_unpatched):
        from dallinger.heroku.tools import HerokuLocalWrapper
        debugger = debugger_unpatched
        # Stop monitoring immediately after the first notification.
        debugger.notify = mock.Mock(return_value=HerokuLocalWrapper.MONITOR_STOP)
        return debugger

    def test_startup(self, debugger):
        debugger.run()
        # NOTE(review): this bare expression is not asserted, so the test
        # only verifies that run() does not raise.
        "Server is running" in str(debugger.out.log.call_args_list[0])

    def test_raises_if_heroku_wont_start(self, debugger):
        mock_wrapper = mock.Mock(
            __enter__=mock.Mock(side_effect=OSError),
            __exit__=mock.Mock(return_value=False)
        )
        with mock.patch('dallinger.command_line.HerokuLocalWrapper') as Wrapper:
            Wrapper.return_value = mock_wrapper
            with pytest.raises(OSError):
                debugger.run()

    def test_new_participant(self, debugger_unpatched):
        from dallinger.config import get_config
        debugger = debugger_unpatched
        get_config().load()
        debugger.new_recruit = mock.Mock(return_value=None)
        assert not debugger.new_recruit.called
        debugger.notify(' New participant requested: http://example.com')
        assert debugger.new_recruit.called

    def test_recruitment_closed(self, debugger_unpatched):
        from dallinger.config import get_config
        get_config().load()
        debugger = debugger_unpatched
        debugger.new_recruit = mock.Mock(return_value=None)
        debugger.heroku = mock.Mock()
        response = mock.Mock(
            json=mock.Mock(return_value={'completed': True})
        )
        with mock.patch('dallinger.command_line.requests') as mock_requests:
            mock_requests.get.return_value = response
            debugger.notify(recruiters.CLOSE_RECRUITMENT_LOG_PREFIX)
            debugger.status_thread.join()

        debugger.out.log.assert_called_with('Experiment completed, all nodes filled.')
        debugger.heroku.stop.assert_called_once()

    def test_new_recruit(self, debugger_unpatched, browser):
        debugger_unpatched.notify(
            " {} some-fake-url".format(recruiters.NEW_RECRUIT_LOG_PREFIX)
        )
        browser.open.assert_called_once_with(
            'some-fake-url', autoraise=True, new=1
        )

    def test_new_recruit_not_triggered_if_quoted(self, debugger_unpatched, browser):
        # A quoted prefix (e.g. echoed from config) must not open a browser.
        debugger_unpatched.notify(
            ' "{}" some-fake-url'.format(recruiters.NEW_RECRUIT_LOG_PREFIX)
        )
        browser.open.assert_not_called()

    # NOTE(review): the reason text looks inverted — this skips when
    # --runbot was NOT specified. Also uses deprecated ``pytest.config``.
    @pytest.mark.skipif(not pytest.config.getvalue("runbot"),
                        reason="--runbot was specified")
    def test_debug_bots(self, env):
        # Make sure debug server runs to completion with bots
        p = pexpect.spawn(
            'dallinger',
            ['debug', '--verbose', '--bot'],
            env=env,
            encoding='utf-8',
        )
        p.logfile = sys.stdout
        try:
            p.expect_exact('Server is running', timeout=300)
            p.expect_exact('Recruitment is complete', timeout=600)
            p.expect_exact('Experiment completed', timeout=60)
            p.expect_exact('Local Heroku process terminated', timeout=10)
        finally:
            try:
                p.sendcontrol('c')
                p.read()
            except IOError:
                pass
@pytest.mark.usefixtures('bartlett_dir', 'clear_workers', 'env')
class TestLoad(object):
    """LoadSessionRunner: ingesting an exported dataset, error handling for
    unknown ids, and the --replay flow."""

    exp_id = "some_experiment_id"

    @pytest.fixture
    def export(self):
        # Data export created, then removed after test[s]
        from dallinger.data import export
        path = export(self.exp_id, local=True)
        yield path
        os.remove(path)

    @pytest.fixture
    def loader(self, db_session, output, clear_workers):
        from dallinger.command_line import LoadSessionRunner
        from dallinger.heroku.tools import HerokuLocalWrapper
        loader = LoadSessionRunner(self.exp_id, output, verbose=True,
                                   exp_config={})
        # Stop monitoring after the first notification.
        loader.notify = mock.Mock(return_value=HerokuLocalWrapper.MONITOR_STOP)
        yield loader

    @pytest.fixture
    def replay_loader(self, db_session, env, output, clear_workers):
        from dallinger.command_line import LoadSessionRunner
        loader = LoadSessionRunner(self.exp_id, output, verbose=True,
                                   exp_config={'replay': True})
        loader.keep_running = mock.Mock(return_value=False)

        def launch_and_finish(self):
            # Simulate the replay browser being launched, then stop.
            from dallinger.heroku.tools import HerokuLocalWrapper
            loader.out.log("Launching replay browser...")
            return HerokuLocalWrapper.MONITOR_STOP

        loader.start_replay = mock.Mock(
            return_value=None,
            side_effect=launch_and_finish
        )
        yield loader

    def test_load_runs(self, loader, export):
        loader.keep_running = mock.Mock(return_value=False)
        loader.run()

        loader.out.log.assert_has_calls([
            mock.call('Starting up the server...'),
            mock.call('Ingesting dataset from some_experiment_id-data.zip...'),
            mock.call('Server is running on http://0.0.0.0:5000. Press Ctrl+C to exit.'),
            mock.call('Terminating dataset load for experiment some_experiment_id'),
            mock.call('Cleaning up local Heroku process...'),
            mock.call('Local Heroku process terminated.')
        ])

    def test_load_raises_on_nonexistent_id(self, loader):
        loader.app_id = 'nonsense'
        loader.keep_running = mock.Mock(return_value=False)
        with pytest.raises(IOError):
            loader.run()

    def test_load_with_replay(self, replay_loader, export):
        replay_loader.run()

        replay_loader.out.log.assert_has_calls([
            mock.call('Starting up the server...'),
            mock.call('Ingesting dataset from some_experiment_id-data.zip...'),
            mock.call('Server is running on http://0.0.0.0:5000. Press Ctrl+C to exit.'),
            mock.call('Launching the experiment...'),
            mock.call('Launching replay browser...'),
            mock.call('Terminating dataset load for experiment some_experiment_id'),
            mock.call('Cleaning up local Heroku process...'),
            mock.call('Local Heroku process terminated.')
        ])
class TestOutput(object):
    """Smoke test: each Output channel accepts a message without raising."""

    @pytest.fixture
    def output(self):
        from dallinger.command_line import Output
        return Output()

    def test_outs(self, output):
        output.log('logging')
        output.error('an error')
        output.blather('blah blah blah')
class TestHeader(object):
    """The CLI banner should advertise the installed version."""

    def test_header_contains_version_number(self):
        # Make sure header contains the version number.
        assert dallinger.version.__version__ in dallinger.command_line.header
class TestSandboxAndDeploy(object):
    """sandbox/deploy commands: app-id handling and the mode each sets."""

    @pytest.fixture
    def sandbox(self):
        from dallinger.command_line import sandbox
        return sandbox

    @pytest.fixture
    def deploy(self):
        from dallinger.command_line import deploy
        return deploy

    @pytest.fixture
    def dsss(self):
        with mock.patch('dallinger.command_line.deploy_sandbox_shared_setup') as mock_dsss:
            yield mock_dsss

    def test_sandbox_with_app_id(self, sandbox, dsss):
        CliRunner().invoke(
            sandbox,
            [
                '--verbose',
                '--app', 'some app id',
            ]
        )
        dsss.assert_called_once_with(app='some app id', verbose=True)
        assert get_config().get('mode') == 'sandbox'

    def test_sandbox_with_no_app_id(self, sandbox, dsss):
        CliRunner().invoke(
            sandbox,
            [
                '--verbose',
            ]
        )
        dsss.assert_called_once_with(app=None, verbose=True)
        assert get_config().get('mode') == 'sandbox'

    def test_sandbox_with_invalid_app_id(self, sandbox, dsss):
        # An app id carrying the 'dlgr-' prefix is rejected before deploy.
        result = CliRunner().invoke(
            sandbox,
            [
                '--verbose',
                '--app', 'dlgr-some app id',
            ]
        )
        dsss.assert_not_called()
        assert result.exit_code == -1
        assert 'The --app flag requires the full UUID' in str(result.exception)

    def test_deploy_with_app_id(self, deploy, dsss):
        CliRunner().invoke(
            deploy,
            [
                '--verbose',
                '--app', 'some app id',
            ]
        )
        dsss.assert_called_once_with(app='some app id', verbose=True)
        assert get_config().get('mode') == 'live'
class TestSummary(object):
    """Tests for the ``dallinger summary`` command."""

    @pytest.fixture
    def summary(self):
        from dallinger.command_line import summary
        return summary

    @pytest.fixture
    def patched_summary_route(self):
        """Patch requests so the summary route returns a canned payload."""
        response = mock.Mock()
        response.json.return_value = {
            u'completed': True,
            u'nodes_remaining': 0,
            u'required_nodes': 0,
            u'status': 'success',
            u'summary': [['approved', 1], ['submitted', 1]],
            u'unfilled_networks': 0
        }
        with mock.patch('dallinger.command_line.requests') as req:
            req.get.return_value = response
            yield req

    def test_summary(self, summary, patched_summary_route):
        """One approved of two total participants renders a 50% yield."""
        result = CliRunner().invoke(
            summary,
            [
                '--app', 'some app id',
            ]
        )
        assert "Yield: 50.00%" in result.output
@pytest.mark.usefixtures('bartlett_dir')
class TestBot(object):
    """Tests for the ``dallinger bot`` command, run inside the Bartlett demo."""

    @pytest.fixture
    def bot_command(self):
        from dallinger.command_line import bot
        return bot

    @pytest.fixture
    def mock_bot(self):
        """Patch bot_factory so the command receives a mock bot."""
        bot = mock.Mock()
        with mock.patch('dallinger.command_line.bot_factory') as bot_factory:
            bot_factory.return_value = bot
            yield bot

    def test_bot_factory(self):
        """bot_factory produces a BotBase instance after experiment setup."""
        from dallinger.command_line import bot_factory
        from dallinger.command_line import setup_experiment
        from dallinger.bots import BotBase
        setup_experiment()
        bot = bot_factory('some url')
        assert isinstance(bot, BotBase)

    def test_bot_no_debug_url(self, bot_command, mock_bot):
        """Without --debug the bot still runs the experiment."""
        CliRunner().invoke(
            bot_command,
            [
                '--app', 'some app id',
            ]
        )
        assert mock_bot.run_experiment.called

    def test_bot_with_debug_url(self, bot_command, mock_bot):
        """With a --debug URL the bot runs the experiment."""
        CliRunner().invoke(
            bot_command,
            [
                '--app', 'some app id',
                '--debug', 'some url'
            ]
        )
        assert mock_bot.run_experiment.called
class TestQualify(object):
    """Tests for the ``dallinger qualify`` command."""

    @pytest.fixture
    def qualify(self):
        from dallinger.command_line import qualify
        return qualify

    @pytest.fixture
    def mturk(self):
        """Patch MTurkService; the mock reports one already-qualified worker."""
        with mock.patch('dallinger.command_line.MTurkService') as mock_mturk:
            mock_results = [{'id': 'some qid', 'score': 1}]
            mock_instance = mock.Mock()
            mock_instance.get_workers_with_qualification.return_value = mock_results
            mock_mturk.return_value = mock_instance
            yield mock_instance

    def test_qualify_single_worker(self, qualify, mturk):
        """One worker is assigned the qualification score, no notification."""
        qual_value = 1
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qid',
                '--value', six.text_type(qual_value),
                'some worker id',
            ]
        )
        assert result.exit_code == 0
        mturk.set_qualification_score.assert_called_once_with(
            'some qid', 'some worker id', qual_value, notify=False
        )
        mturk.get_workers_with_qualification.assert_called_once_with('some qid')

    def test_uses_mturk_sandbox_if_specified(self, qualify):
        """--sandbox is forwarded to the MTurkService constructor."""
        qual_value = 1
        with mock.patch('dallinger.command_line.MTurkService') as mock_mturk:
            mock_mturk.return_value = mock.Mock()
            CliRunner().invoke(
                qualify,
                [
                    '--sandbox',
                    '--qualification', 'some qid',
                    '--value', six.text_type(qual_value),
                    'some worker id',
                ]
            )
            assert 'sandbox=True' in str(mock_mturk.call_args_list[0])

    def test_raises_with_no_worker(self, qualify, mturk):
        """Omitting the worker ID argument aborts with an error."""
        qual_value = 1
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qid',
                '--value', six.text_type(qual_value),
            ]
        )
        assert result.exit_code != 0
        assert 'at least one worker ID' in result.output

    def test_can_elect_to_notify_worker(self, qualify, mturk):
        """--notify makes the score assignment send a worker notification."""
        qual_value = 1
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qid',
                '--value', six.text_type(qual_value),
                '--notify',
                'some worker id',
            ]
        )
        assert result.exit_code == 0
        mturk.set_qualification_score.assert_called_once_with(
            'some qid', 'some worker id', qual_value, notify=True
        )

    def test_qualify_multiple_workers(self, qualify, mturk):
        """Several worker IDs each receive the qualification score."""
        qual_value = 1
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qid',
                '--value', six.text_type(qual_value),
                'worker1', 'worker2',
            ]
        )
        assert result.exit_code == 0
        mturk.set_qualification_score.assert_has_calls([
            mock.call(u'some qid', u'worker1', 1, notify=False),
            mock.call(u'some qid', u'worker2', 1, notify=False)
        ])

    def test_use_qualification_name(self, qualify, mturk):
        """--by_name resolves the qualification name to its ID first."""
        qual_value = 1
        mturk.get_qualification_type_by_name.return_value = {'id': 'some qid'}
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qual name',
                '--value', six.text_type(qual_value),
                '--by_name',
                'some worker id',
            ]
        )
        assert result.exit_code == 0
        mturk.set_qualification_score.assert_called_once_with(
            'some qid', 'some worker id', qual_value, notify=False
        )
        mturk.get_workers_with_qualification.assert_called_once_with('some qid')

    def test_use_qualification_name_with_bad_name(self, qualify, mturk):
        """An unknown qualification name exits with code 2 and a message."""
        qual_value = 1
        mturk.get_qualification_type_by_name.return_value = None
        result = CliRunner().invoke(
            qualify,
            [
                '--qualification', 'some qual name',
                '--value', six.text_type(qual_value),
                '--by_name',
                'some worker id',
            ]
        )
        assert result.exit_code == 2
        assert 'No qualification with name "some qual name" exists.' in result.output
class TestRevoke(object):
    """Tests for the ``dallinger revoke`` command."""

    # Canned interactive responses to the confirmation prompt:
    DO_IT = 'Y\n'
    DO_NOT_DO_IT = 'N\n'

    @pytest.fixture
    def revoke(self):
        from dallinger.command_line import revoke
        return revoke

    @pytest.fixture
    def mturk(self):
        """Patch MTurkService; the mock reports one qualified worker."""
        with mock.patch('dallinger.command_line.MTurkService') as mock_mturk:
            mock_instance = mock.Mock()
            mock_instance.get_qualification_type_by_name.return_value = 'some qid'
            mock_instance.get_workers_with_qualification.return_value = [
                {'id': 'some qid', 'score': 1}
            ]
            mock_mturk.return_value = mock_instance
            yield mock_instance

    def test_revoke_single_worker_by_qualification_id(self, revoke, mturk):
        """After confirmation, the qualification is revoked for the worker."""
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qid',
                '--reason', 'some reason',
                'some worker id',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code == 0
        mturk.revoke_qualification.assert_called_once_with(
            u'some qid', u'some worker id', u'some reason'
        )

    def test_can_be_aborted_cleanly_after_warning(self, revoke, mturk):
        """Answering 'N' at the prompt leaves the qualification untouched."""
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qid',
                '--reason', 'some reason',
                'some worker id',
            ],
            input=self.DO_NOT_DO_IT,
        )
        assert result.exit_code == 0
        mturk.revoke_qualification.assert_not_called()

    def test_uses_mturk_sandbox_if_specified(self, revoke):
        """--sandbox is forwarded to the MTurkService constructor."""
        with mock.patch('dallinger.command_line.MTurkService') as mock_mturk:
            mock_mturk.return_value = mock.Mock()
            CliRunner().invoke(
                revoke,
                [
                    '--sandbox',
                    '--qualification', 'some qid',
                    '--reason', 'some reason',
                    'some worker id',
                ],
                input=self.DO_IT,
            )
            assert 'sandbox=True' in str(mock_mturk.call_args_list[0])

    def test_reason_has_a_default(self, revoke, mturk):
        """Omitting --reason uses the standard revocation message."""
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qid',
                'some worker id',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code == 0
        mturk.revoke_qualification.assert_called_once_with(
            u'some qid',
            u'some worker id',
            u'Revoking automatically assigned Dallinger qualification'
        )

    def test_raises_with_no_worker(self, revoke, mturk):
        """Omitting the worker ID argument aborts with an error."""
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qid',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code != 0
        assert 'at least one worker ID' in result.output

    def test_raises_with_no_qualification(self, revoke, mturk):
        """Omitting --qualification aborts with an error."""
        result = CliRunner().invoke(
            revoke,
            [
                u'some worker id',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code != 0
        # NOTE(review): a worker ID *is* supplied here and it is the
        # qualification that is missing, yet this checks the worker-ID
        # message -- looks copy-pasted from test_raises_with_no_worker;
        # confirm against the actual CLI error text.
        assert 'at least one worker ID' in result.output

    def test_revoke_for_multiple_workers(self, revoke, mturk):
        """Each listed worker has the qualification revoked."""
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qid',
                '--reason', 'some reason',
                'worker1', 'worker2',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code == 0
        mturk.revoke_qualification.assert_has_calls([
            mock.call(u'some qid', u'worker1', u'some reason'),
            mock.call(u'some qid', u'worker2', u'some reason')
        ])

    def test_use_qualification_name(self, revoke, mturk):
        """--by_name resolves the qualification name to its ID first."""
        mturk.get_qualification_type_by_name.return_value = {'id': 'some qid'}
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some qual name',
                '--reason', 'some reason',
                '--by_name',
                'some worker id',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code == 0
        mturk.revoke_qualification.assert_called_once_with(
            u'some qid', u'some worker id', u'some reason'
        )

    def test_bad_qualification_name_shows_error(self, revoke, mturk):
        """An unknown qualification name exits with code 2 and a message."""
        mturk.get_qualification_type_by_name.return_value = None
        result = CliRunner().invoke(
            revoke,
            [
                '--qualification', 'some bad name',
                '--reason', 'some reason',
                '--by_name',
                'some worker id',
            ],
            input=self.DO_IT,
        )
        assert result.exit_code == 2
        assert 'No qualification with name "some bad name" exists.' in result.output
class TestHibernate(object):
    """Tests for the ``dallinger hibernate`` command."""

    @pytest.fixture
    def hibernate(self, sleepless):
        from dallinger.command_line import hibernate
        return hibernate

    def test_creates_backup(self, hibernate, heroku, data):
        """Hibernating first backs up the app's data."""
        CliRunner().invoke(
            hibernate,
            ['--app', 'some-app-uid', ]
        )
        data.backup.assert_called_once_with('some-app-uid')

    def test_scales_down_dynos(self, hibernate, heroku, data):
        """All dynos are scaled down."""
        CliRunner().invoke(
            hibernate,
            ['--app', 'some-app-uid', ]
        )
        heroku.scale_down_dynos.assert_called_once()

    def test_kills_addons(self, hibernate, heroku, data):
        """The postgres and redis add-ons are destroyed."""
        CliRunner().invoke(
            hibernate,
            ['--app', 'some-app-uid', ]
        )
        heroku.addon_destroy.assert_has_calls([
            mock.call('heroku-postgresql'),
            mock.call('heroku-redis')
        ])
@pytest.mark.usefixtures('active_config')
class TestAwaken(object):
    """Tests for the ``dallinger awaken`` command."""

    @pytest.fixture
    def awaken(self, sleepless):
        from dallinger.command_line import awaken
        return awaken

    def test_creates_database_of_configured_size(self, awaken, heroku, data, active_config):
        """The postgres add-on is re-created at the configured plan size."""
        CliRunner().invoke(
            awaken,
            ['--app', 'some-app-uid', ]
        )
        size = active_config.get('database_size')
        expected = mock.call('heroku-postgresql:{}'.format(size))
        assert expected == heroku.addon.call_args_list[0]

    def test_adds_redis(self, awaken, heroku, data, active_config):
        """The redis add-on is re-created at the configured plan size."""
        active_config['redis_size'] = u'premium-2'
        CliRunner().invoke(
            awaken,
            ['--app', 'some-app-uid', ]
        )
        assert mock.call('heroku-redis:premium-2') == heroku.addon.call_args_list[1]

    def test_restores_database_from_backup(self, awaken, heroku, data):
        """The database is restored from the saved backup URL."""
        CliRunner().invoke(
            awaken,
            ['--app', 'some-app-uid', ]
        )
        heroku.restore.assert_called_once_with('fake restore url')

    def test_scales_up_dynos(self, awaken, heroku, data, active_config):
        """web, worker and clock dynos come back at configured counts/size."""
        CliRunner().invoke(
            awaken,
            ['--app', 'some-app-uid', ]
        )
        web_count = active_config.get('num_dynos_web')
        worker_count = active_config.get('num_dynos_worker')
        size = active_config.get('dyno_type')
        heroku.scale_up_dyno.assert_has_calls([
            mock.call('web', web_count, size),
            mock.call('worker', worker_count, size),
            mock.call('clock', 1, size)
        ])
class TestDestroy(object):
    """Tests for the ``dallinger destroy`` command."""

    @pytest.fixture
    def destroy(self):
        from dallinger.command_line import destroy
        return destroy

    def test_calls_destroy(self, destroy, heroku):
        """--yes skips confirmation and destroys the app."""
        CliRunner().invoke(
            destroy,
            ['--app', 'some-app-uid', '--yes']
        )
        heroku.destroy.assert_called_once()

    def test_destroy_expires_hits(self, destroy, heroku, mturk):
        """--expire-hit also expires the app's MTurk HITs."""
        CliRunner().invoke(
            destroy,
            ['--app', 'some-app-uid', '--yes', '--expire-hit']
        )
        heroku.destroy.assert_called_once()
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.assert_called_once()
        mturk_instance.expire_hit.assert_called()

    def test_requires_confirmation(self, destroy, heroku):
        """Without --yes (and no input), nothing is destroyed."""
        CliRunner().invoke(
            destroy,
            ['--app', 'some-app-uid']
        )
        heroku.destroy.assert_not_called()

    def test_destroy_expire_uses_sandbox(self, destroy, heroku, mturk):
        """--sandbox is forwarded to the MTurkService constructor."""
        CliRunner().invoke(
            destroy,
            ['--app', 'some-app-uid', '--yes', '--expire-hit', '--sandbox']
        )
        assert 'sandbox=True' in str(mturk.call_args_list[0])
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.assert_called_once()
        mturk_instance.expire_hit.assert_called()
class TestLogs(object):
    """Tests for the ``dallinger logs`` command."""

    @pytest.fixture
    def logs(self):
        from dallinger.command_line import logs
        return logs

    def test_opens_logs(self, logs, heroku):
        """Invoking the command tails the Heroku logs for the app."""
        args = ['--app', 'some-app-uid', ]
        CliRunner().invoke(logs, args)
        heroku.open_logs.assert_called_once()
class TestMonitor(object):
    """Tests for the ``dallinger monitor`` command."""

    def _twice(self):
        """Return a callable that reports True exactly twice, then False."""
        count = [2]

        def countdown():
            if count[0]:
                count[0] -= 1
                return True
            return False

        return countdown

    @pytest.fixture
    def command_line_check_call(self):
        with mock.patch('dallinger.command_line.check_call') as call:
            yield call

    @pytest.fixture
    def summary(self):
        with mock.patch('dallinger.command_line.get_summary') as sm:
            sm.return_value = 'fake summary'
            yield sm

    @pytest.fixture
    def two_summary_checks(self):
        """Make the monitor loop run exactly two iterations."""
        countdown = self._twice()
        counter_factory = mock.Mock(return_value=countdown)
        with mock.patch('dallinger.command_line._keep_running',
                        new_callable=counter_factory):
            yield

    @pytest.fixture
    def monitor(self, sleepless, summary, two_summary_checks):
        from dallinger.command_line import monitor
        return monitor

    def test_opens_browsers(self, monitor, heroku, browser, command_line_check_call):
        """Both the Heroku dashboard and the MTurk HIT manager are opened."""
        heroku.dashboard_url = 'fake-dashboard-url'
        CliRunner().invoke(
            monitor,
            ['--app', 'some-app-uid', ]
        )
        browser.open.assert_has_calls([
            mock.call('fake-dashboard-url'),
            mock.call('https://requester.mturk.com/mturk/manageHITs')
        ])

    def test_calls_open_with_db_uri(self, monitor, heroku, browser, command_line_check_call):
        """The database URI is opened via the ``open`` shell command."""
        heroku.db_uri = 'fake-db-uri'
        CliRunner().invoke(
            monitor,
            ['--app', 'some-app-uid', ]
        )
        command_line_check_call.assert_called_once_with(['open', 'fake-db-uri'])

    def test_shows_summary_in_output(self, monitor, heroku, browser, command_line_check_call):
        """The summary is printed once per loop iteration (twice here)."""
        heroku.db_uri = 'fake-db-uri'
        result = CliRunner().invoke(
            monitor,
            ['--app', 'some-app-uid', ]
        )
        assert len(re.findall('fake summary', result.output)) == 2

    def test_raises_on_null_app_id(self, monitor, heroku, browser, command_line_check_call):
        """A missing app id produces a helpful exception message."""
        heroku.db_uri = 'fake-db-uri'
        result = CliRunner().invoke(
            monitor,
            ['--app', None, ]
        )
        assert str(result.exception) == 'Select an experiment using the --app flag.'
class TestHits(object):
    """Tests for the ``dallinger hits`` and ``dallinger expire`` commands."""

    @pytest.fixture
    def output(self):
        """Patch the CLI Output class and yield the mock instance it returns."""
        with mock.patch('dallinger.command_line.Output') as mock_data:
            output_instance = mock.Mock()
            mock_data.return_value = output_instance
            yield output_instance

    @pytest.fixture
    def hits(self):
        from dallinger.command_line import hits
        return hits

    @pytest.fixture
    def expire(self):
        from dallinger.command_line import expire
        return expire

    def test_hits(self, hits, mturk):
        """Listing hits queries MTurk once."""
        result = CliRunner().invoke(
            hits, [
                '--app', 'exp-id-2'
            ]
        )
        assert result.exit_code == 0
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.assert_called_once()

    def test_uses_mturk_sandbox_if_specified(self, hits, mturk):
        """--sandbox is forwarded to the MTurkService constructor."""
        CliRunner().invoke(
            hits, [
                '--sandbox',
                '--app', 'exp-id-2',
            ]
        )
        assert 'sandbox=True' in str(mturk.call_args_list[0])

    def test_expire(self, expire, mturk):
        """Expiring looks up the app's hits and expires them."""
        result = CliRunner().invoke(
            expire, [
                '--app', 'exp-id-2'
            ]
        )
        assert result.exit_code == 0
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.assert_called_once()
        mturk_instance.expire_hit.assert_called()

    def test_expire_no_hits(self, expire, mturk, output):
        """With no hits, the command fails and suggests trying the sandbox."""
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.return_value = []
        result = CliRunner().invoke(
            expire, [
                '--app', 'exp-id-2'
            ]
        )
        assert result.exit_code == 1
        mturk_instance.get_hits.assert_called_once()
        mturk_instance.expire_hit.assert_not_called()
        assert output.log.call_count == 2
        output.log.assert_has_calls([
            mock.call('No hits found for this application.'),
            mock.call(
                'If this experiment was run in the MTurk sandbox, use: '
                '`dallinger expire --sandbox --app exp-id-2`'
            )
        ])

    def test_expire_no_hits_sandbox(self, expire, mturk, output):
        """With no sandbox hits, the command fails without the extra hint."""
        mturk_instance = mturk.return_value
        mturk_instance.get_hits.return_value = []
        result = CliRunner().invoke(
            expire, [
                '--app', 'exp-id-2', '--sandbox'
            ]
        )
        assert result.exit_code == 1
        mturk_instance.get_hits.assert_called_once()
        mturk_instance.expire_hit.assert_not_called()
        output.log.assert_called_once_with(
            'No hits found for this application.'
        )

    def test_expire_with_failure(self, expire, mturk, output):
        """MTurk errors while expiring are reported and the command fails."""
        mturk_instance = mturk.return_value

        def mturk_raiser(*args, **kwargs):
            from dallinger.mturk import MTurkServiceException
            raise MTurkServiceException()

        mturk_instance.expire_hit.side_effect = mturk_raiser
        result = CliRunner().invoke(
            expire, [
                '--app', 'exp-id-2'
            ]
        )
        assert result.exit_code == 1
        mturk_instance.get_hits.assert_called_once()
        # BUG FIX: the original line *assigned* ``expire_hit.call_count = 2``
        # instead of asserting it, so the check never ran. Assert the count
        # (2 matches the "Could not expire 2 hits:" message checked below).
        assert mturk_instance.expire_hit.call_count == 2
        assert output.log.call_count == 1
        assert 'Could not expire 2 hits:' in str(
            output.log.call_args_list[0]
        )
```
#### File: Dallinger/tests/test_config.py
```python
from __future__ import unicode_literals
import mock
import os
import sys
from tempfile import NamedTemporaryFile
import pexpect
import pytest
import six
from dallinger.config import Configuration
from dallinger.config import get_config, LOCAL_CONFIG
@pytest.mark.usefixtures('experiment_dir')
class TestConfiguration(object):
    """Tests for the Configuration registry and the get_config() singleton."""

    def test_register_new_variable(self):
        config = Configuration()
        config.register('num_participants', int)
        config.extend({'num_participants': 1})
        config.ready = True
        assert config.get('num_participants', 1)

    def test_register_duplicate_variable_raises(self):
        config = Configuration()
        config.register('num_participants', int)
        with pytest.raises(KeyError):
            config.register('num_participants', int)

    def test_register_unknown_type_raises(self):
        config = Configuration()
        with pytest.raises(TypeError):
            config.register('num_participants', object)

    def test_type_mismatch(self):
        """Extending with the wrong value type raises unless casting is on."""
        config = Configuration()
        config.register('num_participants', int)
        with pytest.raises(TypeError):
            config.extend({'num_participants': 1.0})

    def test_type_mismatch_with_cast_types(self):
        """cast_types=True coerces compatible values to the registered type."""
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        config.extend({'num_participants': 1.0}, cast_types=True)
        assert config.get('num_participants', 1) == 1

    def test_type_cast_types_failure_raises(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        with pytest.raises(TypeError):
            config.extend({'num_participants': 'A NUMBER'}, cast_types=True)

    def test_get_before_ready_is_not_possible(self):
        """Reading values before ``ready`` is flagged raises RuntimeError."""
        config = Configuration()
        config.register('num_participants', int)
        config.extend({'num_participants': 1})
        with pytest.raises(RuntimeError):
            config.get('num_participants', 1)

    def test_layering_of_configs(self):
        """Later extend() calls shadow earlier values."""
        config = Configuration()
        config.register('num_participants', int)
        config.extend({'num_participants': 1})
        config.ready = True
        assert config.get('num_participants', 1) == 1
        config.extend({'num_participants': 2})
        assert config.get('num_participants', 1) == 2

    def test_setting_unknown_key_is_ignored(self):
        config = Configuration()
        config.ready = True
        config.extend({'num_participants': 1})
        config.get('num_participants', None)

    def test_setting_by_set(self):
        config = Configuration()
        config.ready = True
        config.set("mode", "live")

    def test_setting_by_assignment(self):
        config = Configuration()
        config.ready = True
        config["mode"] = "live"

    def test_get_without_default_raises(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        with pytest.raises(KeyError):
            config.get('num_participants')

    def test_get_has_default_value(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        assert config.get('num_participants', 10) == 10

    def test_dict_access(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        config.extend({'num_participants': 1})
        assert config['num_participants'] == 1

    def test_attribute_access(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        config.extend({'num_participants': 1})
        assert config.num_participants == 1

    def test_attribute_setting(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        config.num_participants = 1
        assert config.num_participants == 1

    def test_strict_extending_blocks_unknown_keys(self):
        config = Configuration()
        config.register('num_participants', int)
        config.ready = True
        with pytest.raises(KeyError):
            config.extend({'unknown_key': 1}, strict=True)

    def test_setting_values_supports_synonyms(self):
        config = Configuration()
        config.register('num_participants', int, synonyms={'n', })
        config.ready = True
        config.extend({'n': 1})
        assert config.get('num_participants') == 1

    def test_loading_keys_from_config_file(self):
        """Values (including synonyms) can be read from an INI-style file."""
        config = Configuration()
        config.register('num_participants', int, synonyms={'n', })
        config.register('deploy_worldwide', bool, synonyms={'worldwide', })
        with NamedTemporaryFile() as configfile:
            configfile.write(b"""
[Example Section]
num_participants = 10
worldwide = false
""")
            configfile.flush()
            config.load_from_file(configfile.name)
        config.ready = True
        assert config.get('num_participants') == 10
        assert config.get('deploy_worldwide') is False

    def test_loading_keys_from_environment_variables(self):
        """Registered keys can be populated from os.environ."""
        config = Configuration()
        config.register('num_participants', int, synonyms={'n', })
        os.environ['num_participants'] = '1'
        try:
            config.load_from_environment()
        finally:
            del os.environ['num_participants']
        config.ready = True
        assert config.get('num_participants') == 1

    def test_experiment_defined_parameters(self):
        """Experiment-defined extra parameters are registered on the config.

        Runs in a pexpect-driven subprocess so the experiment server's
        import-time behavior is exercised in a clean interpreter.
        """
        try:
            python = pexpect.spawn("python", encoding='utf-8')
            python.read_nonblocking(10000)
            python.setecho(False)
            python.sendline('from dallinger.experiment_server import experiment_server')
            python.sendline('config = experiment_server._config()')
            python.sendline('print(config.types)')
            if six.PY3:
                python.expect_exact("custom_parameter': <class 'int'>")
            else:
                python.expect_exact("custom_parameter': <type 'int'>")
        finally:
            python.sendcontrol('d')
            python.read()

    def test_reload_config(self):
        # replicate the experiment API runner config loading
        config = get_config()
        config.register_extra_parameters()
        config.load_from_file(LOCAL_CONFIG)
        # Fails with _reset()
        config.clear()
        config.register_extra_parameters()
        config.load_from_file(LOCAL_CONFIG)

    def test_custom_experiment_module_set_and_retained(self):
        """register_extra_parameters installs and keeps dallinger_experiment."""
        config = get_config()
        with mock.patch.dict('sys.modules', dallinger_experiment=None):
            config.register_extra_parameters()
            assert sys.modules['dallinger_experiment'] is not None
        exp_module = mock.Mock()
        with mock.patch.dict('sys.modules', dallinger_experiment=exp_module):
            config.clear()
            config.register_extra_parameters()
            assert sys.modules['dallinger_experiment'] is exp_module

    def test_local_base_url(self):
        from dallinger.utils import get_base_url
        config = get_config()
        config.ready = True
        config.set('host', 'localhost')
        config.set('base_port', 5000)
        assert(get_base_url() == 'http://localhost:5000')

    def test_remote_base_url(self):
        from dallinger.utils import get_base_url
        config = get_config()
        config.ready = True
        config.set('host', 'https://dlgr-bogus.herokuapp.com')
        assert(get_base_url() == 'https://dlgr-bogus.herokuapp.com')

    def test_remote_base_url_always_ssl(self):
        """Non-local hosts are always upgraded to https."""
        from dallinger.utils import get_base_url
        config = get_config()
        config.ready = True
        config.set('host', 'http://dlgr-bogus.herokuapp.com')
        assert(get_base_url() == 'https://dlgr-bogus.herokuapp.com')

    def test_write_omits_sensitive_keys_if_filter_sensitive(self, in_tempdir):
        """filter_sensitive=True keeps credentials out of the written file."""
        config = get_config()
        config.set('aws_region', 'some region')
        config.set('aws_secret_access_key', 'foo')
        config.ready = True
        config.write(filter_sensitive=True)
        with open(LOCAL_CONFIG) as txt:
            contents = txt.read()
        assert 'aws_region' in contents
        assert 'aws_secret_access_key' not in contents

    def test_write_includes_all_keys_if_filter_sensitive_false(self, in_tempdir):
        config = get_config()
        config.set('aws_region', 'some region')
        config.set('aws_secret_access_key', 'foo')
        config.ready = True
        config.write(filter_sensitive=False)
        with open(LOCAL_CONFIG) as txt:
            contents = txt.read()
        assert 'aws_region' in contents
        assert 'aws_secret_access_key' in contents
```
#### File: Dallinger/tests/test_heroku.py
```python
import os
import mock
import pytest
import dallinger.db
import datetime
import signal
from dallinger.config import get_config
from dallinger.heroku import app_name
from dallinger.heroku.messages import EmailingHITMessenger
from dallinger.heroku.messages import EmailConfig
@pytest.fixture
def run_check():
    """Yield the clock's ``run_check`` with a fresh DB and loaded config.

    Restores the working directory and rolls back/closes the DB session
    after the test.
    """
    db = dallinger.db.init_db(drop_all=True)
    os.chdir('tests/experiment')
    config = get_config()
    if not config.ready:
        config.load()
    # Import the FUT here, after config load, and return it
    from dallinger.heroku.clock import run_check
    yield run_check
    db.rollback()
    db.close()
    os.chdir('../..')
@pytest.fixture
def check_call():
    """Yield a patched ``check_call`` from dallinger.heroku.tools."""
    patcher = mock.patch('dallinger.heroku.tools.check_call')
    patched = patcher.start()
    try:
        yield patched
    finally:
        patcher.stop()
@pytest.fixture
def check_output():
    """Yield a patched ``check_output`` from dallinger.heroku.tools."""
    patcher = mock.patch('dallinger.heroku.tools.check_output')
    patched = patcher.start()
    try:
        yield patched
    finally:
        patcher.stop()
class TestHeroku(object):
    """Tests for Heroku helper utilities."""

    def test_heroku_app_name(self):
        """App names derived from an experiment UUID fit Heroku's length limit."""
        # Renamed from ``id``, which shadowed the builtin.
        experiment_uid = "8fbe62f5-2e33-4274-8aeb-40fc3dd621a0"
        assert(len(app_name(experiment_uid)) < 30)
class TestClockScheduler(object):
    """Tests for the clock process's scheduler setup."""

    def setup(self):
        """Set up the environment by moving to the demos directory."""
        os.chdir('tests/experiment')
        config = get_config()
        config.ready = False
        from dallinger.heroku import clock
        self.clock = clock

    def teardown(self):
        os.chdir("../..")

    def test_scheduler_has_job(self):
        """Exactly one job is registered: the missing-notification check."""
        jobs = self.clock.scheduler.get_jobs()
        assert len(jobs) == 1
        assert jobs[0].func_ref == 'dallinger.heroku.clock:check_db_for_missing_notifications'

    def test_clock_expects_config_to_be_ready(self):
        """Running the job before config load raises RuntimeError."""
        assert not get_config().ready
        jobs = self.clock.scheduler.get_jobs()
        with pytest.raises(RuntimeError) as excinfo:
            jobs[0].func()
        assert excinfo.match('Config not loaded')

    def test_launch_loads_config(self):
        """launch() loads the config and starts the scheduler."""
        original_start = self.clock.scheduler.start
        data = {'launched': False}

        def start():
            data['launched'] = True

        try:
            self.clock.scheduler.start = start
            self.clock.launch()
            assert data['launched']
            assert get_config().ready
        finally:
            self.clock.scheduler.start = original_start
class TestHerokuClockTasks(object):
    """Tests for the clock process's overdue-assignment checks."""

    def test_check_db_for_missing_notifications_assembles_resources(self, run_check):
        """The top-level task builds an MTurk connection and calls run_check."""
        # Can't import until after config is loaded:
        from dallinger.heroku.clock import check_db_for_missing_notifications
        with mock.patch.multiple('dallinger.heroku.clock',
                                 run_check=mock.DEFAULT,
                                 MTurkService=mock.DEFAULT) as mocks:
            mocks['MTurkService'].return_value = 'fake connection'
            check_db_for_missing_notifications()
            mocks['run_check'].assert_called()

    def test_does_nothing_if_assignment_still_current(self, a, stub_config, run_check):
        """Assignments within their time window are not queried on MTurk."""
        mturk = mock.Mock(**{'get_assignment.return_value': ['fake']})
        participants = [a.participant()]
        session = None
        reference_time = datetime.datetime.now()
        run_check(stub_config, mturk, participants, session, reference_time)
        mturk.get_assignment.assert_not_called()

    def test_rejects_bot_participants(self, a, stub_config, run_check):
        """Overdue bot-recruited participants are rejected outright."""
        from dallinger.recruiters import BotRecruiter
        mturk = mock.Mock(**{'get_assignment.return_value': ['fake']})
        participants = [a.participant(recruiter_id=BotRecruiter.nickname)]
        session = mock.Mock()
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        run_check(stub_config, mturk, participants, session, reference_time)
        assert participants[0].status == 'rejected'
        session.commit.assert_called()

    def test_sets_participant_status_if_mturk_reports_approved(self, a, stub_config, run_check):
        """An MTurk 'Approved' assignment marks the participant approved."""
        fake_assignment = {'status': 'Approved'}
        mturk = mock.Mock(**{'get_assignment.return_value': fake_assignment})
        participants = [a.participant()]
        session = mock.Mock()
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        run_check(stub_config, mturk, participants, session, reference_time)
        assert participants[0].status == 'approved'
        session.commit.assert_called()

    def test_sets_participant_status_if_mturk_reports_rejected(self, a, stub_config, run_check):
        """An MTurk 'Rejected' assignment marks the participant rejected."""
        fake_assignment = {'status': 'Rejected'}
        mturk = mock.Mock(**{'get_assignment.return_value': fake_assignment})
        participants = [a.participant()]
        session = mock.Mock()
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        run_check(stub_config, mturk, participants, session, reference_time)
        assert participants[0].status == 'rejected'
        session.commit.assert_called()

    def test_resubmits_notification_if_mturk_reports_submitted(self, a, stub_config, run_check):
        """A 'Submitted' assignment re-posts the AssignmentSubmitted event."""
        # Provide a host so the notification URL can be constructed.
        # (The original comment here mentioned 'whimsical' and looked stale.)
        stub_config.extend({'host': u'fakehost.herokuapp.com'})
        fake_assignment = {'status': 'Submitted'}
        mturk = mock.Mock(**{'get_assignment.return_value': fake_assignment})
        participants = [a.participant()]
        session = None
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        with mock.patch('dallinger.heroku.clock.requests') as mock_requests:
            run_check(stub_config, mturk, participants, session, reference_time)
            mock_requests.post.assert_called_once_with(
                'http://fakehost.herokuapp.com/notifications',
                data={
                    'Event.1.EventType': 'AssignmentSubmitted',
                    'Event.1.AssignmentId': participants[0].assignment_id
                }
            )

    def test_sends_notification_if_resubmitted(self, a, stub_config, run_check):
        """Resubmission also triggers the 'resubmitted' email message."""
        fake_assignment = {'status': 'Submitted'}
        mturk = mock.Mock(**{'get_assignment.return_value': fake_assignment})
        participants = [a.participant()]
        session = None
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        mock_messenger = mock.Mock(spec=EmailingHITMessenger)
        with mock.patch.multiple('dallinger.heroku.clock',
                                 requests=mock.DEFAULT,
                                 get_messenger=mock.DEFAULT) as mocks:
            mocks['get_messenger'].return_value = mock_messenger
            run_check(stub_config, mturk, participants, session, reference_time)
            mock_messenger.send_resubmitted_msg.assert_called()

    def test_no_assignment_on_mturk_shuts_down_hit(self, a, stub_config, run_check):
        """A vanished assignment disables recruiting and posts a notification."""
        # Provide a host so the Heroku API and notification URLs can be built.
        # (The original comment here mentioned 'whimsical' and looked stale.)
        stub_config.extend({'host': u'fakehost.herokuapp.com'})
        mturk = mock.Mock(**{'get_assignment.return_value': None})
        participants = [a.participant()]
        session = None
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        with mock.patch('dallinger.heroku.clock.requests') as mock_requests:
            run_check(stub_config, mturk, participants, session, reference_time)
            mock_requests.patch.assert_called_once_with(
                'https://api.heroku.com/apps/fakehost/config-vars',
                data='{"auto_recruit": "false"}',
                headers={
                    "Accept": "application/vnd.heroku+json; version=3",
                    "Content-Type": "application/json",
                    "Authorization": "Bearer {}".format('heroku secret'),
                }
            )
            mock_requests.post.assert_called_once_with(
                'http://fakehost.herokuapp.com/notifications',
                data={
                    'Event.1.EventType': 'NotificationMissing',
                    'Event.1.AssignmentId': participants[0].assignment_id
                }
            )

    def test_no_assignment_on_mturk_sends_hit_cancelled_message(self, a, stub_config, run_check):
        """A vanished assignment also triggers the 'HIT cancelled' email."""
        mturk = mock.Mock(**{'get_assignment.return_value': None})
        participants = [a.participant()]
        session = None
        # Move the clock forward so assignment is overdue:
        reference_time = datetime.datetime.now() + datetime.timedelta(hours=6)
        mock_messenger = mock.Mock(spec=EmailingHITMessenger)
        with mock.patch.multiple('dallinger.heroku.clock',
                                 requests=mock.DEFAULT,
                                 get_messenger=mock.DEFAULT) as mocks:
            mocks['get_messenger'].return_value = mock_messenger
            run_check(stub_config, mturk, participants, session, reference_time)
            mock_messenger.send_hit_cancelled_msg.assert_called()
@pytest.fixture
def hit_summary():
    """A canned HITSummary used by the messenger tests."""
    from dallinger.heroku.messages import HITSummary
    return HITSummary(
        assignment_id='some assignment id',
        duration=60,
        time_active=120,
        app_id='some app id',
        when='the time',
    )
class TestEmailConfig(object):
    """Tests for EmailConfig validation."""

    @pytest.fixture
    def klass(self):
        from dallinger.heroku.messages import EmailConfig
        return EmailConfig

    def test_catches_missing_config_values(self, klass, stub_config):
        """Blank or placeholder email settings are all reported, sorted."""
        stub_config.extend({
            'dallinger_email_address': u'',
            'contact_email_on_error': u'',
            'smtp_username': u'???',
            'smtp_password': u'???',
        })
        econfig = klass(stub_config)
        problems = econfig.validate()
        assert problems == (
            'Missing or invalid config values: contact_email_on_error, '
            'dallinger_email_address, smtp_password, smtp_username'
        )
def emailing_messenger(summary, config):
    """Return an EmailingHITMessenger for *summary*, configured from *config*.

    Email settings are derived from *config* through EmailConfig.
    """
    return EmailingHITMessenger(
        hit_info=summary,
        email_settings=EmailConfig(config),
    )
# Fixture: messenger configured with whimsical (playful) email templates.
@pytest.fixture
def whimsical(hit_summary, stub_config):
stub_config.extend({'whimsical': True})
return emailing_messenger(hit_summary, stub_config)
# Fixture: messenger configured with plain (non-whimsical) email templates.
@pytest.fixture
def nonwhimsical(hit_summary, stub_config):
stub_config.extend({'whimsical': False})
return emailing_messenger(hit_summary, stub_config)
# get_messenger() should fall back to the console-logging DebugHITMessenger
# unless we are in a live mode AND the email configuration is valid.
class TestMessengerFactory(object):
@pytest.fixture
def factory(self):
from dallinger.heroku.messages import get_messenger
return get_messenger
def test_returns_debug_version_if_configured(self, factory, stub_config, hit_summary):
from dallinger.heroku.messages import DebugHITMessenger
assert isinstance(factory(hit_summary, stub_config), DebugHITMessenger)
def test_returns_emailing_version_if_configured(self, factory, stub_config, hit_summary):
from dallinger.heroku.messages import EmailingHITMessenger
stub_config.extend({'mode': u'sandbox'})
assert isinstance(factory(hit_summary, stub_config), EmailingHITMessenger)
# Invalid email settings force the debug messenger even in sandbox mode.
def test_returns_debug_version_if_email_config_invalid(self, factory, stub_config, hit_summary):
from dallinger.heroku.messages import DebugHITMessenger
stub_config.extend({'mode': u'sandbox', 'dallinger_email_address': u''})
assert isinstance(factory(hit_summary, stub_config), DebugHITMessenger)
# Exercises EmailingHITMessenger's SMTP conversation and the subject/body
# of each notification type, in both whimsical and plain variants.
@pytest.mark.usefixtures('dummy_mailer')
class TestEmailingHITMessenger(object):
# Full SMTP handshake: STARTTLS, login, sendmail, quit — with the
# configured from/to addresses.
def test_send_negotiates_email_server(self, whimsical, dummy_mailer):
whimsical.send_resubmitted_msg()
assert whimsical.server is dummy_mailer
whimsical.server.starttls.assert_called()
whimsical.server.login.assert_called_once_with(
'fake email username', '<PASSWORD> email password'
)
whimsical.server.sendmail.assert_called()
whimsical.server.quit.assert_called()
assert whimsical.server.sendmail.call_args[0][0] == u'<EMAIL>'
assert whimsical.server.sendmail.call_args[0][1] == u'<EMAIL>'
# smtplib errors must surface as the package's own MessengerError.
def test_wraps_mail_server_exceptions(self, whimsical, dummy_mailer):
import smtplib
from dallinger.heroku.messages import MessengerError
dummy_mailer.login.side_effect = smtplib.SMTPException("Boom!")
with pytest.raises(MessengerError):
whimsical.send_resubmitted_msg()
def test_send_resubmitted_msg_whimsical(self, whimsical):
data = whimsical.send_resubmitted_msg()
assert data['subject'] == 'A matter of minor concern.'
assert 'a full 1 minutes over' in data['message']
def test_send_resubmitted_msg_nonwhimsical(self, nonwhimsical):
data = nonwhimsical.send_resubmitted_msg()
assert data['subject'] == 'Dallinger automated email - minor error.'
assert 'Allowed time: 1' in data['message']
def test_send_hit_cancelled_msg_whimsical(self, whimsical):
data = whimsical.send_hit_cancelled_msg()
assert data['subject'] == 'Most troubling news.'
assert 'a full 1 minutes over' in data['message']
def test_send_hit_cancelled_msg_nonwhimsical(self, nonwhimsical):
data = nonwhimsical.send_hit_cancelled_msg()
assert data['subject'] == 'Dallinger automated email - major error.'
assert 'Allowed time: 1' in data['message']
def test_send_idle_experiment_msg(self, nonwhimsical):
data = nonwhimsical.send_idle_experiment_msg()
assert data['subject'] == 'Idle Experiment.'
def test_send_hit_error_msg(self, nonwhimsical):
data = nonwhimsical.send_hit_error_msg()
assert data['subject'] == 'Error during HIT.'
# Tests for dallinger.heroku.tools module-level helpers; subprocess output
# is stubbed through the check_output fixture.
class TestHerokuUtilFunctions(object):
@pytest.fixture
def heroku(self):
from dallinger.heroku import tools
return tools
# auth_token() decodes and strips the raw CLI bytes.
def test_auth_token(self, heroku, check_output):
check_output.return_value = b'some response '
assert heroku.auth_token() == u'some response'
def test_log_in_ok(self, heroku, check_output):
check_output.return_value = b'all good'
heroku.log_in()
# Any CLI failure is translated into a "not logged in" error.
def test_log_in_fails(self, heroku, check_output):
check_output.side_effect = Exception('boom!')
with pytest.raises(Exception) as excinfo:
heroku.log_in()
assert excinfo.match('You are not logged into Heroku.')
# Tests for the HerokuApp wrapper around the `heroku` CLI. Most tests use
# the `app` fixture, which patches subprocess so no real CLI call is made;
# `full_app` creates (and destroys) a real app for the opt-in integration test.
class TestHerokuApp(object):
@pytest.fixture
def temp_repo(self, in_tempdir, stub_config):
from dallinger.utils import GitClient
stub_config.write()
config = {'user.name': 'Test User', 'user.email': '<EMAIL>'}
git = GitClient()
git.init(config=config)
git.add("--all")
git.commit("Test Repo")
# Real HerokuApp; teardown destroys the remote app.
@pytest.fixture
def full_app(self):
from dallinger.heroku.tools import HerokuApp
the_app = HerokuApp(dallinger_uid='fake-uid', output=None, team=None)
yield the_app
the_app.destroy()
# Mocked HerokuApp: subprocess patched for the fixture's lifetime.
@pytest.fixture
def app(self):
from dallinger.heroku.tools import HerokuApp
with mock.patch('dallinger.heroku.tools.subprocess'):
the_app = HerokuApp(
dallinger_uid='fake-uid', output=None, team="fake team"
)
yield the_app
# Derived properties: app name/URLs are computed from the dallinger uid.
def test_name(self, app):
assert app.name == u'dlgr-fake-uid'
def test_url(self, app):
assert app.url == u'https://dlgr-fake-uid.herokuapp.com'
def test_dashboard_url(self, app):
assert app.dashboard_url == u'https://dashboard.heroku.com/apps/dlgr-fake-uid'
# Each command test below pins the exact argv passed to the heroku CLI.
def test_bootstrap_creates_app_with_team(self, app, check_call):
app.team = 'some-team'
app.bootstrap()
check_call.assert_has_calls([
mock.call(['heroku', 'apps:create', 'dlgr-fake-uid', '--buildpack',
'heroku/python',
'--org', 'some-team'], stdout=None),
])
def test_bootstrap_sets_hostname(self, app, check_call):
app.team = 'some-team'
app.bootstrap()
check_call.assert_has_calls([
mock.call(['heroku', 'config:set',
'HOST=https://dlgr-fake-uid.herokuapp.com',
'--app', 'dlgr-fake-uid'], stdout=None)
])
def test_addon(self, app, check_call):
app.addon('some-fake-addon')
check_call.assert_called_once_with(
["heroku", "addons:create", "some-fake-addon", "--app", app.name],
stdout=None
)
def test_addon_destroy(self, app, check_call):
app.addon_destroy('some-fake-addon')
check_call.assert_called_once_with(
[
"heroku",
"addons:destroy", 'some-fake-addon',
"--app", app.name,
"--confirm", app.name
],
stdout=None
)
def test_buildpack(self, app, check_call):
app.buildpack('some-fake-buildpack')
check_call.assert_called_once_with(
["heroku", "buildpacks:add", "some-fake-buildpack", "--app", app.name],
stdout=None
)
# clock_is_on inspects `heroku ps:scale` output (parsing tested below).
def test_clock_is_on_checks_psscale(self, app, check_output):
app.clock_is_on
check_output.assert_called_once_with(
["heroku", "ps:scale", "--app", app.name]
)
# clock_is_on parsing: true only when a 'clock=1' dyno entry is present.
def test_clock_is_on_returns_true_if_clock_1(self, app, check_output):
check_output.return_value = b'clock=1:Standard-2X console=0:Standard-1X'
assert app.clock_is_on is True
def test_clock_is_on_returns_false_if_clock_0(self, app, check_output):
check_output.return_value = b'clock=0:Standard-2X console=0:Standard-1X'
assert app.clock_is_on is False
def test_clock_is_on_returns_false_if_no_clock(self, app, check_output):
check_output.return_value = b'console=0:Standard-1X web=1:Standard-2X'
assert app.clock_is_on is False
# db_uri extracts the trailing postgres:// URL from the CLI output...
def test_db_uri(self, app, check_output):
check_output.return_value = b'blahblahpostgres://foobar'
assert app.db_uri == u'postgres://foobar'
# ...and raises NameError when no URL can be found (note: output may
# contain non-ASCII box-drawing characters from the heroku CLI).
def test_db_uri_raises_if_no_match(self, app, check_output):
check_output.return_value = u'└─ as DATABASE on ⬢ dlgr-da089b8f app'.encode('utf8')
with pytest.raises(NameError) as excinfo:
app.db_uri
assert excinfo.match("Could not retrieve the DB URI")
# db_url waits for the DB to be ready, then returns the stripped URL.
def test_db_url(self, app, check_output, check_call):
check_output.return_value = b'some url '
assert app.db_url == u'some url'
check_call.assert_called_once_with(
["heroku", "pg:wait", "--app", app.name],
stdout=None
)
def test_backup_capture(self, app, check_call):
app.backup_capture()
check_call.assert_called_once_with(
["heroku", "pg:backups:capture", "--app", app.name],
stdout=None, stderr=None
)
def test_backup_download(self, app, check_call):
app.backup_download()
check_call.assert_called_once_with(
["heroku", "pg:backups:download", "--app", app.name],
stdout=None, stderr=None
)
def test_destroy(self, app, check_output):
check_output.return_value = b'some response message'
assert app.destroy() == 'some response message'
check_output.assert_called_once_with(
["heroku", "apps:destroy", "--app", app.name, "--confirm", app.name],
)
def test_get(self, app, check_output):
check_output.return_value = b'some value'
assert app.get('some key') == u'some value'
check_output.assert_called_once_with(
["heroku", "config:get", "some key", "--app", app.name],
)
def test_open_logs(self, app, check_call):
app.open_logs()
check_call.assert_called_once_with(
["heroku", "addons:open", "papertrail", "--app", app.name],
stdout=None
)
def test_pg_pull(self, app, check_call):
app.pg_pull()
check_call.assert_called_once_with(
["heroku", "pg:pull", "DATABASE_URL", app.name, "--app", app.name],
stdout=None
)
def test_pg_wait(self, app, check_call):
app.pg_wait()
check_call.assert_called_once_with(
["heroku", "pg:wait", "--app", app.name],
stdout=None
)
def test_redis_url(self, app, check_output):
check_output.return_value = b'some url'
assert app.redis_url == u'some url'
check_output.assert_called_once_with(
["heroku", "config:get", "REDIS_URL", "--app", app.name],
)
def test_restore(self, app, check_call):
app.restore('some url')
check_call.assert_called_once_with(
[
"heroku",
"pg:backups:restore",
"some url",
"DATABASE_URL",
"--app",
app.name,
"--confirm",
app.name,
],
stdout=None
)
def test_scale_up_dyno(self, app, check_call):
app.scale_up_dyno('some process', quantity=1, size='free')
check_call.assert_called_once_with(
[
"heroku",
"ps:scale",
"some process=1:free",
"--app",
app.name,
],
stdout=None
)
def test_scale_down_dyno(self, app, check_call):
app.scale_down_dyno('some process')
check_call.assert_called_once_with(
[
"heroku",
"ps:scale",
"some process=0",
"--app",
app.name,
],
stdout=None
)
# scale_down_dynos only scales the clock dyno down when clock_is_on.
def test_scale_down_dynos_with_clock_off(self, app, check_call, check_output):
check_output.return_value = b'[string indicating no clock process]'
app.scale_down_dynos()
check_call.assert_has_calls([
mock.call(['heroku', 'ps:scale', 'web=0', '--app', u'dlgr-fake-uid'], stdout=None),
mock.call(['heroku', 'ps:scale', 'worker=0', '--app', u'dlgr-fake-uid'], stdout=None)
])
def test_scale_down_dynos_with_clock_on(self, app, check_call, check_output):
check_output.return_value = b'clock=1 <= indicates clock is on'
app.scale_down_dynos()
check_call.assert_has_calls([
mock.call(['heroku', 'ps:scale', 'web=0', '--app', u'dlgr-fake-uid'], stdout=None),
mock.call(['heroku', 'ps:scale', 'worker=0', '--app', u'dlgr-fake-uid'], stdout=None),
mock.call(['heroku', 'ps:scale', 'clock=0', '--app', u'dlgr-fake-uid'], stdout=None),
])
def test_set(self, app, check_call):
app.set('some key', 'some value')
check_call.assert_called_once_with(
[
"heroku",
"config:set",
"some key='some value'",
"--app",
app.name,
],
stdout=None
)
def test_set_called_with_nonsensitive_key_uses_stdoutput(self, app, check_call):
app.set('some_nonsensitive_key', 'some value')
assert check_call.call_args_list[0][-1]['stdout'] is app.out
# Sensitive keys (e.g. AWS secrets) must not be echoed via check_call.
def test_set_called_with_sensitive_key_suppresses_stdoutput(self, app, check_call):
app.set('aws_secret_access_key', 'some value')
assert len(check_call.call_args_list) == 0
# NOTE(review): `pytest.config` was removed in pytest 5.0; this skipif
# should read the option via `request.config.getoption("--heroku")` or a
# custom marker — confirm the project's pinned pytest version.
@pytest.mark.skipif(not pytest.config.getvalue("heroku"),
reason="--heroku was not specified")
def test_full_monty(self, full_app, temp_repo):
app = full_app
assert app.name == u'dlgr-fake-uid'
assert app.url == u'https://dlgr-fake-uid.herokuapp.com'
assert app.dashboard_url == u"https://dashboard.heroku.com/apps/dlgr-fake-uid"
app.bootstrap()
app.buildpack("https://github.com/stomita/heroku-buildpack-phantomjs")
app.set('auto_recruit', True)
# Integration-style tests for HerokuLocalWrapper, which runs `heroku local`
# as a real subprocess inside a temporary experiment directory.
@pytest.mark.usefixtures('bartlett_dir')
class TestHerokuLocalWrapper(object):
# Builds a throwaway experiment dir and chdirs into it for the test.
@pytest.fixture
def config(self):
from dallinger.command_line import setup_experiment
cwd = os.getcwd()
config = get_config()
if not config.ready:
config.load()
(id, tmp) = setup_experiment(verbose=True, exp_config={})
os.chdir(tmp)
yield config
os.chdir(cwd)
# A stand-in for the CLI output object with mocked log channels.
@pytest.fixture
def output(self):
class Output(object):
def __init__(self):
self.log = mock.Mock()
self.error = mock.Mock()
self.blather = mock.Mock()
return Output()
# Yields a wrapper and force-kills the child process on teardown;
# IndexError means the wrapper never recorded any output (never started).
@pytest.fixture
def heroku(self, config, env, output, clear_workers):
from dallinger.heroku.tools import HerokuLocalWrapper
wrapper = HerokuLocalWrapper(config, output, env=env)
yield wrapper
try:
print("Calling stop() on {}".format(wrapper))
print(wrapper._record[-1])
wrapper.stop(signal.SIGKILL)
except IndexError:
pass
def test_start(self, heroku):
assert heroku.start()
assert heroku.is_running
# start() refuses to run without HOME, which `heroku local` requires.
def test_start_raises_without_home_dir_set(self, heroku):
from dallinger.heroku.tools import HerokuStartupError
env = heroku.env.copy()
del env['HOME']
heroku.env = env
with pytest.raises(HerokuStartupError) as excinfo:
heroku.start()
assert excinfo.match('"HOME" environment not set... aborting.')
def test_gives_up_after_timeout(self, heroku):
from dallinger.heroku.tools import HerokuTimeoutError
with pytest.raises(HerokuTimeoutError):
heroku.start(timeout_secs=1)
# A gunicorn "[DONE] Killing all processes" line in the stream means
# startup failed and must be reported as HerokuStartupError.
def test_quits_on_gunicorn_startup_error(self, heroku):
from dallinger.heroku.tools import HerokuStartupError
heroku.verbose = False # more coverage
heroku._stream = mock.Mock(return_value=['[DONE] Killing all processes'])
with pytest.raises(HerokuStartupError):
heroku.start()
def test_start_fails_if_stream_ends_without_matching_success_regex(self, heroku):
from dallinger.heroku.tools import HerokuStartupError
heroku._stream = mock.Mock(
return_value=['apple', 'orange', heroku.STREAM_SENTINEL]
)
heroku.success_regex = 'not going to match anything'
with pytest.raises(HerokuStartupError):
heroku.start()
assert not heroku.is_running
def test_stop(self, heroku):
heroku.start()
heroku.stop(signal.SIGKILL)
heroku.out.log.assert_called_with('Local Heroku process terminated.')
def test_stop_on_killed_process_no_error(self, heroku):
    """stop() after the child process has already died logs, not raises."""
    heroku.start()
    heroku._process.terminate()
    heroku.stop()
    # Fix: this membership check was a bare expression with no `assert`,
    # so the test could never fail on it. Assert it explicitly.
    assert mock.call("Local Heroku was already terminated.") in heroku.out.log.mock_calls
# An unrunnable shell command raises OSError and is reported on out.error.
def test_start_when_shell_command_fails(self, heroku):
heroku.shell_command = 'nonsense'
with pytest.raises(OSError):
heroku.start()
heroku.out.error.assert_called_with(
"Couldn't start Heroku for local debugging.")
def test_stop_before_start_is_noop(self, heroku):
heroku.stop()
heroku.out.log.assert_called_with("No local Heroku process was running.")
def test_start_when_already_started_is_noop(self, heroku):
heroku.start()
heroku.start()
heroku.out.log.assert_called_with("Local Heroku is already running.")
# monitor() forwards every streamed line to the listener callback...
def test_monitor(self, heroku):
heroku._stream = mock.Mock(return_value=['apple', 'orange'])
listener = mock.Mock()
heroku.monitor(listener)
listener.assert_has_calls([
mock.call('apple'),
mock.call('orange'),
])
# ...and stops as soon as the listener returns MONITOR_STOP.
def test_monitor_stops_iterating_when_told(self, heroku):
heroku._stream = mock.Mock(return_value=['apple', 'orange'])
listener = mock.Mock()
listener.return_value = heroku.MONITOR_STOP
heroku.monitor(listener)
listener.assert_has_calls([
mock.call('apple'),
])
# The context-manager protocol starts on enter and stops on exit.
def test_as_context_manager(self, config, env, output, clear_workers):
from dallinger.heroku.tools import HerokuLocalWrapper
with HerokuLocalWrapper(config, output, env=env) as heroku:
assert heroku.is_running
assert not heroku.is_running
```
|
{
"source": "jcphill/gdal",
"score": 2
}
|
#### File: autotest/alg/applyverticalshiftgrid.py
```python
import gdaltest
from osgeo import gdal, osr
###############################################################################
# Rather dummy test: grid = DEM
# Checks output metadata mirrors the source, that the result survives
# dropping the input references, and the BLOCKSIZE/DATATYPE/inverse options.
def test_applyverticalshiftgrid_1():
src_ds = gdal.Open('../gcore/data/byte.tif')
src_ds = gdal.Translate('', src_ds, format='MEM',
width=20, height=40)
grid_ds = gdal.Translate('', src_ds, format='MEM')
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds.GetRasterBand(1).DataType == gdal.GDT_Byte
assert out_ds.RasterXSize == src_ds.RasterXSize
assert out_ds.RasterYSize == src_ds.RasterYSize
assert out_ds.GetGeoTransform() == src_ds.GetGeoTransform()
assert out_ds.GetProjectionRef() == src_ds.GetProjectionRef()
# Check that we can drop the reference to the sources
src_ds = None
grid_ds = None
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 10038
src_ds = gdal.Open('../gcore/data/byte.tif')
src_ds = gdal.Translate('', src_ds, format='MEM',
width=20, height=40)
# Test block size
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, src_ds,
options=['BLOCKSIZE=15'])
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 10038
# Inverse transformer: shifting a DEM down by itself yields all zeros.
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, src_ds, True,
options=['DATATYPE=Float32'])
assert out_ds.GetRasterBand(1).DataType == gdal.GDT_Float32
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 0
###############################################################################
# Error cases
# Each invalid input combination must make ApplyVerticalShiftGrid return None.
def test_applyverticalshiftgrid_2():
sr = osr.SpatialReference()
sr.SetFromUserInput("WGS84")
# Loop over six invalid permutations: missing geotransform, missing
# projection, or an extra band, on either the source or the grid.
for i in range(6):
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
if i != 0:
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
if i != 1:
src_ds.SetProjection(sr.ExportToWkt())
if i == 2:
src_ds.AddBand(gdal.GDT_Byte)
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
if i != 3:
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
if i != 4:
grid_ds.SetProjection(sr.ExportToWkt())
if i == 5:
grid_ds.AddBand(gdal.GDT_Byte)
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds is None, i
# Non-invertible source geotransform
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 0, 0, 0, 0, 0])
src_ds.SetProjection(sr.ExportToWkt())
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds is None
# Non-invertible grid geotransform
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 0, 0, 0, 0, 0])
grid_ds.SetProjection(sr.ExportToWkt())
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds is None
# No PROJ.4 translation for source SRS, coordinate transformation
# initialization has failed
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection('LOCAL_CS["foo"]')
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds is None
# Out of memory
if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is None:
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['BLOCKSIZE=2000000000'])
assert out_ds is None
# Wrong DATATYPE
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
with gdaltest.error_handler():
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['DATATYPE=x'])
assert out_ds is None
###############################################################################
# Test with grid and src not in same projection
# The grid must be reprojected/resampled on the fly; each RESAMPLING mode
# is pinned to its expected checksum.
def test_applyverticalshiftgrid_3():
src_ds = gdal.Open('../gcore/data/byte.tif')
grid_ds = gdal.Warp('', src_ds, format='MEM', dstSRS='EPSG:4326',
width=40, height=40)
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['RESAMPLING=NEAREST'])
assert out_ds.RasterXSize == src_ds.RasterXSize
assert out_ds.RasterYSize == src_ds.RasterYSize
assert out_ds.GetGeoTransform() == src_ds.GetGeoTransform()
assert out_ds.GetProjectionRef() == src_ds.GetProjectionRef()
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 5112
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['RESAMPLING=BILINEAR'])
cs = out_ds.GetRasterBand(1).Checksum()
# Slight platform-dependent rounding differences are tolerated.
assert cs == 4867 or cs == 4868
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['RESAMPLING=CUBIC'])
cs = out_ds.GetRasterBand(1).Checksum()
assert cs in (4841, 4854, 4842) # 4842 on Mac / Conda
###############################################################################
# Test nodata
# Nodata pixels must pass through unshifted, and ERROR_ON_MISSING_VERT_SHIFT
# must turn missing grid coverage into a read error.
def test_applyverticalshiftgrid_4():
sr = osr.SpatialReference()
sr.SetFromUserInput("WGS84")
# Nodata on source
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
src_ds.GetRasterBand(1).Fill(1)
src_ds.GetRasterBand(1).SetNoDataValue(1)
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
grid_ds.GetRasterBand(1).Fill(30)
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds.GetRasterBand(1).GetNoDataValue() == 1
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 1
# Nodata on grid
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
src_ds.GetRasterBand(1).Fill(1)
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
grid_ds.GetRasterBand(1).Fill(30)
grid_ds.GetRasterBand(1).SetNoDataValue(30)
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds)
assert out_ds.GetRasterBand(1).GetNoDataValue() is None
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 1
# ERROR_ON_MISSING_VERT_SHIFT due to non compatible extents
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
src_ds.GetRasterBand(1).Fill(255)
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([10, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['ERROR_ON_MISSING_VERT_SHIFT=YES'])
with gdaltest.error_handler():
data = out_ds.GetRasterBand(1).ReadRaster()
assert data is None
# ERROR_ON_MISSING_VERT_SHIFT due to nodata in grid
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
src_ds.SetProjection(sr.ExportToWkt())
src_ds.GetRasterBand(1).Fill(255)
grid_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
grid_ds.SetGeoTransform([0, 1, 0, 0, 0, -1])
grid_ds.SetProjection(sr.ExportToWkt())
grid_ds.GetRasterBand(1).SetNoDataValue(0)
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds,
options=['ERROR_ON_MISSING_VERT_SHIFT=YES'])
with gdaltest.error_handler():
data = out_ds.GetRasterBand(1).ReadRaster()
assert data is None
###############################################################################
# Test scaling parameters
# srcUnitToMeter / dstUnitToMeter must rescale the (halved) source values
# back to the original byte.tif checksum (4672).
def test_applyverticalshiftgrid_5():
src_ds = gdal.Open('../gcore/data/byte.tif')
grid_ds = gdal.Translate('', src_ds, format='MEM')
grid_ds.GetRasterBand(1).Fill(0)
src_ds = gdal.Translate('', src_ds, format='MEM',
outputType=gdal.GDT_Float32,
scaleParams=[[0, 1, 0, 0.5]])
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds, srcUnitToMeter=2)
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 4672
src_ds = gdal.Open('../gcore/data/byte.tif')
grid_ds = gdal.Translate('', src_ds, format='MEM')
grid_ds.GetRasterBand(1).Fill(0)
src_ds = gdal.Translate('', src_ds, format='MEM',
outputType=gdal.GDT_Float32,
scaleParams=[[0, 1, 0, 0.5]])
out_ds = gdal.ApplyVerticalShiftGrid(src_ds, grid_ds, dstUnitToMeter=0.5)
cs = out_ds.GetRasterBand(1).Checksum()
assert cs == 4672
###############################################################################
# Simulate EGM grids
# A constant 10m GTX geoid grid applied through a +geoidgrids warp.
def test_applyverticalshiftgrid_6():
grid_ds = gdal.GetDriverByName('GTX').Create(
'tmp/applyverticalshiftgrid_6.gtx', 1440, 721, 1, gdal.GDT_Float32)
grid_ds.SetGeoTransform([-180.125, 0.25, 0, 90.125, 0, -0.25])
grid_ds.GetRasterBand(1).Fill(10)
grid_ds = None
ds = gdal.Warp('', '../gcore/data/byte.tif', format='MEM',
srcSRS='EPSG:32611',
dstSRS='+proj=utm +zone=11 +datum=WGS84 +geoidgrids=./tmp/applyverticalshiftgrid_6.gtx +vunits=m +no_defs')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 4783
gdal.Unlink('tmp/applyverticalshiftgrid_6.gtx')
###############################################################################
# Simulate USA geoid grids with long origin > 180
# Same as _6 but the grid's longitude origin wraps past the antimeridian.
def test_applyverticalshiftgrid_7():
grid_ds = gdal.GetDriverByName('GTX').Create(
'tmp/applyverticalshiftgrid_7.gtx', 700, 721, 1, gdal.GDT_Float32)
grid_ds.SetGeoTransform([-150 + 360, 0.25, 0, 90.125, 0, -0.25])
grid_ds.GetRasterBand(1).Fill(10)
grid_ds = None
ds = gdal.Warp('', '../gcore/data/byte.tif', format='MEM',
srcSRS='EPSG:32611',
dstSRS='+proj=utm +zone=11 +datum=WGS84 +geoidgrids=./tmp/applyverticalshiftgrid_7.gtx +vunits=m +no_defs')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 4783
gdal.Unlink('tmp/applyverticalshiftgrid_7.gtx')
```
#### File: python/osgeo/__init__.py
```python
from sys import platform, version_info
# Python 3.8+ on Windows no longer searches PATH for DLL dependencies, so
# GDAL DLL directories must be registered explicitly via os.add_dll_directory.
# With USE_PATH_FOR_GDAL_PYTHON set, every PATH entry is registered; otherwise
# only the first PATH entry containing a gdal*.dll / libgdal*.dll is added.
if version_info >= (3, 8, 0) and platform == 'win32':
import os
if 'USE_PATH_FOR_GDAL_PYTHON' in os.environ and 'PATH' in os.environ:
for p in os.environ['PATH'].split(';'):
if p:
try:
os.add_dll_directory(p)
except (FileNotFoundError, OSError):
# Nonexistent/inaccessible directories are skipped silently.
continue
elif 'PATH' in os.environ:
import glob
for p in os.environ['PATH'].split(';'):
if glob.glob(os.path.join(p, 'gdal*.dll')) or glob.glob(os.path.join(p, 'libgdal*.dll')):
try:
os.add_dll_directory(p)
break
except (FileNotFoundError, OSError):
continue
# Import the SWIG-generated _gdal extension, preferring the package-relative
# module ('osgeo._gdal') and falling back to a top-level '_gdal'. On Windows
# with Python >= 3.8, a failed import gets an explanatory hint about the
# changed DLL search behavior appended to the traceback.
def swig_import_helper():
import importlib
from os.path import dirname, basename
mname = basename(dirname(__file__)) + '._gdal'
try:
return importlib.import_module(mname)
except ImportError:
if version_info >= (3, 8, 0) and platform == 'win32':
import os
if not 'USE_PATH_FOR_GDAL_PYTHON' in os.environ:
msg = 'On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH.\n'
msg += 'If gdalXXX.dll is in the PATH, then set the USE_PATH_FOR_GDAL_PYTHON=YES environment variable\n'
msg += 'to feed the PATH into os.add_dll_directory().'
import sys
import traceback
traceback_string = ''.join(traceback.format_exception(*sys.exc_info()))
raise ImportError(traceback_string + '\n' + msg)
return importlib.import_module('_gdal')
_gdal = swig_import_helper()
del swig_import_helper
# Expose the GDAL release name (e.g. "3.3.1") as the package version.
__version__ = _gdal.__version__ = _gdal.VersionInfo("RELEASE_NAME")
# Numeric (major, minor, patch) tuples for the version comparisons below.
gdal_version = tuple(int(s) for s in str(__version__).split('.') if s.isdigit())[:3]
python_version = tuple(version_info)[:3]
# Setting this flag to True will cause importing osgeo to fail on an unsupported Python version.
# Otherwise a deprecation warning will be issued instead.
# Importing osgeo from an unsupported Python version might still partially work
# because the core of GDAL Python bindings might still support an older Python version.
# Hence the default option to just issue a warning.
# To get complete functionality upgrading to the minimum supported version is needed.
fail_on_unsupported_version = False
# The following is a Sequence of tuples in the form of (gdal_version, python_version).
# Each line represents the minimum supported Python version of a given GDAL version.
# Introducing a new line for the next GDAL version will trigger a deprecation warning
# when importing osgeo from a Python version which will not be
# supported in the next version of GDAL.
gdal_version_and_min_supported_python_version = (
((0, 0), (0, 0)),
((1, 0), (2, 0)),
((2, 0), (2, 7)),
((3, 3), (3, 6)),
# ((3, 4), (3, 7)),
# ((3, 5), (3, 8)),
)
def ver_str(ver):
    """Format a version tuple like (3, 6) as '3.6'; pass None through."""
    if ver is None:
        return None
    return '.'.join(map(str, ver))
# Walk the support table to determine, for the running GDAL/Python pair:
# the minimum Python this GDAL supports, the last GDAL supporting this
# Python, and the first future GDAL that will drop this Python.
minimum_supported_python_version_for_this_gdal_version = None
this_python_version_will_be_deprecated_in_gdal_version = None
last_gdal_version_to_supported_your_python_version = None
next_version_of_gdal_will_use_python_version = None
for gdal_ver, py_ver in gdal_version_and_min_supported_python_version:
if gdal_version >= gdal_ver:
minimum_supported_python_version_for_this_gdal_version = py_ver
if python_version >= py_ver:
last_gdal_version_to_supported_your_python_version = gdal_ver
if not this_python_version_will_be_deprecated_in_gdal_version:
if python_version < py_ver:
this_python_version_will_be_deprecated_in_gdal_version = gdal_ver
next_version_of_gdal_will_use_python_version = py_ver
# Unsupported Python: raise or warn, depending on fail_on_unsupported_version.
if python_version < minimum_supported_python_version_for_this_gdal_version:
msg = 'Your Python version is {}, which is no longer supported by GDAL {}. ' \
'Please upgrade your Python version to Python >= {}, ' \
'or use GDAL <= {}, which supports your Python version.'.\
format(ver_str(python_version), ver_str(gdal_version),
ver_str(minimum_supported_python_version_for_this_gdal_version),
ver_str(last_gdal_version_to_supported_your_python_version))
if fail_on_unsupported_version:
raise Exception(msg)
else:
from warnings import warn, simplefilter
simplefilter('always', DeprecationWarning)
warn(msg, DeprecationWarning)
# Still supported, but scheduled for deprecation: warn only.
elif this_python_version_will_be_deprecated_in_gdal_version:
msg = 'You are using Python {} with GDAL {}. ' \
'This Python version will be deprecated in GDAL {}. ' \
'Please consider upgrading your Python version to Python >= {}, ' \
'Which will be the minimum supported Python version of GDAL {}.'.\
format(ver_str(python_version), ver_str(gdal_version),
ver_str(this_python_version_will_be_deprecated_in_gdal_version),
ver_str(next_version_of_gdal_will_use_python_version),
ver_str(this_python_version_will_be_deprecated_in_gdal_version))
from warnings import warn, simplefilter
simplefilter('always', DeprecationWarning)
warn(msg, DeprecationWarning)
```
#### File: swig/python/trimmedsysconfig.py
```python
import os
import sys
from sysconfig import get_python_version
# These are needed in a couple of spots, so just compute them once.
# Normalized installation prefixes; the BASE_* variants bypass any active
# virtual environment and point at the underlying interpreter installation.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Added by GDAL
def _is_debian():
for p in sys.path:
if 'dist-packages' in p:
return True
return False
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
# Debian's dist-packages substitution below only applies to the default
# system prefixes, never to a custom prefix.
is_default_prefix = not prefix or os.path.normpath(prefix) in ('/usr', '/usr/local')
if prefix is None:
if standard_lib:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
else:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
# This check is Debian specific
# Debian system Python (no venv/user base active) uses dist-packages.
elif (_is_debian() and is_default_prefix and
'PYTHONUSERBASE' not in os.environ and
'VIRTUAL_ENV' not in os.environ and
'real_prefix' not in sys.__dict__ and
sys.prefix == sys.base_prefix):
return os.path.join(prefix, "lib", "python3", "dist-packages")
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise Exception(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
|
{
"source": "jcpince/algorithms",
"score": 4
}
|
#### File: algorithms/algos/bst.seq.py
```python
# Forward declaration so method annotations can reference BST before the
# full class definition (later in this file) is parsed.
class BST(object):
pass
class BSTNode:
    """A binary-search-tree node holding an integer key and a payload ref."""

    def __init__(self, value: int, ref: object):
        self.right = None   # subtree with values >= self.value
        self.left = None    # subtree with values < self.value
        self.value = value  # ordering key
        self.ref = ref      # opaque payload associated with the key

    def to_list(self):
        """Serialize the subtree rooted here as nested lists.

        Returns [[value, ref], left, right], where each absent child is
        encoded as None and present children are recursively serialized.
        """
        # Fix: removed the stray debug print() calls (and commented-out
        # code) that polluted stdout on every serialization.
        return [
            [self.value, self.ref],
            self.left.to_list() if self.left else None,
            self.right.to_list() if self.right else None,
        ]
class BST:
    """Unbalanced binary search tree of BSTNodes; duplicate keys go right."""

    def __init__(self):
        self.root = None

    def insert_node(self, node: BSTNode) -> None:
        # O(h) with h height of the tree
        if self.root is None:
            self.root = node
            return
        current = self.root
        while(True):
            if node.value >= current.value:
                if current.right:
                    current = current.right
                else:
                    current.right = node
                    break
            else:
                if current.left:
                    current = current.left
                else:
                    current.left = node
                    break

    def delete_node(self, node: BSTNode) -> bool:
        """Delete the (first) node holding this node's value."""
        return self.delete_value(node.value)

    def delete_value(self, value: int) -> bool:
        """Remove the first node with `value`; returns False when absent."""
        pred, to_delete = self.search_with_pred(value)
        if to_delete is None:
            return False
        # Delete the root
        if pred == None:
            if to_delete.right is None:
                self.root = to_delete.left
                return True
            smallest_pred, smallest = self.smallest_with_pred(None, to_delete.right)
            # NOTE(review): the two branches below are identical; neither
            # detaches `smallest` from `smallest_pred` nor re-attaches the
            # old root's right subtree, so this root-deletion path looks
            # incomplete whenever root.right has a left child — verify.
            if smallest_pred is not None:
                smallest.left = self.root.left
            else:
                smallest.left = self.root.left
            self.root = smallest
            return True
        # Delete a leaf
        if to_delete.right is None and to_delete.left is None:
            if pred.right == to_delete:
                pred.right = None
            else:
                pred.left = None
            return True
        # Delete a node with a single child
        if to_delete.right is None:
            if pred.right == to_delete:
                pred.right = to_delete.left
            else:
                pred.left = to_delete.left
            return True
        if to_delete.left is None:
            if pred.left == to_delete:
                pred.left = to_delete.right
            else:
                pred.right = to_delete.right
            return True
        # Delete a node with 2 children: splice in the smallest node of the
        # appropriate subtree, guarding against self-reference cycles.
        if pred.right == to_delete:
            smallest_pred, smallest = self.smallest_with_pred(pred, to_delete.left)
            #smallest_pred.right = None
            pred.right = smallest
            smallest.left = to_delete.left if (smallest != to_delete.left) else None
            smallest.right = to_delete.right
        else:
            smallest_pred, smallest = self.smallest_with_pred(pred, to_delete.right)
            #smallest_pred.left = None
            pred.left = smallest
            smallest.left = to_delete.left
            smallest.right = to_delete.right if (smallest != to_delete.right) else None
        return True

    def search(self, value: int, intermediate_root = None) -> BSTNode:
        """Return the node holding `value` (or None), searching from
        `intermediate_root` when given, else from the tree root."""
        _, found = self.search_with_pred(value, intermediate_root)
        return found

    def search_with_pred(self, value: int, intermediate_root = None) -> tuple:
        """Return (predecessor, node) for `value`, or (None, None) if absent."""
        # O(h) with h height of the tree or subtree
        # (which can be n if the tree is all left or right)
        current = intermediate_root
        pred = None
        if current is None:
            current = self.root
        while current is not None:
            if value > current.value:
                pred = current
                current = current.right
            elif value < current.value:
                pred = current
                current = current.left
            else:
                return pred, current
        return None, None

    def smallest(self, intermediate_root = None) -> BSTNode:
        """Return the smallest node of the (sub)tree."""
        _, smallest = self.smallest_with_pred(None, intermediate_root)
        return smallest

    def smallest_with_pred(self, pred, intermediate_root) -> tuple:
        """Return (predecessor, smallest node) of the subtree rooted at
        `intermediate_root` (whole tree when None); `pred` seeds the
        predecessor for the first step."""
        # O(h) with h height of the tree or subtree
        # (which can be n if the tree is all left or right)
        current = intermediate_root
        if current is None:
            current = self.root
        smallest = current
        smallest_pred = pred
        while current is not None:
            if smallest.value > current.value:
                smallest = current
                smallest_pred = pred
            pred = current
            current = current.left
        return smallest_pred, smallest

    def to_list(self, current: BSTNode = None):
        """Nested-list rendering: [[value, ref], left_subtree, right_subtree]."""
        if current is None:
            current = self.root
        if current is None:
            return []
        l = list()
        l.append([current.value, current.ref])
        if current.left:
            l.append(self.to_list(current.left))
        else: l.append(None)
        if current.right:
            l.append(self.to_list(current.right))
        else: l.append(None)
        return l
```
#### File: hackerrank/euler007/euler007_2.py
```python
import time
from math import sqrt
# Rolling counters used by the 3/5/7 candidate-skipping wheel below.
count3 = 0
count5 = 0
count7 = 0

def get_prime(primes, N):
    """Return the N-th prime (1-indexed), extending the shared `primes`
    list as needed.

    Candidates advance by 2 (odd numbers only) and are bumped again
    whenever the counter/modulo wheel says they are divisible by 3, 5
    or 7; surviving candidates are trial-divided by the known primes up
    to sqrt(candidate).
    """
    global count3, count5, count7
    if N < len(primes):
        return primes[N-1]
    candidate = primes[-1]
    while len(primes) != N:
        candidate += 2
        count3 += 1
        count5 += 1
        count7 += 1
        if count3 == 2:
            # Divisible by 3: skip ahead and re-test the new candidate
            # against 5 and 7 as well.
            count3 = 0
            count5 += 1
            count7 += 1
            candidate += 2
            if candidate % 5 == 0: #count5 == 5:
                count5 = 0
                count3 += 1
                count7 += 1
                candidate += 2
                if candidate % 7 == 0: #count7 == 6:
                    count7 = 0
                    count3 += 1
                    count5 += 1
                    candidate += 2
        elif candidate % 5 == 0: #count5 == 5:
            count5 = 0
            count3 += 1
            count7 += 1
            candidate += 2
            if candidate % 7 == 0: #count7 == 6:
                count7 = 0
                count3 += 1
                count5 += 1
                candidate += 2
        elif candidate % 7 == 0: #count7 == 6:
            count7 = 0
            count3 += 1
            count5 += 1
            candidate += 2
        s = int(sqrt(candidate))
        # Trial division by known primes up to sqrt(candidate); primes[0]
        # (== 2) is skipped since candidates are always odd.
        for p in primes[1:]:
            if p > s:
                primes.append(candidate)
                break
            if (candidate % p) == 0:
                break
    return primes[-1]
# Driver: seed the prime list, answer `t` stdin queries, then self-check
# against two known primes (the 168th and 10000th) and print elapsed time.
start = time.time()
primes = [2, 3, 5, 7, 11]
t = int(input().strip())
for a0 in range(t):
    n = int(input().strip())
    print(get_prime(primes, n))
assert(get_prime(primes, 168) == 997)
assert(get_prime(primes, 10000) == 104729)
print("Computation time: %f seconds" % (time.time() - start))
#print(primes)
```
#### File: hackerrank/euler010/euler010_2.py
```python
import time
import array
from math import sqrt
# primes[i] == 0 marks i as (potentially) prime; the sieve flips composites to 1.
primes = bytearray(1000000)
# primes_sums[i] = cumulative sum of primes <= i (filled for odd i and i == 2).
primes_sums = array.array('L', bytearray(8000000))
primes_sums[2] = 2

def get_primes_below_N(N):
    """Sieve of Eratosthenes over the module-level `primes` bytearray:
    for each odd prime i up to sqrt(N), mark every multiple from i*i on."""
    global primes
    for i in range(3, int(sqrt(N))+1, 2):
        if primes[i] == 0:
            j = i**2
            while j < N:
                primes[j] = 1
                j += i
def get_primes_sums():
    """Fill `primes_sums` so that primes_sums[i] (odd i) is the running
    total of all primes up to and including i; starts from 2."""
    global primes, primes_sums
    primes_sum = 2
    for i in range(3, len(primes), 2):
        if primes[i] == 0:
            primes_sum += i
        primes_sums[i] = primes_sum
# Driver: build the sieve and prefix sums once, then answer each stdin query
# in O(1) by table lookup.
start = time.time()
get_primes_below_N(1000000)
get_primes_sums()
t = int(input().strip())
start = time.time()
for _ in range(t):
    n = int(input().strip())
    # primes_sums is only populated at odd indices (and 2); an even n > 2
    # maps down to n-1, which has the same cumulative sum.
    if n&0x1 == 0x0 and n != 2:
        n -= 1
    print(primes_sums[n])
# Input: an integer n > 1.
#
# Let A be an array of Boolean values, indexed by integers 2 to n,
# initially all set to true.
#
# for i = 2, 3, 4, ..., not exceeding √n:
# if A[i] is true:
# for j = i2, i2+i, i2+2i, i2+3i, ..., not exceeding n:
# A[j] := false.
#
# Output: all i such that A[i] is true.
```
#### File: graph/goingtooffice/goingtooffice.py
```python
import sys
MAXINT = 2**32  # sentinel for "unreachable" distance

def rsd(graph, start, destination, visited, closed_road):
    """Recursive shortest distance from `start` to `destination`, skipping
    the closed road; returns MAXINT when the destination is unreachable.
    Verbose prints trace every decision (debug instrumentation)."""
    result = MAXINT
    for (neighbor, distance) in graph[start]:
        # A road is considered closed only when BOTH its endpoints are
        # in `closed_road`.
        if (neighbor in closed_road) and (start in closed_road):
            print("start %d, visited %s, %d to destination is closed" %
                (start, visited, neighbor))
            continue
        if neighbor in visited:
            print("start %d, visited %s, %d already visited" %
                (start, visited, neighbor))
            continue
        if neighbor == destination:
            print("start %d, visited %s, found the destination %d, returns %d" %
                (start, visited, neighbor, distance))
            return distance
        print("start %d, visited %s, explore %d" % (start, visited, neighbor))
        v = visited.copy()
        v.append(neighbor)
        result = min(result, distance + rsd(graph, neighbor, destination, v, closed_road))
    print("returns %d" % result)
    return result
def call_rsd(graph, start, destination, closed_road):
    """Top-level wrapper around rsd(): seeds the search from each neighbor
    of `start` and returns the best distance, or the string "Infinity"
    when no route exists."""
    result = MAXINT
    for (neighbor, distance) in graph[start]:
        # NOTE(review): this closure test pairs `neighbor` with `destination`,
        # while rsd() pairs `neighbor` with `start` — confirm which endpoint
        # pair the closed road is meant to match.
        if (neighbor in closed_road) and (destination in closed_road):
            continue
        if neighbor == destination:
            result = min(result, distance)
        neighbor_distance = rsd(graph, neighbor, destination, [neighbor], closed_road)
        result = min(result, distance + neighbor_distance)
    if result >= MAXINT:
        return "Infinity"
    return result
def recursive_sd(graph, start, destination, visited0, current_distance,
                 min_distance, closed_road):
    """Bounded DFS: explores start's neighbors, pruning any branch whose
    running distance already meets `min_distance`; returns the best
    distance found (or the incoming bound when nothing better exists)."""
    for (neighbor, distance) in graph[start]:
        print("Visited %s, start %d, neighbor %d" % (visited0, start, neighbor))
        visited = visited0.copy()
        # Prune: this branch can no longer beat the best known route.
        if (current_distance + distance) >= min_distance:
            continue
        if neighbor == destination:
            return current_distance + distance
        if neighbor in visited:
            continue
        if (neighbor in closed_road) and (start in closed_road):
            continue
        # else...
        visited.add(neighbor)
        neighbor_result = recursive_sd(graph, neighbor, destination,
            visited, current_distance + distance, min_distance, closed_road)
        min_distance = min(min_distance, neighbor_result)
    return min_distance
def shortest_distance(graph, start, destination, closed_road):
    """Branch-and-bound shortest path that respects one closed road;
    returns "Infinity" when no route exists."""
    min_distance = MAXINT
    visited0 = set()
    visited0.add(start)
    # makes sure that the start's neighbors won't be explored in recursive_sd
    for (neighbor, distance) in graph[start]:
        # NOTE(review): the `or` pre-marks a neighbor as visited unless BOTH
        # endpoints sit on the closed road — confirm `and` was not intended.
        if (neighbor not in closed_road) or (start not in closed_road):
            visited0.add(neighbor)
    for (neighbor, distance) in graph[start]:
        if (neighbor in closed_road) and (start in closed_road):
            continue
        visited = visited0.copy()
        neighbor_result = recursive_sd(graph,
            neighbor, destination, visited, distance, min_distance, closed_road)
        min_distance = min(min_distance, neighbor_result)
    if min_distance >= MAXINT:
        return "Infinity"
    return min_distance
def filter_roads(graph, start, destination):
    """Remove dead-end cities (exactly one road) other than the start and
    destination, mutating `graph` in place."""
    dead_ends = [
        city
        for city, roads in list(graph.items())
        if city not in (start, destination) and len(roads) == 1
    ]
    for city in dead_ends:
        del graph[city]
def build_graph(roads):
    """Build an undirected adjacency dict from (u, v, w) triples:
    city -> list of (neighbor, weight), with every road stored both ways."""
    graph = dict()
    for u, v, w in roads:
        graph.setdefault(u, list()).append((v, w))
        graph.setdefault(v, list()).append((u, w))
    return graph
# stdin-driven driver: n cities / m roads, then the start/destination pair,
# a day count, and a list of closed roads to evaluate one by one.
stdin = sys.stdin
n, m = (int(i) for i in stdin.readline().strip('\n').split(' '))
roads = list()
for idx in range (0, m):
    (u, v, w) = (int(i) for i in stdin.readline().strip('\n').split(' '))
    roads.append((u, v, w))
start, destination = (int(i) for i in stdin.readline().strip('\n').split(' '))
nb_days = int(stdin.readline().strip('\n'))
closed_roads = list()
# NOTE(review): this reads m closed roads even though nb_days was just
# read — presumably nb_days was intended; verify against the input format.
for idx in range (0, m):
    (u, v) = (int(i) for i in stdin.readline().strip('\n').split(' '))
    closed_roads.append((u, v))
graph = build_graph(roads)
filter_roads(graph, start, destination)
print(graph)
for closed_road in closed_roads:
    print("\n\nStart %d, Destination %d, close_road %s" % (start, destination,
        str(closed_road)))
    print("%s: %s" % (str(closed_road),
        str(call_rsd(graph, start, destination, closed_road))))
    # Debug: only the first closed road is evaluated.
    break
```
#### File: algorithms/hackerrank/magicSquareChecker.py
```python
N = 3  # kept for backward compatibility with existing callers

# Returns true if mat[][] is magic
# square, else returns false.
def isMagicSquare( mat) :
    """Return True if `mat` (a square list-of-lists) is a magic square:
    both diagonals, every row and every column share the same sum.

    Generalized: the size is now taken from len(mat) instead of the
    hard-coded 3x3 constant, so any n x n square works.
    """
    n = len(mat)
    # sum of the prime (main) diagonal
    s = sum(mat[i][i] for i in range(n))
    # sum of the secondary diagonal
    s2 = sum(mat[i][n - i - 1] for i in range(n))
    if s != s2:
        print("Diagonals are different %d vs %d" % (s, s2))
        return False
    # every row sum must equal the diagonal sum
    for i in range(n):
        rowSum = sum(mat[i])
        if rowSum != s:
            print("Row[%d] %s is different from %d" % (i, mat[i], s))
            return False
    # every column sum must equal the diagonal sum
    for i in range(n):
        colSum = sum(mat[j][i] for j in range(n))
        if s != colSum:
            col = [mat[j][i] for j in range(n)]
            print("Column[%d] %s is different from %d" % (i, col, s))
            return False
    return True
# Driver Code
# mat = [ [ 2, 7, 6 ],
#         [ 9, 5, 1 ],
#         [ 4, 3, 8 ] ]
mat = [[4, 3, 8],
       [2, 6, 7],
       [1, 9, 5]]
# Same check and output as before, expressed as a conditional expression.
verdict = "Magic Square" if isMagicSquare(mat) else "Not a magic Square"
print(verdict)
# This code is contributed by <NAME>.
```
#### File: algorithms/leetcode/allPathsSourceTarget.py
```python
from typing import List
class Solution:
    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node len(graph)-1.

        graph[i] lists the nodes reachable from i; the graph is acyclic,
        so a plain DFS that records each path on reaching the target is
        sufficient — no visited-set needed.
        """
        target = len(graph) - 1
        results: List[List[int]] = []

        def walk(node: int, path: List[int]) -> None:
            # Record completed paths; otherwise fan out to every successor.
            if node == target:
                results.append(path)
                return
            for nxt in graph[node]:
                walk(nxt, path + [nxt])

        walk(0, [0])
        print("Results: %s" % results)
        return results
def check_solution():
    """Smoke-test allPathsSourceTarget on a two-path diamond graph."""
    solver = Solution()
    paths = solver.allPathsSourceTarget([[1, 2], [3], [3], []])
    assert len(paths) == 2
    print("All tests passed successfully!!")

check_solution()
```
#### File: algorithms/leetcode/graph.py
```python
from heapq import heappush, heappop
from typing import List
class GraphEdge(object):
    """A weighted directed edge; compares by weight so it can live in a heap."""

    def __init__(self, fr: int, to: int, weight: int):
        self.fr, self.to, self.weight = fr, to, weight

    def __lt__(self, other):
        # Min-heap ordering: the lighter edge wins.
        return self.weight < other.weight

    def __str__(self):
        # Rendered as "[to, weight]"; the source id is implicit.
        return "[%d, %d]" % (self.to, self.weight)
class GraphNode(object):
    """A vertex: integer id plus a min-heap (by weight) of outgoing GraphEdges."""

    def __init__(self, id: int):
        self.id = id
        self.tos = []
        # total_weight is used for visited flag
        self.total_weight = -1

    def addEdge(self, to: int, weight: int):
        # Keeps outgoing edges heap-ordered by weight (GraphEdge.__lt__).
        heappush(self.tos, GraphEdge(self.id, to, weight))

    def __str__(self):
        string = "(%d ,[" % self.id
        for edge in self.tos:
            string += str(edge)
        # NOTE(review): edges are concatenated without a separator,
        # e.g. "(0 ,[[1, 1][2, 1]])" — presumably cosmetic only.
        return string + "])"
class GraphPath(object):
    """A candidate route: cumulative weight plus the ordered node ids."""

    def __init__(self, weight: int, nodes: List[int]):
        self.weight, self.nodes = weight, nodes

    def __lt__(self, other):
        # Heap ordering by total weight: cheapest path pops first.
        return self.weight < other.weight
class Graph(object):
    """Adjacency-list digraph over integer-id GraphNodes.

    Node ids are assumed to index directly into self.nodes.
    """

    def __init__(self, nodes: "List[GraphNode]" = None):
        # BUG FIX: the default used to be a shared mutable list (`nodes=[]`),
        # so every Graph() created without arguments aliased — and mutated —
        # the same list. A None sentinel allocates a fresh list per instance.
        self.nodes = nodes if nodes is not None else []

    def addNode(self, node: GraphNode) -> None:
        self.nodes.append(node)

    def __str__(self):
        string = "(%d nodes: [" % len(self.nodes)
        for node in self.nodes:
            string += str(node)
        return string + "])"

    def topologicalOrder(self) -> List[int]:
        """Kahn's algorithm: repeatedly emit nodes whose in-degree drops to 0."""
        N = len(self.nodes)
        result = []
        in_degree = [0] * N
        # initialize the array of inedges per node
        for node in self.nodes:
            for edge in node.tos:
                in_degree[edge.to] += 1
        # Push the nodes with no incoming edge to our resulting array
        queue = []
        for node_idx, degree in enumerate(in_degree):
            if degree == 0:
                queue.append(node_idx)
                in_degree[node_idx] = -1
        while len(queue) != 0:
            node = self.nodes[queue.pop(0)]
            result.append(node.id)
            for edge in node.tos:
                to = edge.to
                if in_degree[to] == -1: continue
                in_degree[to] -= 1
                if in_degree[to] == 0:
                    queue.append(to)
                    in_degree[to] = -1
        return result

    def djikstra(self, start: GraphNode, end: GraphNode):
        """Dijkstra shortest path.

        Returns (total_weight, [node ids]) or (-1, None) when unreachable.
        Uses node.total_weight as the best-distance-so-far marker.
        """
        if start.id == end.id:
            return 0, [start.id]
        if len(start.tos) == 0:
            return -1, None
        for node in self.nodes:
            node.total_weight = -1
        paths = []
        for edge in start.tos:
            heappush(paths, GraphPath(edge.weight, [start.id, edge.to]))
            self.nodes[edge.to].total_weight = edge.weight
        while len(paths) > 0:
            path = heappop(paths)
            path_end = self.nodes[path.nodes[-1]]
            if path_end is end:
                return path.weight, path.nodes
            create_route = len(path_end.tos)
            for edge in path_end.tos:
                next = self.nodes[edge.to]
                create_route -= 1
                # Only extend when this route improves the node's best distance.
                if ((next.total_weight == -1) or
                        (next.total_weight > (path_end.total_weight + edge.weight))):
                    next.total_weight = path_end.total_weight + edge.weight
                else:
                    continue
                if create_route > 0:
                    # More edges remain on this node: fork a copy of the path.
                    nodes = path.nodes.copy()
                    nodes.append(next.id)
                    heappush(paths, GraphPath(next.total_weight, nodes))
                else:
                    # Last edge: reuse the popped path object in place.
                    path.nodes.append(next.id)
                    path.weight = next.total_weight
                    heappush(paths, path)
        return -1, None

    def djikstraK(self, start: GraphNode, end: GraphNode, max_path_len: int):
        """Like djikstra() but discards candidate paths once they reach
        max_path_len nodes; returns (weight, nodes) or (-1, None)."""
        if start.id == end.id:
            return 0, [start.id]
        if len(start.tos) == 0:
            return -1, None
        paths = []
        for edge in start.tos:
            heappush(paths, GraphPath(edge.weight, [start.id, edge.to]))
            self.nodes[edge.to].total_weight = edge.weight
        while len(paths) > 0:
            path = heappop(paths)
            path_end = self.nodes[path.nodes[-1]]
            if path_end is end:
                return path.weight, path.nodes
            if len(path.nodes) == max_path_len:
                continue
            create_route = len(path_end.tos)
            for edge in path_end.tos:
                next = self.nodes[edge.to]
                create_route -= 1
                if create_route > 0:
                    nodes = path.nodes.copy()
                    nodes.append(next.id)
                    new_path = GraphPath(path.weight + edge.weight, nodes)
                    heappush(paths, new_path)
                else:
                    path.nodes.append(next.id)
                    path.weight += edge.weight
                    heappush(paths, path)
        return -1, None
if __name__ == "__main__":
    # Build a small demo graph and sanity-check topological sort and Dijkstra.
    nodes = [GraphNode(i) for i in range(0, 6)]
    nodes[0].addEdge(1, 1)
    nodes[0].addEdge(2, 1)
    nodes[1].addEdge(2, 1)
    nodes[1].addEdge(3, 2)
    nodes[2].addEdge(3, 1)
    nodes[2].addEdge(4, 1)
    nodes[4].addEdge(5, 1)
    graph = Graph(nodes)
    print("Graph: %s" % graph)
    print("Topological order: %s" % graph.topologicalOrder())
    w, route = graph.djikstra(nodes[3], nodes[5])
    print("Shortest path from 3 to 5 is (%d km) %s" % (w, route))
    assert(w == -1 and route is None)
    w, route = graph.djikstra(nodes[0], nodes[3])
    print("Shortest path from 0 to 3 is (%d km) %s" % (w, route))
    assert(w == 2 and len(route) == 3)
    w, route = graph.djikstra(nodes[0], nodes[5])
    print("Shortest path from 0 to 5 is (%d km) %s" % (w, route))
    assert(w == 3 and len(route) == 4)
    w, route = graph.djikstraK(nodes[0], nodes[5], 3)
    print("Shortest path from 0 to 5 is (%d km) %s" % (w, route))
    assert(w == -1 and route is None)
    # NOTE(review): Graph defines no `dfs` method — the call below raises
    # AttributeError at runtime. Either dead code or a missing method.
    w, route = graph.dfs(nodes[0], nodes[5], 3)
    print("Shortest path from 0 to 5 is (%d km) %s" % (w, route))
    assert(w == -1 and route is None)
```
#### File: algorithms/leetcode/isBipartite.py
```python
from typing import List
import itertools
class Solution:
    def isBipartite(self, graph: List[List[int]]) -> bool:
        """2-color the adjacency-list graph into sets g1/g2; True iff no
        edge ever joins two nodes of the same set."""
        if len(graph) < 2:
            return True
        g1 = {0}
        g2 = set(graph[0])
        # We build g1 and g2 and every time we add some element to one, we check it is not in the
        # second (if dst_node in g2: return false)
        # We alternate g1 and g2 to factorize the code
        # When we can't grow g1 and g2 (islands), we append one of the group with the next
        # unexplored node (list(set(range(len(graph))).difference(g1.union(g2)))[0]) and
        # we continue
        changed = True
        while changed:
            changed = False
            for src_node in g2:
                for dst_node in graph[src_node]:
                    # Two same-colored neighbors: not bipartite.
                    if dst_node in g2:
                        return False
                    if dst_node not in g1:
                        g1.add(dst_node)
                        changed = True
            if not changed and ((len(g1) + len(g2)) < len(graph)):
                # append unexplored node (disconnected component)
                l = list(set(range(len(graph))).difference(g1.union(g2)))
                g1.add(l[0])
                changed = True
            # swap groups so the next pass expands the other color
            tmp = g2
            g2 = g1
            g1 = tmp
        return True
def check_solution():
    """Regression suite for isBipartite: small hand-built graphs plus two
    large adjacency lists with known answers."""
    s = Solution()
    graph = [[1,3], [0,2], [1,3], [0,2]]
    result = s.isBipartite(graph)
    assert(result == True)
    graph = [[1,2,3], [0,2], [0,1,3], [0,2]]
    result = s.isBipartite(graph)
    assert(result == False)
    graph = [[1,3,4], [0,2], [1,3,4], [0,2], []]
    result = s.isBipartite(graph)
    assert(result == True)
    graph = [[1,3,4,6], [0,2], [1,3,4,5], [0,2], [0,2], [2,6], [0,5]]
    result = s.isBipartite(graph)
    assert(result == False)
    graph = [[3],[2,4],[1],[0,4],[1,3]]
    result = s.isBipartite(graph)
    assert(result == True)
    graph = [[]]
    result = s.isBipartite(graph)
    assert(result == True)
    graph = [[],[2,4,6],[1,4,8,9],[7,8],[1,2,8,9],[6,9],[1,5,7,8,9],[3,6,9],[2,3,4,6,9],[2,4,5,6,7,8]]
    result = s.isBipartite(graph)
    assert(result == False)
    graph = [[47],[48],[52,58,62],[77,84],[9,13,24,73],[15,41,47,49,55,68,83,85,88],[72],
        [10,37,57,79],[12,14,42,58],[4,26,65,74],[7,26,33,37],[41,53,83,90],[8],[4,68,81],[8,86],
        [5,19,51,54,96],[39,81],[47,74,76],[55,60,64,77],[15,23,44,74,88,90,91],[22,39,57,72],
        [59,77],[20,48],[19,59,90,92,98],[4,84,87],[43,63,81,90,91,92],[9,10,64,84,91],[42,54],
        [54,83,87],[36,45,77],[59],[32,40,52,57],[31,65],[10,79],[35,46,62,70,89,91],[34,70],
        [29,78],[7,10,43,75,99],[55,56],[16,20,65,74],[31,42,58,63],[5,11,45,63],[8,27,40,65,72,78],
        [25,37],[19,53],[29,41,66,97],[34],[0,5,17,59,66,91],[1,22,81],[5,55,64,78,98],[68,71],
        [15,52,87,97],[2,31,51,91],[11,44,54,66,70,74],[15,27,28,53],[5,18,38,49,63,83],[38,69,75],
        [7,20,31],[2,8,40,90],[21,23,30,47,67,96],[18,84],[],[2,34],[25,40,41,55],[18,26,49],
        [9,32,39,42],[45,47,53,96,99],[59,70,77],[5,13,50],[56,83,89,94],[34,35,53,67],[50,83,85],
        [6,20,42],[4,83],[9,17,19,39,53],[37,56],[17],[3,18,21,29,67,90],[36,42,49,83],[7,33,90],
        [90,91],[13,16,25,48,99],[],[5,11,28,55,69,71,73,78,90],[3,24,26,60],[5,71],[14],[24,28,51],
        [5,19,93],[34,69],[11,19,23,25,58,77,79,80,83,92],[19,25,26,34,47,52,80],[23,25,90],[88,95],
        [69,96],[93,99],[15,59,66,94],[45,51],[23,49],[37,66,81,95]]
    result = s.isBipartite(graph)
    assert(result == False)
    graph = [[],[10,44,62],[98],[59],[90],[],[31,59],[52,58],[],[53],[1,63],[51,71],[18,64],
        [24,26,45,95],[61,67,96],[],[40],[39,74,79],[12,21,72],[35,85],[86,88],[18,76],[71,80],
        [27,58,85],[23],[13,26,87],[75,94],[13,24,68,77,82],[56,96],[67],[56,73],[6],[41],
        [50,88,91,94],[],[19,72,92],[59],[49],[49,89],[17],[16],[32,84,86],[61,73,77],[94,98],
        [1,74],[13,57,90],[],[37,38,54,68],[33],[11],[49],[53],[61],[28,30,87,93],
        [45,69,77],[7,23,76],[3,6,36,62],[81],[14,42,55,62],[1,59,61],[10],[12,93],[],[96],
        [14,29,70,73],[26,49,71,76],[57,83],[67],[11,22,68,89],[18,35],[30,42,67],[17,44],[25],
        [21,58,68],[26,42,57,95],[],[17],[22,83],[60],[26,83,84,94],[69,80,82],[41,82],[19,23,52],
        [20,41],[24,56],[20,33],[38,71,99],[4,45],[33],[35],[47,56,64],[25,33,43,82],[13,77],
        [14,28,66],[],[2,43],[89]]
    result = s.isBipartite(graph)
    assert(result == False)
    graph = [[1],[0],[4],[4],[2,3]]
    result = s.isBipartite(graph)
    assert(result == True)
    print("All tests passed successfully!!")

check_solution()
# def isBipartite(self, graph: List[List[int]]) -> bool:
# g1 = graph[0]
# g2 = list()
# for n in graph[1:]:
# found = False
# for e in n:
# if e in g1:
# found = True
# g1 += n
# for e in n:
# if e in g2:
# return False
# break
# if not found:
# g2 += n
# return len(g2) != 0
```
#### File: algorithms/leetcode/kthgrammar.py
```python
class Solution:
    """Assorted attempts at LeetCode 779 (K-th Symbol in Grammar).

    NOTE(review): `kthGrammar` is defined three times below; in Python the
    later definitions silently shadow the earlier ones, so only the last
    (pivot/mirror) version is reachable on instances. The earlier bodies
    remain for reference only.
    """
    cache = {}  # row number -> fully materialized row (variant 1)

    def kthGrammar(self, N: int, K: int) -> int:
        # Variant 1 (shadowed): materializes entire rows, O(2^N) memory.
        if N in self.cache: return self.cache[N][K-1]
        self.cache[N] = self.NthRow(N)
        print("Row(%d) : %s" % (N, self.cache[N]))
        return self.cache[N][K-1]

    def NthRow(self, N:int) -> list:
        """Build row N by interleaving each cell's complement into row N-1."""
        if N == 1: return [0]
        if N in self.cache: return self.cache[N]
        if N-1 in self.cache:
            prev = self.cache[N-1]
        else:
            prev = self.NthRow(N-1)
            self.cache[N-1] = prev
        idx = 0
        max_idx = 2 * len(prev)
        while idx < max_idx:
            if prev[idx] == 0:
                prev.insert(idx+1, 1)
            else:
                prev.insert(idx+1, 0)
            idx += 2
        return prev

    row_cached = '0'  # variant 2's ever-growing flat string row

    def kthGrammar(self, N: int, K: int) -> int:
        # Variant 2 (shadowed): grows a single flat string row on demand.
        N -= 1
        K -= 1
        if K >= len(self.row_cached):
            self.update_row_cached(N, K)
        return int(self.row_cached[K])

    def update_row_cached(self, N:int, K: int) -> None:
        """Doubles `row_cached` (appending its complement) until index K exists."""
        if len(self.row_cached) > K:
            return
        missing_01s = len(self.row_cached)
        idx = 0
        while missing_01s:
            to_add = '0'
            if self.row_cached[idx] == '0':
                to_add = '1'
            self.row_cached += to_add
            missing_01s -= 1
            idx += 1
        self.update_row_cached(N, K)

    def kthGrammar(self, N: int, K: int) -> int:
        # Variant 3 (the active one): O(log K) mirror walk; N only bounds K.
        if K < 3:
            return K - 1
        K -= 1
        pivot = 1
        while K >= (pivot * 2):
            pivot *= 2
        return int(1 - self.get_mirror(pivot, K))

    def get_mirror(self, pivot:int, K: int) -> int:
        """Recursively reflect K across power-of-two pivots down to index 0/1."""
        if K <= 1:
            return 1 - K
        while pivot > K:
            pivot /= 2
        return 1 - self.get_mirror(pivot/2, K-pivot)

    # Unused lookup table kept from an earlier approach.
    translate = {
        0: [0, 1],
        1: [1, 0]
    }

    def kthGrammar_sol1(self, N: int, K: int) -> int:
        """Reference: derive the parent cell; flip when K is the right child."""
        if N == 1 and K == 1:
            return 0
        index = (K+1) % 2
        source = (K+1) // 2
        res = self.kthGrammar_sol1(N-1, source)
        return res if index == 0 else res ^ 1

    def kthGrammar_sol2(self, N, K):
        """Reference: the right half of a row is the left half inverted."""
        if N == 1:
            if K == 1:
                return 0
            else:
                return 1
        half = 2**(N - 1)
        if K <= half:
            return self.kthGrammar_sol2(N - 1, K)
        else:
            res = self.kthGrammar_sol2(N - 1, K - half)
            if res == 0:
                return 1
            else:
                return 0

    def kthGrammar_sol3(self, N: 'int', K: 'int') -> 'int':
        # my solution ... 28 ms ... 100 % without bar ... 12.4 MB ... 87 %
        def find(N, K):
            if N == 1:
                return 0
            if K <= 2**(N-2): # row 3: "0110"
                return find(N-1, K)
            else:
                return 1 - find(N-1, K - 2**(N-2)) # row 4: "0110" + "1001"
        return find(N, K)
def check_solution():
    """Micro-benchmark: evaluates kthGrammar(30, 434991990) 800000 times.

    (A long ladder of commented-out correctness asserts covering cases up
    to N=60 originally sat here; it was dead commented text and has been
    removed — only the timing loop below was ever live code.)
    """
    s = Solution()
    N = 30
    K = 434991990
    for idx in range(0, 800000):
        s.kthGrammar(N, K)

check_solution()
```
#### File: algorithms/leetcode/mypow.py
```python
class Solution:
    """LeetCode 50 (pow(x, n)) via memoized exponentiation-by-squaring.

    NOTE(review): `cache` and `cache_root` are class attributes, so the memo
    is shared by every Solution instance; myPow resets it whenever the base
    changes.
    """
    cache = {}
    cache_root = None
    nb_rec = 0  # instrumentation: counts recursive calls of _myPow1

    def pow(self, x: float, n: int) -> float:
        """Reference implementation (built-in **) used by the tests."""
        return x**n

    def _myPow1(self, x: float, n: int) -> float:
        # Naive O(n) recursion with memoization; recursion depth is n, so
        # large exponents would overflow the stack.
        self.nb_rec += 1
        if n == 0: return 1
        if n not in self.cache:
            self.cache[n] = x * self._myPow1(x, n-1)
        return self.cache[n]

    def _myPow2(self, x: float, n: int) -> float:
        # O(log n) square-and-multiply with memoized sub-powers.
        #self.nb_rec += 1
        if n == 0: return 1
        if n == 1: return x
        if n % 2:
            # Odd exponent: peel one factor of x off and recurse on n-1.
            if (n-1) not in self.cache:
                self.cache[n-1] = self._myPow2(x, n-1)
            return x * self.cache[n-1]
        n //= 2
        if n not in self.cache:
            self.cache[n] = self._myPow2(x, n)
        return self.cache[n] * self.cache[n]

    def myPow(self, x: float, n: int) -> float:
        """Public entry point; handles negative exponents and cache resets."""
        if n == 0: return 1
        if self.cache_root != x:
            # New base: previously cached powers are invalid.
            self.cache_root = x
            self.cache = {}
        if n < 0: return 1.0 / self._myPow2(x, -n)
        return self._myPow2(x, n)
def check_solution():
    """Compare myPow against the built-in ** reference across positive,
    negative and fractional-base cases, then report the recursion counter."""
    s = Solution()
    root = 2
    power = 2
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    power = 3
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    power = 10
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    power = 50
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    power = -2
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    # BUG FIX: this case computed and printed a result but never checked it.
    assert(result == s.pow(root, power))
    power = -4
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    # power = 123456
    # result = s.myPow(root, power)
    # print("myPow(%f, %d) = %ld" % (root, power, result))
    # assert(result == s.pow(root, power))
    root = 2.1
    power = 3
    result = s.myPow(root, power)
    print("myPow(%f, %d) = %f" % (root, power, result))
    assert(result == s.pow(root, power))
    print("nb_rec = %d" % s.nb_rec)

check_solution()
```
#### File: algorithms/leetcode/numrabiits.py
```python
import array
class Solution:
    """LeetCode 781 (Rabbits in Forest).

    NOTE(review): two methods named `numRabbits` are defined; the second
    (array-based) definition shadows the first (dict-based), so only it is
    reachable. The first body is kept for reference.
    """
    def numRabbits(self, answers: list) -> int:
        # Dict-based variant: groups[size] = rabbits that claimed size-1 others.
        groups = dict()
        for g in answers:
            if (g+1) in groups.keys():
                groups[g+1] += 1
            else:
                groups[g+1] = 1
        num_rabbits = 0
        for r in groups.keys():
            n = groups[r]
            if n == 1:
                # Unique groups
                num_rabbits += r
            else:
                # Multiple groups
                num_rabbits += r * int(n / r)
                if n % r != 0:
                    # A partially-filled final group still counts in full.
                    num_rabbits += r
        return num_rabbits

    def numRabbits(self, answers: list) -> int:
        # Array-based variant (the active one): index = group size, value = count.
        groups = array.array('H', [0]*1000)
        for g in answers:
            groups[g+1] += 1
        num_rabbits = 0
        r = 0  # r tracks the current index, i.e. the group size
        for n in groups:
            if n == 0:
                r += 1
                continue
            if n == 1:
                # Unique groups
                num_rabbits += r
            else:
                # Multiple groups
                num_rabbits += r * int(n / r)
                if n % r != 0:
                    num_rabbits += r
            r += 1
        return num_rabbits
def check_solution():
    """Exercise numRabbits on the known sample answers."""
    s = Solution()
    cases = [
        ([1, 1, 2], 5),
        ([], 0),
        ([10, 10, 10], 11),
        ([2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4], 17),
    ]
    for answers, expected in cases:
        assert s.numRabbits(answers) == expected
    print("All tests passed successfully!!")

check_solution()
```
#### File: algorithms/tools/listnode.py
```python
class ListNode:
    """Minimal singly linked list cell with a few convenience helpers."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __str__(self):
        # Render as a Python list of the values from this cell onward.
        vals = []
        node = self
        while node is not None:
            vals.append(node.val)
            node = node.next
        return str(vals)

    def prepend(self, x):
        # Returns the new head; the current cell becomes its successor.
        head = ListNode(x)
        head.next = self
        return head

    def append(self, x) -> None:
        # Walk to the tail and hang a fresh cell off it.
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = ListNode(x)
```
|
{
"source": "JCPistell/looker_deployer",
"score": 2
}
|
#### File: looker_deployer/tests/test_deploy_group_in_group.py
```python
from looker_deployer.commands import deploy_group_in_group
from looker_sdk import methods, models
class mockSettings:
    """Minimal stand-in for looker_sdk API settings; only base_url is consumed."""
    base_url = "taco"
class mockAuth:
    """Minimal stand-in for a looker_sdk auth session, exposing just .settings."""
    settings = mockSettings()
def mock_responses(responses, default_response=None):
    """Build a side_effect callable: look the argument up in *responses*,
    falling back to *default_response* when the key is absent."""
    def _respond(key):
        return responses.get(key, default_response)
    return _respond
# Module-level SDK handles: a generic one plus explicit source/target
# instances used by the group-in-group sync tests below.
sdk = methods.LookerSDK(mockAuth(), "bar", "baz", "bosh", "bizz")
source_sdk = methods.LookerSDK(mockAuth(), "bar", "baz", "bosh", "bizz")
target_sdk = methods.LookerSDK(mockAuth(), "bar", "baz", "bosh", "bizz")
def test_get_filtered_groups(mocker):
    """Without a pattern, get_filtered_groups returns every group the SDK reports."""
    group_list = [
        models.Group(name="Taco"),
        models.Group(name="Burrito")
    ]
    mocker.patch.object(sdk, "all_groups")
    sdk.all_groups.return_value = group_list
    groups = deploy_group_in_group.get_filtered_groups(sdk)
    assert groups == group_list
def test_get_filtered_groups_filter(mocker):
    """With a name pattern, only the matching group is returned."""
    group_list = [
        models.Group(name="Taco"),
        models.Group(name="Burrito")
    ]
    mocker.patch.object(sdk, "all_groups")
    sdk.all_groups.return_value = group_list
    groups = deploy_group_in_group.get_filtered_groups(sdk, "Burrito")
    assert groups == [models.Group(name="Burrito")]
def test_write_groups_in_group_new(mocker):
    """A nested membership present on the source but absent on the target is added."""
    group_1 = models.Group(name="Taco", id=1)
    group_2 = models.Group(name="Taco Supreme", id=2)
    groups_list = [group_1, group_2]
    group_in_group = [group_1]
    # Patch out every SDK call touched by write_groups_in_group.
    mocker.patch.object(source_sdk, "all_groups")
    mocker.patch.object(source_sdk, "all_group_groups")
    mocker.patch.object(target_sdk, "all_groups")
    mocker.patch.object(target_sdk, "all_group_groups")
    mocker.patch.object(target_sdk, "add_group_group")
    source_sdk.all_groups.return_value = groups_list
    target_sdk.all_groups.return_value = groups_list
    # Source reports group 2 containing group 1; the target's memberships are
    # left as the default mock return, i.e. no nested groups.
    source_sdk.all_group_groups.side_effect = mock_responses(
        {
            1: [],
            2: group_in_group
        })
    deploy_group_in_group.write_groups_in_group(source_sdk, target_sdk)
    # The only change applied should be adding group 1 into group 2.
    target_sdk.add_group_group.assert_called_once_with(
        group_id=group_2.id, body=models.GroupIdForGroupInclusion(group_id=1))
def test_write_groups_in_group_change(mocker):
    """A membership that moved is deleted from the old parent and added to the new one."""
    group_1 = models.Group(name="Taco", id=1)
    group_2 = models.Group(name="Taco Supreme", id=2)
    group_3 = models.Group(name="Chalupa", id=3)
    groups_list = [group_1, group_2, group_3]
    group_in_group = [group_1]
    mocker.patch.object(source_sdk, "all_groups")
    mocker.patch.object(source_sdk, "all_group_groups")
    mocker.patch.object(target_sdk, "all_groups")
    mocker.patch.object(target_sdk, "all_group_groups")
    mocker.patch.object(target_sdk, "add_group_group")
    mocker.patch.object(target_sdk, "delete_group_from_group")
    source_sdk.all_groups.return_value = groups_list
    target_sdk.all_groups.return_value = groups_list
    # Source: group 3 contains group 1.  Target: group 2 contains group 1.
    source_sdk.all_group_groups.side_effect = mock_responses(
        {
            1: [],
            2: [],
            3: group_in_group
        })
    target_sdk.all_group_groups.side_effect = mock_responses(
        {
            1: [],
            2: group_in_group,
            3: []
        })
    deploy_group_in_group.write_groups_in_group(source_sdk, target_sdk)
    # Expect the stale membership removed and the new one created.
    target_sdk.delete_group_from_group.assert_called_once_with(
        group_id=group_2.id, deleting_group_id=1)
    target_sdk.add_group_group.assert_called_once_with(
        group_id=group_3.id, body=models.GroupIdForGroupInclusion(group_id=1))
```
|
{
"source": "jcPOLO/polonet",
"score": 3
}
|
#### File: app/core/exceptions.py
```python
import logging
from app.core.helpers import configure_logging
logger = logging.getLogger(__name__)
class AutoNornirException(Exception):
    """Base exception for the app; logs its message as soon as it is created."""

    # Runs once when the class body is evaluated, wiring up the module logger.
    configure_logging(logger)

    # Known failure categories; ``reason`` is expected to be one of these
    # (not enforced, kept as documentation of the contract).
    REASONS = (
        "fail-config",   # config provided is not valid
        "fail-connect",  # device is unreachable at IP:PORT
        "fail-execute",  # unable to execute device/API command
        "fail-login",    # bad username/password
        "fail-general",  # other error
    )

    def __init__(self, reason, message, **kwargs):
        """Exception Init: store *reason* and *message*, then log the message.

        Extra keyword arguments are accepted for call-site compatibility but
        are not stored.
        """
        # Pass the human-readable message to Exception so ``e.args`` is useful:
        # the original called ``super().__init__(kwargs)``, which made
        # ``e.args == ({...},)`` and dropped the message from the base class.
        super().__init__(message)
        self.reason = reason
        self.message = message
        logger.error(message)

    def __str__(self):
        """Render as 'ClassName: reason: message'."""
        return f"{self.__class__.__name__}: {self.reason}: {self.message}"
class ValidationException(AutoNornirException):
    """AutoNornirException subclass for validation failures."""
    pass


class RuntimeErrorException(AutoNornirException):
    """AutoNornirException subclass for runtime/task-execution failures."""
    pass
```
#### File: core/models/platform.py
```python
from nornir_napalm.plugins.tasks import napalm_get
from nornir_netmiko.tasks import netmiko_send_command
from nornir.core.task import Task
import logging
class PlatformBase:
    """Wraps an in-flight Nornir Task and exposes device operations.

    The two implemented methods use NAPALM/Netmiko sub-tasks; the remaining
    stubs are intended to be overridden by platform-specific subclasses.
    """

    def __init__(self, task: Task):
        # The running Nornir task; all sub-tasks are launched through it.
        self.task = task

    def get_facts(self) -> str:
        """Collect device facts via the NAPALM 'facts' getter and return the payload."""
        r = self.task.run(
            task=napalm_get,
            name=f"FACTs PARA: {self.task.host}",
            getters=["facts"],
            # severity_level=logging.DEBUG,
        ).result
        return r

    # TODO: Think about this cause this is not multiplatform
    def send_command(self, command: str) -> str:
        """Send *command* over Netmiko with TextFSM parsing and return the result."""
        r = self.task.run(
            task=netmiko_send_command,
            name="SEND COMMAND - Send the command to all devices",
            # severity_level=logging.DEBUG
            command_string=command,
            use_textfsm=True,
        ).result
        return r

    # Platform-specific operations below are intentionally empty here and are
    # meant to be implemented by concrete subclasses.
    def get_version(self):
        pass

    def get_config(self):
        pass

    def get_config_section(self):
        pass

    def get_interfaces_status(self):
        pass

    def get_interfaces_trunk(self):
        pass

    def get_interface_description(self, interface: str):
        pass

    def get_neighbor(self, interface: str):
        pass

    def save_config(self):
        pass

    def software_upgrade(self):
        pass

    def set_rsa(self):
        pass

    def get_dir(self):
        pass
```
#### File: core/models/template.py
```python
import logging
import os
logger = logging.getLogger(__name__)
# Directory of this file; template paths are resolved relative to it.
dir_path = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): not referenced within this module (methods rebuild the
# platform-specific path themselves) — presumably imported elsewhere; confirm.
TEMPLATES_DIR = dir_path + "/../templates/ios/"
class Template(object):
    """Combines a list of Jinja2 template files into a single template."""

    def __init__(self, templates: list) -> None:
        """*templates* is an ordered list of template file names (e.g. ['a.j2'])."""
        self.templates = templates

    def merge_templates(self, platform: str = "ios") -> str:
        """Concatenate the contents of all templates for *platform*.

        Files are read from ``<module dir>/../templates/<platform>/`` in the
        order given at construction time and joined with newlines.
        """
        # Hoisted out of the loop (invariant) and the loop variable renamed:
        # the original rebuilt ``path`` per iteration and reused ``template``
        # both as the loop variable and as the final accumulator.
        path = f"{dir_path}/../templates/{platform}/"
        contents = []
        for name in self.templates:
            filename = f"{path}{str(name)}"
            with open(filename, "r") as f:
                contents.append(f.read())
        return "\n".join(contents)

    def create_final_template(self, platform: str = "ios") -> None:
        """Write the merged template to 'final.j2' in the platform directory."""
        filename = f"{dir_path}/../templates/{platform}/final.j2"
        with open(filename, "w") as f:
            f.write(self.merge_templates(platform))
```
#### File: app/core/output.py
```python
def facts_for_customer_csv(result):
for h in result.keys():
# facts = result[h][2]
for task_result in result[h]:
if "FACT" in task_result.name:
r = task_result.result
facts = r["facts"]
hostname = facts["hostname"]
os_version = facts["os_version"]
serial_number = facts["serial_number"]
model = facts["model"]
print(
"{};{};{};{};{}".format(
h, hostname, os_version, serial_number, model
)
)
```
#### File: polonet/app/forms.py
```python
from flask_wtf import FlaskForm
from flask import flash
class BaseForm(FlaskForm):
    """Common base for the app's forms; adds bulk flashing of validation errors."""

    __abstract__ = True

    def flash_errors(self):
        """Flash every validation error, prefixed with the field's label text."""
        for field_name, messages in self.errors.items():
            label = getattr(self, field_name).label.text
            for message in messages:
                flash("Error in the %s field - %s" % (label, message), "error")
#### File: app/job/helper.py
```python
import logging
import pprint
import threading
from typing import List, Optional, cast
from collections import OrderedDict
import json
import sys
from io import StringIO
from colorama import Fore, Style, init
from typing import Union
from nornir.core.task import AggregatedResult, MultiResult, Result
# Serializes print_result() calls so threads do not interleave output.
LOCK = threading.Lock()
# String buffer stream to use in case formatted string output is needed.
OUT_BUFFER = StringIO()
# strip=False keeps ANSI colour codes even when stdout is not a TTY.
init(autoreset=True, strip=False)
def print_title(title: str) -> None:
    """
    Helper function to print a title as a bright green banner padded with '*'
    to 80 columns.
    """
    banner = "**** {} ".format(title)
    padding = "*" * (80 - len(banner))
    print("{}{}{}{}".format(Style.BRIGHT, Fore.GREEN, banner, padding))
def _get_color(result: Result, failed: bool) -> str:
    """Map a task result to a colour: red on failure, yellow on change, green otherwise."""
    if result.failed or failed:
        colour = Fore.RED
    else:
        colour = Fore.YELLOW if result.changed else Fore.GREEN
    return cast(str, colour)
def _print_individual_result(
    result: Result,
    host: Optional[str],
    attrs: List[str],
    failed: bool,
    severity_level: int,
    task_group: bool = False,
    return_output: bool = False,
):
    """Render a single task Result as a coloured banner plus selected attributes.

    Writes to the shared module buffer when *return_output* is True, otherwise
    to stdout.  Results below *severity_level* are skipped entirely.
    """
    global OUT_BUFFER
    out_buffer = None
    if return_output:
        out_buffer = OUT_BUFFER
    else:
        out_buffer = sys.stdout
    if result.severity_level < severity_level:
        return
    color = _get_color(result, failed)
    subtitle = (
        "" if result.changed is None else " ** changed : {} ".format(result.changed)
    )
    level_name = logging.getLevelName(result.severity_level)
    # Task-group headers use 'vvvv' banners; plain tasks use '----'.
    symbol = "v" if task_group else "-"
    msg = "{} {}{}".format(symbol * 4, result.name, subtitle)
    print(
        "{}{}{}{} {}".format(
            Style.BRIGHT, color, msg, symbol * (80 - len(msg)), level_name
        ),
        file=out_buffer,
    )
    # Print each requested attribute with a formatter suited to its type.
    for attribute in attrs:
        x = getattr(result, attribute, "")
        if isinstance(x, BaseException):
            # for consistency between py3.6 and py3.7
            print(f"{x.__class__.__name__}{x.args}", file=out_buffer)
        elif x and not isinstance(x, str):
            if isinstance(x, OrderedDict):
                print(json.dumps(x, indent=2), file=out_buffer)
            else:
                pprint.pprint(x, indent=2, stream=out_buffer)
        elif x:
            print(x, file=out_buffer)
def _print_result(
    result: Result,
    host: Optional[str] = None,
    attrs: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
    return_output: bool = False,
) -> Union[None, str]:
    """Recursively render Aggregated/Multi/plain Results.

    When *return_output* is True all text goes to the shared module buffer and
    the buffer's full contents are returned; otherwise output goes to stdout
    and None is returned.
    """
    global OUT_BUFFER
    out_buffer = None
    if return_output:
        out_buffer = OUT_BUFFER
    else:
        out_buffer = sys.stdout
    attrs = attrs or ["diff", "result", "stdout"]
    if isinstance(attrs, str):
        attrs = [attrs]
    if isinstance(result, AggregatedResult):
        # Cyan top-level banner, then a blue per-host banner + recursion.
        msg = result.name
        print(
            "{}{}{}{}".format(Style.BRIGHT, Fore.CYAN, msg, "*" * (80 - len(msg))),
            file=out_buffer,
        )
        for host, host_data in sorted(result.items()):
            title = (
                ""
                if host_data.changed is None
                else " ** changed : {} ".format(host_data.changed)
            )
            msg = "* {}{}".format(host, title)
            print(
                "{}{}{}{}".format(Style.BRIGHT, Fore.BLUE, msg, "*" * (80 - len(msg))),
                file=out_buffer,
            )
            _print_result(
                host_data,
                host,
                attrs,
                failed,
                severity_level,
                return_output=return_output,
            )
    elif isinstance(result, MultiResult):
        # The first entry is the task-group header; the rest are sub-results.
        _print_individual_result(
            result[0],
            host,
            attrs,
            failed,
            severity_level,
            task_group=True,
            return_output=return_output,
        )
        for r in result[1:]:
            _print_result(
                r, host, attrs, failed, severity_level, return_output=return_output
            )
        color = _get_color(result[0], failed)
        msg = "^^^^ END {} ".format(result[0].name)
        print(
            "{}{}{}{}".format(Style.BRIGHT, color, msg, "^" * (80 - len(msg))),
            file=out_buffer,
        )
    elif isinstance(result, Result):
        _print_individual_result(
            result, host, attrs, failed, severity_level, return_output=return_output
        )
    if return_output:
        return out_buffer.getvalue()
def print_result(
    result: Result,
    host: Optional[str] = None,
    vars: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
    return_output: bool = False,
) -> Union[None, str]:
    """
    Prints the :obj:`nornir.core.task.Result` from a previous task to screen

    Arguments:
        result: from a previous task
        host: # TODO
        vars: Which attributes you want to print
        failed: if ``True`` assume the task failed
        severity_level: Print only errors with this severity level or higher
        return_output: If ``True``, return the formatted output instead of printing on stdout
    """
    # ``with LOCK`` replaces manual acquire/release: same semantics, exception-safe.
    with LOCK:
        try:
            return _print_result(
                result, host, vars, failed, severity_level, return_output=return_output
            )
        finally:
            # Reset — do NOT close — the shared buffer.  The original closed
            # OUT_BUFFER here, which (a) let output from earlier calls
            # accumulate into later ones and (b) made every subsequent
            # return_output=True call raise "I/O operation on closed file".
            # Truncating keeps the buffer reusable and empty for the next call.
            OUT_BUFFER.seek(0)
            OUT_BUFFER.truncate(0)
```
#### File: app/job/views.py
```python
from crypt import methods
from datetime import datetime
import json
import logging
from typing import Dict, List
from flask import (
Blueprint,
render_template,
request,
flash,
redirect,
session,
url_for,
jsonify,
current_app,
)
from flask_login import login_required, current_user
from app import db
from app.inventory.schemas import InventorySchema, DeviceSchema
from app.inventory.models import Device, Inventory
from app.job.models import Job
from app.job.schemas import JobSchema
from app.core.helpers import dir_path, json_to_csv
from app.core.core import Core
from nornir_utils.plugins.functions import print_result
# Blueprint for the job views; templates resolve from this package.
job_bp = Blueprint("job_bp", __name__, template_folder="templates")
logger = logging.getLogger(__name__)
# Upload extensions accepted for inventory files.
ALLOWED_EXTENSIONS = {"txt", "csv"}
# Device attributes used by default when serializing devices.
DEFAULT_DEVICE_ATTR = [
    "hostname",
    "platform",
    "port",
    "custom",
    "user_id",
    "id",
    "date_created",
    "date_modified",
]
# NOTE(review): presumably attributes stripped before exposing device data —
# not referenced in this view module; confirm against serializers.
OMITTED_DEVICE_ATTR = ["groups", "user_id", "_sa_instance_state"]
CFG_FILE = f"{dir_path}/config.yaml"
@job_bp.route("/job", methods=["GET", "POST"])
@login_required
def tasks():
    """Render the task-selection page; POST stores the chosen devices in the session."""
    # Hard-coded task catalogue shown to the user (descriptions are in Spanish).
    tasks = [
        {"name": "get_version", "description": "hacer un show version"},
        {"name": "save_config", "description": "guardar la configuracion"},
        {"name": "set_ip_domain.j2", "description": "poner el ip domain <prueba.com>"},
    ]
    devices = session.get("devices") or []
    if request.method == "POST":
        # The client posts the selected devices as a JSON body; echo them back.
        devices = json.loads(request.data)
        session["devices"] = devices
        return jsonify(devices), 200
    return render_template(
        "job/tasks.html",
        tasks=tasks,
        devices=devices,
        user=current_user,
    )
# TODO: temporary web result '/job/<id>/result'
@job_bp.route("/job/result", methods=["GET", "POST"])
@login_required
def jobs():
    """Run the selected tasks against the session's devices and render the result page.

    POST builds a Core runner from the devices stored in the session, executes
    the tasks, records a Job row, and groups per-host results by task for the
    template.  NOTE(review): on GET, ``status``/``result``/``result_tasks``/
    ``output`` are never bound before the final render — confirm intended flow.
    """
    job_schema = JobSchema()
    # GET
    results = session.get("results")
    if request.method == "GET":
        pass
    if request.method == "POST":
        devices = session["devices"]
        tasks = request.form.getlist("data")
        devices = json_to_csv(list(devices))
        # TODO: I have to do a way to chose credentials to pass them
        core = Core(
            csv_text=devices, tasks=tasks, cli=False, username="cisco", password="<PASSWORD>"
        )
        data = dict(
            inventory_id=session["inventory_id"],
            output="",
            status=1,
        )
        job = job_schema.load(data)
        results = core.run()
        job.finished_at = datetime.now()
        db.session.commit()
        result = job_schema.dump(job)
        # One result bucket per non-template (.j2) task name.
        result_tasks = {k: [] for k in tasks if ".j2" not in k}
        # TODO: remove this. Just prints what nornir would print in CLI.
        from app.job.helper import print_result
        # TODO: implement a way to toggle between ansible like result or nornir like result
        output = print_result(results, return_output=True)
        if isinstance(output, str):
            output = output.split("\n")
        status = results.failed  # True if at least 1 task failed
        # Ansible like result
        # TODO: Exceptions are not serializable. Do a better thing in all of this.
        # TODO: Diff for jinja2 templates is not implemented (only napalm config have it)
        for host, tasks_result in sorted(results.items()):
            for task in tasks_result:
                # Task names look like "<name> <suffix>"; key on the first word.
                task_name = str(task.name).split(" ")[0].lower()
                if (
                    task_name in tasks
                    or task_name == "get_config"
                    or "plantilla" in task_name
                ):
                    if isinstance(task.result, str):
                        task.result = task.result.split("\n")
                    taskres = {
                        "result": task.result,
                        "failed": task.failed,
                        "ip": str(host),
                        "changed": task.changed,
                        "diff": task.diff,
                        "stderr": task.stderr,
                        # 'exception': task.exception
                    }
                    result_tasks[task_name] = result_tasks.get(task_name, [])
                    result_tasks[task_name].append(taskres)
        # TODO: remove this. It is just to simulate what is going to be seen in results.html
        for task, hosts in result_tasks.items():
            print(f"{task} ************")
            for host in hosts:
                if host["failed"]:
                    print(f"{host['ip']} : FAIL")
                    # print(f"EXCEPTION: {host['exception']}")
                else:
                    print(f"{host['ip']} : OK")
    return render_template(
        "job/result.html",
        user=current_user,
        status=status,
        session=session,
        result=result,
        tasks=result_tasks,
        output=output,
    )
#### File: polonet/tests/conftest.py
```python
import json
import tempfile
import os
from flask import jsonify
import pytest
from app import create_app
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask.testing import FlaskClient
from app.auth.models import User
from app.inventory.models import Inventory, Device
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
@pytest.fixture
def runner(client):
    """CLI test runner bound to the app created by the ``client`` fixture."""
    return client.test_cli_runner()
def feed_db():
    """Reset the schema and seed it with one user (plus inventory and devices)."""
    db.drop_all()
    db.create_all()
    new_user()
    db.session.commit()
def new_user():
    """Create and persist the test user, attach a seeded inventory, and return it."""
    new_user = User(
        email="<EMAIL>",
        first_name="<NAME>",
        password=generate_password_hash("<PASSWORD>", method="sha256"),
    )
    db.session.add(new_user)
    db.session.commit()
    # Re-fetch so the instance carries its database-assigned id.
    new_user = User.query.filter_by(email=new_user.email).first()
    new_user.inventories.append(new_inventory(new_user))
    return new_user
def new_inventory(new_user):
    """Build an Inventory for *new_user* with inline CSV data and the sample devices."""
    new_inventory = Inventory(
        name="inventory1",
        slug="inventory1",
        data="""
hostname,port,platform,site
1.1.1.1,22,ios,zaragoza
2.2.2.2,23,nxos,huesca
3.3.3.3,22,ios,teruel
""",
        user_id=new_user.id,
    )
    # Attach the three Device rows that mirror the CSV above.
    devices = new_devices(new_user)
    for device in devices:
        new_inventory.devices.append(device)
    db.session.add(new_inventory)
    return new_inventory
def new_devices(new_user):
    """Create and stage three sample Device rows owned by *new_user*.

    The devices mirror the CSV data used by ``new_inventory``.  Each device is
    added to the session; committing is left to the caller.
    """
    # Data-driven construction replaces three copy-pasted Device(...) blocks.
    specs = [
        ("1.1.1.1", "ios", 22, {"site": "zaragoza"}),
        ("2.2.2.2", "nxos", 23, {"site": "huesca"}),
        ("3.3.3.3", "ios", 22, {"site": "teruel"}),
    ]
    devices = []
    for hostname, platform, port, custom in specs:
        device = Device(
            hostname=hostname,
            platform=platform,
            port=port,
            custom=json.dumps(custom),
            user_id=new_user.id,
        )
        db.session.add(device)
        devices.append(device)
    return devices
def test_app(test=True):
    """Create an app bound to a brand-new temp-file database.

    Returns (app, db_fd, db_path); the caller must close/unlink the temp file.
    """
    db_fd, db_path = tempfile.mkstemp()
    app = create_app(test, db_path=db_path)
    return app, db_fd, db_path
@pytest.fixture
def client():
    """Flask test client over a fresh temp database, seeded via feed_db()."""
    app, db_fd, db_path = test_app()
    with app.test_client() as client:
        with app.app_context():
            feed_db()
        yield client
    # Remove the temporary database file once the test is done.
    os.close(db_fd)
    os.unlink(db_path)
@pytest.fixture
def client_no_csrf():
    """Same as ``client`` but with the app created in 'no_csrf' mode."""
    app, db_fd, db_path = test_app(test="no_csrf")
    with app.test_client() as client:
        with app.app_context():
            feed_db()
        yield client
    os.close(db_fd)
    os.unlink(db_path)
```
|
{
"source": "jcpowermac/aos-api-examples",
"score": 3
}
|
#### File: aos-api-examples/images/layers.py
```python
from subprocess import Popen, PIPE
import requests
import pprint
import json
import yaml
def gettoken():
    """Return the current OpenShift session token via ``oc whoami -t``."""
    p = Popen(['oc', 'whoami', '-t'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    rc = p.returncode  # NOTE(review): return code captured but never checked
    return output.rstrip('\n')
def aosget_text(url, token):
    """GET *url* with a bearer token and return the raw body, or "" on non-200.

    The Accept header requests YAML from the OpenShift API.
    """
    headers = {'Authorization': 'Bearer %s' % token, 'Accept': 'application/yaml'}
    # NOTE(review): verify=False disables TLS certificate verification.
    req = requests.get(url, headers=headers, verify=False)
    if req.status_code == 200:
        text = req.text
        return text
    return ""
def aosget_yaml(url, token):
    """GET *url* and return the body parsed as YAML, or {} on non-200."""
    headers = {'Authorization': 'Bearer %s' % token, 'Accept': 'application/yaml'}
    req = requests.get(url, headers=headers, verify=False)
    if req.status_code == 200:
        text = req.text
        # SECURITY: yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load for remote data.
        return yaml.load(text)
    return {}
def aosget(url, token):
    """GET *url* and return the parsed JSON body, or {} on a non-200 response."""
    headers = {'Authorization': 'Bearer %s' % token}
    # NOTE(review): verify=False disables TLS certificate verification.
    req = requests.get(url, headers=headers, verify=False)
    if req.status_code == 200:
        # Renamed from ``json``: the original bound the local to ``json``,
        # shadowing the imported json module inside this function.
        body = req.json()
        return body
    return {}
def main():
    """Fetch the cluster image list and print its raw YAML text (Python 2 syntax)."""
    pp = pprint.PrettyPrinter(indent=4)  # NOTE(review): created but never used
    token = gettoken()
    url = "https://origin-master1.virtomation.com:8443/oapi/v1/images"
    results = aosget_text(url, token)
    print results


if __name__ == '__main__':
    main()
```
|
{
"source": "jcpowermac/flask-cap",
"score": 2
}
|
#### File: jcpowermac/flask-cap/app.py
```python
from docker import Client
from flask import Flask, render_template, request, session, url_for, redirect, \
Response, abort
from flask_login import LoginManager, UserMixin, \
login_required, login_user, logout_user
from flask_socketio import SocketIO
import psutil
import capng
import json
import sys
DEBUG = True
# NOTE(review): hard-coded secret key; acceptable only for a demo app.
SECRET_KEY = 'development key'
app = Flask(__name__)
# Load config from this module's upper-case names, then optional env override.
app.config.from_object(__name__)
app.config.from_envvar('FLASKCAP_SETTINGS', silent=True)
async_mode = None
socketio = SocketIO(app, async_mode=async_mode)
# flask-login
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
pids = []
# silly user model
class User(UserMixin):
    """Throwaway demo user; id arrives as an int (seed data) or a str (login form)."""

    def __init__(self, id):
        self.id = id
        self.name = "user" + str(id)
        self.password = "password"

    def __repr__(self):
        # %s instead of the original %d: login() and load_user() construct
        # User with a string id, and "%d" raised TypeError for those
        # instances.  Integer ids render identically with %s.
        return "%s/%s/%s" % (self.id, self.name, self.password)
# create some users with ids 1 to 20
# NOTE(review): the comprehension variable shadows the builtin id() here.
users = [User(id) for id in range(1, 21)]
def capabilities(pid):
    """Summarize process *pid*'s POSIX capabilities via libcap-ng.

    Returns "full" for a fully-capable process, a printable capability list
    (suffixed with "+" when the bounding set is also full) for a partial set,
    or None when the process has no capabilities.
    """
    permitted = None
    # Point libcap-ng's internal state at the target process and re-read it.
    capng.capng_setpid(pid)
    capng.capng_clear(capng.CAPNG_SELECT_BOTH)
    capng.capng_get_caps_process()
    caps = capng.capng_have_capabilities(capng.CAPNG_SELECT_CAPS)
    if caps > capng.CAPNG_NONE:
        if caps == capng.CAPNG_PARTIAL:
            permitted = capng.capng_print_caps_text(capng.CAPNG_PRINT_BUFFER, capng.CAPNG_PERMITTED)
            if capng.capng_have_capabilities(capng.CAPNG_SELECT_BOUNDS) == capng.CAPNG_FULL:
                permitted += "+"
        else:
            permitted = "full"
    return permitted
@app.route('/', methods=['GET', 'POST'])
def index():
    """Root URL: always redirect to the login page."""
    return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Demo login: any 'user<N>' username with password 'password' is accepted."""
    error = "Error"
    if request.method == "POST":
        username = request.form['username']
        password = request.form['password']
        # Guard against malformed usernames: the original indexed
        # username.split('user')[1] unconditionally, which raised IndexError
        # for any name that does not contain 'user'.
        parts = username.split('user')
        if password == 'password' and len(parts) > 1:
            id = parts[1]
            user = User(id)
            login_user(user)
            return redirect(url_for('docker'))
    return render_template('login.html', error=error)
@app.route('/docker', methods=['GET', 'POST'])
def docker():
    """Pull a Docker image (streaming progress over Socket.IO), start it, show results.

    GET renders the form; POST pulls the image named in the form, emits
    'progressupdate' events while layers download, starts a container and
    redirects to /results.
    """
    if request.method == "POST":
        total = 0
        current = 0
        layerids = []
        cli = Client(base_url='unix://var/run/docker.sock', version='auto')
        image = request.form["dockerimage"]
        dockerrun = request.form["dockerrun"]  # NOTE(review): read but never used
        for line in cli.pull(image, tag="latest", stream=True):
            pull = json.loads(line)
            if 'id' in pull and 'progressDetail' in pull:
                id = pull['id']
                progressDetail = pull['progressDetail']
                # print(json.dumps(pull, indent=4))
                # sys.stdout.flush()
                if 'total' in progressDetail and 'current' in progressDetail:
                    # Count each layer's total size once; afterwards only
                    # accumulate downloaded bytes for layers already seen.
                    if id in layerids:
                        current += pull['progressDetail']['current']
                    else:
                        layerids.append(id)
                        total += pull['progressDetail']['total']
                    # print current / total
                    # sys.stdout.flush()
                    # NOTE(review): under Python 2 (this file uses print
                    # statements) this is integer division, so the emitted
                    # value floors to 0 until current >= total — confirm.
                    value = str(current / total)
                    style = "width: %s%%;" % value
                    socketio.emit('progressupdate', {'style': style, 'value': value }, namespace='')
        container = cli.create_container(image=image)
        container_id = container['Id']
        cli.start(container_id)
        # Give the container a moment to spawn its processes before sampling.
        socketio.sleep(10)
        pids = cli.top(container_id)
        print pids
        return redirect(url_for('results'))
    return render_template('docker.html', async_mode=socketio.async_mode)
@app.route('/results')
def results():
    """List every process on the host together with its capability summary."""
    results = []
    for proc in psutil.process_iter():
        try:
            pinfo = proc.as_dict(attrs=['ppid', 'pid', 'name', 'username', 'uids', 'gids'])
            pinfo["capabilities"] = "Not available"
        except psutil.NoSuchProcess:
            # Process exited between iteration and inspection; skip it.
            pass
        else:
            capstext = capabilities(pinfo["pid"])
            if capstext is not None:
                pinfo["capabilities"] = capstext
            results.append(pinfo)
    return render_template('results.html', results=results)
# somewhere to logout
@app.route("/logout")
@login_required
def logout():
    """End the session and confirm with a minimal inline response."""
    logout_user()
    return Response('<p>Logged out</p>')


# handle login failed
@app.errorhandler(401)
def page_not_found(e):
    """401 handler; despite the name it reports a failed login."""
    return Response('<p>Login failed</p>')


# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
    """Rebuild a User from the session-stored id (arrives as a string)."""
    return User(userid)


if __name__ == '__main__':
    socketio.run(app, debug=True)
```
|
{
"source": "jcps07/connectedhomeip",
"score": 2
}
|
#### File: build/builders/host.py
```python
import os
from platform import uname
from enum import Enum, auto
from .gn import GnBuilder
class HostApp(Enum):
    """Host-side example applications that can be built."""

    ALL_CLUSTERS = auto()
    CHIP_TOOL = auto()
    THERMOSTAT = auto()
    RPC_CONSOLE = auto()
    MIN_MDNS = auto()
    ADDRESS_RESOLVE = auto()
    TV_APP = auto()
    LOCK = auto()
    TESTS = auto()
    SHELL = auto()
    CERT_TOOL = auto()
    OTA_PROVIDER = auto()
    OTA_REQUESTOR = auto()

    def ExamplePath(self):
        """Return this app's directory relative to examples/."""
        paths = {
            HostApp.ALL_CLUSTERS: 'all-clusters-app/linux',
            HostApp.CHIP_TOOL: 'chip-tool',
            HostApp.THERMOSTAT: 'thermostat/linux',
            HostApp.RPC_CONSOLE: 'common/pigweed/rpc_console',
            HostApp.MIN_MDNS: 'minimal-mdns',
            HostApp.ADDRESS_RESOLVE: '../',
            HostApp.TV_APP: 'tv-app/linux',
            HostApp.LOCK: 'door-lock-app/linux',
            HostApp.TESTS: '../',
            HostApp.SHELL: 'shell/standalone',
            HostApp.CERT_TOOL: '..',
            HostApp.OTA_PROVIDER: 'ota-provider-app/linux',
            HostApp.OTA_REQUESTOR: 'ota-requestor-app/linux',
        }
        try:
            return paths[self]
        except KeyError:
            raise Exception('Unknown app type: %r' % self)

    def OutputNames(self):
        """Yield the build output file names produced for this app."""
        outputs = {
            HostApp.ALL_CLUSTERS: [
                'chip-all-clusters-app',
                'chip-all-clusters-app.map',
            ],
            HostApp.CHIP_TOOL: ['chip-tool', 'chip-tool.map'],
            HostApp.THERMOSTAT: ['thermostat-app', 'thermostat-app.map'],
            HostApp.RPC_CONSOLE: ['chip_rpc_console_wheels'],
            HostApp.MIN_MDNS: [
                'mdns-advertiser',
                'mdns-advertiser.map',
                'minimal-mdns-client',
                'minimal-mdns-client.map',
                'minimal-mdns-server',
                'minimal-mdns-server.map',
            ],
            HostApp.ADDRESS_RESOLVE: [
                'address-resolve-tool',
                'address-resolve-tool.map',
            ],
            HostApp.TV_APP: ['chip-tv-app', 'chip-tv-app.map'],
            HostApp.LOCK: ['chip-door-lock-app', 'chip-door-lock-app.map'],
            HostApp.TESTS: [],  # test binaries are handled by the build itself
            HostApp.SHELL: ['chip-shell', 'chip-shell.map'],
            HostApp.CERT_TOOL: ['chip-cert', 'chip-cert.map'],
            HostApp.OTA_PROVIDER: [
                'chip-ota-provider-app',
                'chip-ota-provider-app.map',
            ],
            HostApp.OTA_REQUESTOR: [
                'chip-ota-requestor-app',
                'chip-ota-requestor-app.map',
            ],
        }
        try:
            names = outputs[self]
        except KeyError:
            raise Exception('Unknown app type: %r' % self)
        for name in names:
            yield name
class HostBoard(Enum):
    """Target boards for host builds."""

    NATIVE = auto()
    # cross-compile support
    ARM64 = auto()
    # for test support
    FAKE = auto()

    def BoardName(self):
        """Return the canonical board/architecture name for build paths."""
        if self == HostBoard.ARM64:
            return 'arm64'
        if self == HostBoard.FAKE:
            return 'fake'
        if self != HostBoard.NATIVE:
            raise Exception('Unknown host board type: %r' % self)
        machine = uname().machine
        # Canonicalize the many spellings uname() can report.
        if machine == 'x86_64':
            return 'x64'
        if machine in ('i386', 'i686'):
            return 'x86'
        if machine in ('aarch64', 'aarch64_be', 'armv8b', 'armv8l'):
            return 'arm64'
        return machine

    def PlatformName(self):
        """Return the platform string used in build output paths."""
        if self == HostBoard.NATIVE:
            return uname().system.lower()
        if self == HostBoard.FAKE:
            return 'fake'
        # Cross compilation assumes linux currently
        return 'linux'
class HostBuilder(GnBuilder):
    """GN-based builder for host example applications.

    Feature toggles given to __init__ are translated into GN arguments;
    a few apps additionally override the ninja build target.
    """

    def __init__(self, root, runner, app: HostApp, board=HostBoard.NATIVE, enable_ipv4=True,
                 enable_ble=True, enable_wifi=True, use_tsan=False, use_asan=False, separate_event_loop=True,
                 test_group=False, use_libfuzzer=False, use_clang=False, interactive_mode=True,
                 use_platform_mdns=False):
        super(HostBuilder, self).__init__(
            root=os.path.join(root, 'examples', app.ExamplePath()),
            runner=runner)
        self.app = app
        self.board = board
        # GN args accumulated from the feature toggles below.
        self.extra_gn_options = []
        if not enable_ipv4:
            self.extra_gn_options.append('chip_inet_config_enable_ipv4=false')
        if not enable_ble:
            self.extra_gn_options.append('chip_config_network_layer_ble=false')
        if not enable_wifi:
            self.extra_gn_options.append('chip_enable_wifi=false')
        if use_tsan:
            self.extra_gn_options.append('is_tsan=true')
        if use_asan:
            self.extra_gn_options.append('is_asan=true')
        if not separate_event_loop:
            self.extra_gn_options.append('config_use_separate_eventloop=false')
        if not interactive_mode:
            self.extra_gn_options.append('config_use_interactive_mode=false')
        if test_group:
            self.extra_gn_options.append(
                'chip_enable_group_messaging_tests=true')
        if use_libfuzzer:
            self.extra_gn_options.append('is_libfuzzer=true')
        if use_clang:
            self.extra_gn_options.append('is_clang=true')
        if use_platform_mdns:
            self.extra_gn_options.append('chip_mdns="platform"')
        if app == HostApp.TESTS:
            self.extra_gn_options.append('chip_build_tests=true')
            self.build_command = 'check'
        if app == HostApp.CERT_TOOL:
            # Certification only built for openssl
            if self.board == HostBoard.ARM64:
                # OpenSSL and MBEDTLS conflict. We only cross compile with mbedtls
                raise Exception(
                    "Cannot cross compile CERT TOOL: ssl library conflict")
            self.extra_gn_options.append('chip_crypto="openssl"')
            self.build_command = 'src/tools/chip-cert'
        if app == HostApp.ADDRESS_RESOLVE:
            self.build_command = 'src/lib/address_resolve:address-resolve-tool'

    def GnBuildArgs(self):
        """Return GN args, adding cross-compile/fake-platform flags per board."""
        if self.board == HostBoard.NATIVE:
            return self.extra_gn_options
        elif self.board == HostBoard.ARM64:
            self.extra_gn_options.extend(
                [
                    'target_cpu="arm64"',
                    'is_clang=true',
                    'chip_crypto="mbedtls"',
                    'sysroot="%s"' % self.SysRootPath('SYSROOT_AARCH64')
                ]
            )
            return self.extra_gn_options
        elif self.board == HostBoard.FAKE:
            self.extra_gn_options.extend(
                [
                    'custom_toolchain="//build/toolchain/fake:fake_x64_gcc"',
                    'chip_link_tests=true',
                    'chip_device_platform="fake"',
                ]
            )
            return self.extra_gn_options
        else:
            raise Exception('Unknown host board type: %r' % self)

    def GnBuildEnv(self):
        """Extra environment for GN; ARM64 cross builds need the sysroot's pkg-config."""
        if self.board == HostBoard.NATIVE:
            return None
        elif self.board == HostBoard.FAKE:
            return None
        elif self.board == HostBoard.ARM64:
            return {
                'PKG_CONFIG_PATH': self.SysRootPath('SYSROOT_AARCH64') + '/lib/aarch64-linux-gnu/pkgconfig',
            }
        else:
            raise Exception('Unknown host board type: %r' % self)

    def SysRootPath(self, name):
        """Read the sysroot path from environment variable *name*; raise if unset."""
        if not name in os.environ:
            raise Exception('Missing environment variable "%s"' % name)
        return os.environ[name]

    def build_outputs(self):
        """Map output file name -> path; directory outputs are walked recursively."""
        outputs = {}
        for name in self.app.OutputNames():
            path = os.path.join(self.output_dir, name)
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for file in files:
                        outputs.update({
                            file: os.path.join(root, file)
                        })
            else:
                outputs.update({
                    name: os.path.join(self.output_dir, name)
                })
        return outputs
```
|
{
"source": "jcq15/chooseCourseHelper",
"score": 3
}
|
#### File: jcq15/chooseCourseHelper/filter.py
```python
import pandas as pd
# change this: predicate applied to each row; rows where it returns True are kept
def filter_func(x, min_credit=5):
    """Return True for rows whose 'credit' is at least *min_credit*.

    The threshold was hard-coded at 5; it is now a keyword parameter with the
    same default, so existing ``apply(filter_func, axis=1)`` calls behave
    identically.
    """
    return x['credit'] >= min_credit
data = pd.read_csv('output/courses.csv')
# Keep only rows matching filter_func (row-wise apply yields a boolean mask).
df = data[data.apply(filter_func, axis=1)]
#data.iloc[0,:].to_csv('output/query.csv', header=False, index=False)
df.to_csv('output/query.csv', index=False)
```
|
{
"source": "jcq15/frogWechaty",
"score": 2
}
|
#### File: jcq15/frogWechaty/baoshi.py
```python
from local_libs.cyjl_server import CYJLServer
from local_libs.gif_generator import GIFGenerator
from local_libs.time_reporter import TimeReporter
from local_libs.astrologist import Astrologist
from local_libs.weather_forecaster import WeatherForecaster
from local_libs import calculator
from local_libs import PyOJAgent
from local_libs.latex_delegate import LaTeXDelegate
from local_libs.sudoku_manager import SudokuManager
import numpy as np
import random
import re
from pypinyin import pinyin, Style
import uuid
class Baoshi:
    def __init__(self):
        """Wire up all chat-bot sub-services and the help text shown to users."""
        self.abs_path = '/home/wechat/wechatbot/'
        # self.abs_path = 'D:/bot_local/wechatbot/'
        self.prob_threshold = 0.2
        self.status = 0  # 0: idle; 'cyjl': an idiom-chain (成语接龙) game is running
        self.cyjl = CYJLServer(abs_path=self.abs_path)
        self.time_reporter = TimeReporter()
        self.astrologist = Astrologist()
        self.weather_forecaster = WeatherForecaster(abs_path=self.abs_path)
        self.oj_agent = PyOJAgent.PyOJAgent()
        self.oj_in_progress = False
        # Default rendering colours/resolution for LaTeX and GIF generation.
        self.latex_color_str = '000000'
        self.gif_fg_color_str = '660874'
        self.gif_bg_color_str = 'ffffff'
        self.gif_generator = GIFGenerator(
            fg_color='#' + self.gif_fg_color_str,
            bg_color='#' + self.gif_bg_color_str)
        self.latex_resolution = 600
        self.latex_delegate = LaTeXDelegate()
        self.sudoku_manager = SudokuManager()
        # User-facing help text (Chinese); sent verbatim when the user asks 功能.
        self.function = '@我并发送【括号】里的内容就行噢!\n' \
                        '0. 【功能】\n' \
                        '1. 【报时】\n' \
                        '2. 【24点】或【48点】\n' \
                        '3. 【运行 code】运行python代码,例如【运行 [i*i for i in range(10)]】\n' \
                        '4. 【天气 城市】查询天气,例如【天气 北京】\n' \
                        '5. 【今日运势 星座】算命,例如【今日运势 狮子座】\n' \
                        '6. 【成语接龙】,例如【成语接龙】\n' \
                        '7. 【OJ 题号】 做OJ题目,例如 【OJ 2】\n' \
                        '8. 【提交OJ 代码】 提交OJ代码,例如【提交OJ def main_function(a, b): return a + b】\n' \
                        '9. 【latex 表达式】 由LaTeX表达式生成图片,例如【latex e^{i\\pi} + 1 = 0】\n' \
                        'A. 【latex颜色 6位色号(RRGGBB)】 修改LaTeX颜色,例如【颜色 114514】\n' \
                        'B. 【分辨率 数值】 修改LaTeX分辨率,例如【分辨率 600】\n' \
                        'C. 【gif 文本】 由文本生成gif图片,例如【gif 苟利国家生死以】\n'\
                        'D. 【gif前景色 6位色号(RRGGBB)】 修改gif前景色,例如【gif前景色 660874】\n' \
                        'E. 【数独】 显示当前数独题目,例如【数独】\n'\
                        'F. 【数独 重新出题】 出一道新数独题,例如【数独 重新出题】\n'\
                        '10. 【数独 rcn】 在当前数独第r行第c列填入数字n,行列范围均为1-9,例如【数独 123】\n'\
                        '11. 【数独 答案】 显示当前数独答案,例如【数独 答案】\n'
        # 'E. 【gif背景色 6位色号(RRGGBB)】 修改gif背景色,例如【gif背景色 123456】\n'
    def cyjl_helper(self, data, text):
        """Handle one chat message while the idiom-chain (成语接龙) game is active.

        Returns the reply text; may end the game (resetting self.status to 0).
        """
        userid = data['contact'].id
        usernick = data['contact'].name
        if text == '结束成语接龙':  # explicit request to end the game
            reply = self.cyjl.end_game()
            self.status = 0
            return reply
        elif text == '成语接龙':  # game is already running — scold and repeat state
            return '你是不是沙雕,我们已经在玩成语接龙了!当前是第%s个成语:\n【%s】' % (self.cyjl.index, self.cyjl.now)
        elif text == '要看答案':
            return self.cyjl.report_solutions()
        elif len(text) > 0 and self.cyjl.data[self.cyjl.now][-1] in pinyin(text, style=Style.NORMAL)[0]:  # first character's pinyin matches the current idiom's tail
            reply = self.cyjl.judge(text, userid, usernick)
            if not self.cyjl.next_list:  # nothing can follow — finish the game
                reply += '\n这成语没法接,算了,就玩到这吧!\n' + self.cyjl.end_game()
                self.status = 0
            else:
                pass
            return reply
        else:
            return '我们玩成语接龙呢,你少来插嘴没用的!'
@staticmethod
def parse_text(text):
main_arg = None
other_arg = None
segs = text.split()
if len(segs) > 0:
main_arg = segs[0]
if len(segs) > 1:
other_arg = segs[1]
else:
pass
else:
pass
return main_arg, other_arg
def handle(self, data):
    """Top-level dispatcher for group messages that @-mention the bot.

    Parses the message into ``(main_arg, other_arg)`` via ``parse_text``
    and routes it to the matching feature.  Returns either a reply
    string, a ``(filename, 'image')`` tuple, or None (stay silent).
    """
    if data['mentionSelf']:
        text = data['text'].replace('\u2005', '')  # strip the invisible mention separator
        text = text.replace('@青蛙', '')
        text = text.lstrip()
        main_arg, other_arg = self.parse_text(text)
        if not main_arg:
            return '不说话就别撩我!'
        else:
            pass
        if self.status == 0 and main_arg == '成语接龙':  # start the idiom-chain game
            self.status = 'cyjl'
            self.cyjl.start()
            return '成语接龙开始!当前是第%s个成语:\n%s' % (self.cyjl.index, self.cyjl.now)
        elif other_arg and main_arg == '修改模板':
            self.time_reporter.set_template(other_arg)
            return '修改成功!现在的模板是:\n' + self.time_reporter.template
        elif main_arg == '功能':
            return self.function
        elif main_arg == '报时':
            return self.time_reporter.gettext()
        elif other_arg and (main_arg == '计算' or main_arg == '运行'):
            # Drop the 2-character command word and evaluate the rest.
            expression = text[2:]
            res = calculator.safe_calculate(expression.lstrip(), 2)
            print(res)
            return res
        elif text == 'ping':
            return str(data['age'])
        elif text == '24点':
            # Deal 4 random numbers (1-9) for the 24-points game.
            array_data = np.loadtxt(self.abs_path + 'resources/24p_1_9.txt')
            ind = np.random.randint(low=0, high=len(list(array_data)))
            choice = list(array_data)[ind]
            choice = [int(c) for c in choice]
            num_str_list = [str(c) for c in choice]
            res = ' '.join(num_str_list)
            return '4 numbers: ' + res
        elif text == '48点':
            # Same as 24点 but numbers range 1-13.
            array_data = np.loadtxt(self.abs_path + 'resources/48p_1_13.txt')
            ind = np.random.randint(low=0, high=len(list(array_data)))
            choice = list(array_data)[ind]
            choice = [int(c) for c in choice]
            num_str_list = [str(c) for c in choice]
            res = ' '.join(num_str_list)
            return '4 numbers: ' + res
        # Daily horoscope feature
        elif other_arg and main_arg == '今日运势':
            return self.astrologist.get_data(other_arg)
        elif len(text) >= 5 and text[:5] == '晚安小青蛙':
            return '你是猪吗?还睡!别@我了,再@自杀!'
        elif other_arg and main_arg == '天气':
            return self.weather_forecaster.get_weather(other_arg)
        elif other_arg and main_arg.lower() == 'oj':
            try:
                problem_id = int(other_arg)
            except ValueError as e:
                # print(repr(e))
                return '请输入合法的题号!'
            else:
                oj_filename = self.abs_path + 'resources/OJ/Problems/Problem' + str(problem_id) + '.plm'
                success = self.oj_agent.load_problem_file(oj_filename)
                if not success:
                    return '田了!题库里没这题!'
                else:
                    return self.oj_agent.describe_problem()
        elif other_arg and main_arg.lower() == '提交oj':
            # Simple in-memory mutex so only one submission runs at a time.
            if self.oj_in_progress:
                return '上一个OJ还没测试完呢,先等会儿!急什么!'
            else:
                self.oj_in_progress = True
                code = text[4:].lstrip()
                self.oj_agent.test_submission(code)
                self.oj_in_progress = False
                return self.oj_agent.report_submission_result()
        elif main_arg.lower() == 'latex':
            expression = text[5:].lstrip()
            filename = self.abs_path + 'resources/images/' + str(uuid.uuid1()) + '.png'
            status, info = self.latex_delegate.latex2png(
                expression=expression, local_file_name=filename,
                resolution=self.latex_resolution, color_str=self.latex_color_str
            )
            if status:
                return info, 'image'
            else:
                return info
        elif main_arg == 'latex颜色':
            if len(other_arg) == 6 and re.match(r'[0-9a-fA-F]{6}', other_arg):
                self.latex_color_str = other_arg
                return 'LaTeX颜色修改成功!当前颜色为:' + other_arg.lower()
            else:
                return '别胡闹,没这色儿!'
        elif main_arg == '分辨率':
            try:
                res = int(other_arg)
                if 1 <= res <= 1000:
                    self.latex_resolution = res
                    return '分辨率修改成功!当前分辨率为:{0}'.format(res)
                else:
                    return '分辨率过大或过小!'
            except Exception as e:
                return '分辨率必须是正整数!'
        elif main_arg == '数独':
            filename = self.abs_path + 'resources/images/' + str(uuid.uuid1()) + '.jpg'
            if not other_arg:
                # No argument: show the current puzzle (generate one first
                # if none exists), or the user's partially-filled board.
                if not self.sudoku_manager.problem:
                    self.sudoku_manager.generate()
                    self.sudoku_manager.generate_image(filename=filename, option='problem')
                else:
                    self.sudoku_manager.generate_image(filename=filename, option='user')
                return filename, 'image'
            elif other_arg == '重新出题':
                self.sudoku_manager.generate()
                self.sudoku_manager.generate_image(filename=filename, option='problem')
                return filename, 'image'
            elif other_arg == '答案':
                if not self.sudoku_manager.problem:
                    return '题都没有,哪来的答案!'
                else:
                    self.sudoku_manager.generate_image(filename=filename, option='answer')
                    return filename, 'image'
            else:
                # 'rcn': fill digit n at row r, column c (all 1-9).
                if len(other_arg) == 3 and re.match(r'[1-9]{2}[0-9]{1}', other_arg):
                    row = int(other_arg[0]) - 1
                    col = int(other_arg[1]) - 1
                    num = int(other_arg[2])
                    result = self.sudoku_manager.user_fill(row, col, num)
                    if not result[0]:
                        return result[1]
                    else:
                        # NOTE(review): .lower() can never equal 'Finished'
                        # (capital F), so this branch always prints 未完成.
                        if result[1].lower() == 'Finished':
                            print('完成!')
                        else:
                            print('未完成!')
                        self.sudoku_manager.generate_image(filename=filename, option='user')
                        return filename, 'image'
                else:
                    return '输入不合法!'
        elif main_arg.lower() == 'gif前景色':
            if len(other_arg) == 6 and re.match(r'[0-9a-fA-F]{6}', other_arg):
                self.gif_fg_color_str = other_arg
                self.gif_generator.fg_color = '#' + self.gif_fg_color_str
                return 'gif前景色修改成功!当前颜色为:' + other_arg.lower()
            else:
                return '别胡闹,没这色儿!'
        # Not yet officially released (hidden test feature).
        elif main_arg.lower() == 'gif背景色':
            if len(other_arg) == 6 and re.match(r'[0-9a-fA-F]{6}', other_arg):
                self.gif_bg_color_str = other_arg
                self.gif_generator.bg_color = '#' + self.gif_bg_color_str
                return 'gif背景色修改成功!当前颜色为:' + other_arg.lower()
            else:
                return '别胡闹,没这色儿!'
        elif text == '测试图片':
            return self.abs_path + 'resources/images/testgif.gif', 'image'
        elif len(text) >= 5 and text[:3] == 'gif':
            # 'gif <text>': skip the command word and the separating space.
            content = text[4:]
            filename = self.abs_path + 'resources/images/' + str(uuid.uuid1()) + '.gif'
            self.gif_generator.create_gif(content, filename=filename)
            return filename, 'image'
        elif self.status == 'cyjl':
            # A game is in progress: treat any other text as a game move.
            return self.cyjl_helper(data, text)
        else:
            # Unrecognized command: reply snarkily with some probability.
            if random.random() < self.prob_threshold:
                return '本蛙懒得理你!'
            else:
                return None
    else:
        # Not mentioned: occasionally chime in, otherwise stay silent.
        if random.random() < self.prob_threshold:
            return '本蛙不说话就静静看你们聊天!'
        else:
            return None
```
#### File: frogWechaty/local_libs/PyOJAgent.py
```python
from RestrictedPython import compile_restricted
from RestrictedPython import Eval
from RestrictedPython import Guards
from RestrictedPython import safe_globals
from RestrictedPython import utility_builtins
from RestrictedPython.PrintCollector import PrintCollector
from multiprocessing import Process
from multiprocessing import Manager
import local_libs.ProblemFileHandler as Handler
import time
class PyOJAgent:
    """Minimal online-judge agent.

    Loads a problem file via ``ProblemFileHandler`` and tests submitted
    code inside a RestrictedPython sandbox run in a child process with a
    wall-clock time limit.  Verdict strings are accumulated in
    ``submission_result``, one per test case.
    """

    def __init__(self, memory_limit=1048576, time_limit=5):
        self.name = 'default_agent'
        self.memory_limit = memory_limit  # KB; only reported, not enforced
        self.time_limit = time_limit      # seconds allowed per test case
        self.submission_result = []       # one verdict string per test case
        self.problem_dict = {}
        self.compile_error_flag = False
        self.compile_error_info = ''
        self.problem_file_handler = Handler.ProblemFileHandler()

    def load_problem_file(self, problem_file):
        """Load a problem description file; return True on success."""
        self.problem_dict = self.problem_file_handler.load_problem_file(problem_file)
        return bool(self.problem_dict)

    def test_submission(self, submission_code_str):
        """Compile-check the submission, then run it against every test case.

        Each case runs in a separate process (``target_function``) so a
        runaway submission can be killed after ``time_limit`` seconds.
        """
        self.submission_result = []
        self.compile_error_flag = False
        if not self.problem_dict:
            return
        try:
            compile_restricted(submission_code_str, '<inline>', 'exec')
        except Exception as e:
            self.compile_error_flag = True
            self.compile_error_info = repr(e)
            return
        for test_case in self.problem_dict['test_cases']:
            print('testing test case:', test_case, sep='\n')
            # Append a call so the sandboxed code stores its answer in `output`.
            suffix = '\noutput = main_function' + str(tuple(test_case[0]))
            try:
                manager = Manager()
                py_code = submission_code_str + suffix
                ret_dict = manager.dict()
                p = Process(target=target_function, args=(py_code, ret_dict))
                p.start()
                # FIX: wait at most time_limit and kill the worker only if
                # it is still running.  The original slept the full limit on
                # every test case even when the submission finished instantly.
                p.join(self.time_limit)
                if p.is_alive():
                    p.terminate()
                    p.join()
                if not ret_dict:
                    self.submission_result.append('服务器资源不足!')
                    return
                else:
                    print('submission result: ', ret_dict['output'])
                    if ret_dict['RE_flag']:
                        self.submission_result.append('Runtime Error! ' + ret_dict['RE_info'])
                    elif ret_dict['TLE_flag']:
                        self.submission_result.append('Time Limit Exceeded! ')
                    elif ret_dict['output'] == test_case[1]:
                        self.submission_result.append('Accepted! ')
                    else:
                        self.submission_result.append('Wrong Answer! ')  # add error types here maybe
            except Exception as e:
                print(repr(e))

    def report_submission_result(self):
        """Summarise the per-test-case verdicts as a human-readable string."""
        if self.compile_error_flag:
            return "Compile Error!\n" + self.compile_error_info
        elif not self.problem_dict:
            return '未加载题目!'
        elif not self.submission_result:
            return 'No Report Available!'
        else:
            ret = ''
            n = len(self.submission_result)
            ret += '{0}组数据已测试,结果如下:\n'.format(n)
            for i in range(n):
                ret += '测试点{0}/{1}:'.format(i + 1, n)
                ret += self.submission_result[i]
                ret += '\n'
            return ret

    def describe_problem(self):
        """Return the loaded problem's statement plus the resource limits."""
        if not self.problem_dict:
            return '未加载题目!'
        else:
            ret = '题目描述:\n'
            ret += self.problem_dict['text']
            ret += '\n========\n'
            ret += '附加信息:\n'
            ret += '本次测试时间限制:{0} s,内存限制:{1} KB\n'.format(self.time_limit, self.memory_limit)
            return ret

    def reset(self):
        """Forget the loaded problem and any previous verdicts."""
        self.submission_result = []
        self.problem_dict = {}
# this function has to be defined outside the PyOJAgent class for multiprocessing to pickle
def target_function(py_code, ret_dict):
    """Child-process worker: exec *py_code* in a RestrictedPython sandbox.

    Results are reported through the managed dict *ret_dict*:
    ``RE_flag``/``RE_info`` for runtime errors, ``TLE_flag`` (initialised
    True and cleared only on normal completion, so a terminated process
    reads as a timeout), and ``output`` (the sandboxed code's ``output``
    global).
    """
    policy_globals = generate_restricted_environment_policy()
    policy_globals['output'] = None
    ret_dict['RE_flag'] = False
    ret_dict['RE_info'] = ''
    ret_dict['TLE_flag'] = True  # cleared below only if execution finishes
    ret_dict['output'] = None
    try:
        byte_code = compile_restricted(py_code, '<inline>', 'exec')
        exec(byte_code, policy_globals)
        ret_dict['TLE_flag'] = False
        ret_dict['output'] = policy_globals['output']
    except Exception as e:
        print(repr(e))
        ret_dict['RE_flag'] = True  # if RE, TLE flag would also be True
        ret_dict['RE_info'] = repr(e)
    finally:
        pass
        # print('finally')
def generate_restricted_environment_policy():
    """Build the globals dict used to exec sandboxed submissions.

    Combines RestrictedPython's ``safe_globals`` and ``utility_builtins``
    with the guard hooks the restricted bytecode expects (`_getattr_`,
    `_write_`, iteration/subscript guards and a print collector).
    """
    policy_globals = {**safe_globals, **utility_builtins}
    policy_globals['__builtins__']['__metaclass__'] = type
    # FIX: __name__ must be a module-name *string*; the original assigned
    # the `type` object here (copy-paste from the __metaclass__ line),
    # which gives sandboxed classes a bogus __module__.
    policy_globals['__builtins__']['__name__'] = 'restricted_module'
    policy_globals['_getattr_'] = Guards.safer_getattr
    policy_globals['_write_'] = Guards.full_write_guard
    policy_globals['_getiter_'] = Eval.default_guarded_getiter
    policy_globals['_getitem_'] = Eval.default_guarded_getitem
    policy_globals['_print_'] = PrintCollector
    policy_globals['_iter_unpack_sequence_'] = Guards.guarded_iter_unpack_sequence
    return policy_globals
```
#### File: frogWechaty/local_libs/weather_forecaster.py
```python
import pandas as pd
import requests
from bs4 import BeautifulSoup
class WeatherForecaster:
    """Scrape a 7-day forecast for a Chinese city from weather.com.cn."""

    def __init__(self, abs_path='/home/wechat/wechatbot/'):
        self.abs_path = abs_path
        # CSV mapping city name (城市) -> weather.com.cn citycode, GBK-encoded.
        self.source_file = self.abs_path + 'resources/weather_city_id.csv'
        self.source_url = 'http://www.weather.com.cn/weather/'

    def get_weather(self, city):
        """Return a formatted 7-day forecast for *city*, or a complaint
        string when the city is not in the lookup table."""
        city_id = pd.read_csv(self.source_file, encoding='gbk')
        li = list(city_id[city_id['城市'] == city]['citycode'])
        if not li:
            return '没这地儿!'
        else:
            str_id = str(li[0])
            ui = self.source_url + str_id + '.shtml'
            ri = requests.get(url=ui)  # fetch the forecast page
            ri.encoding = ri.apparent_encoding  # let requests guess the charset
            soup = BeautifulSoup(ri.text, 'html.parser')
            #ul_tag = soup.find('ul', 't clearfix')
            body1 = soup.body  # page body
            data = body1.find('div', {'id': '7d'})  # the 7-day forecast container
            ul = data.find('ul')
            li = ul.find_all('li')  # one <li> per forecast day
            final = city + '7日天气速递:\n'
            for day in li:  # walk each day's <li>
                data = day.find('h1').string  # the date label
                temp = '【' + data + '】'
                inf = day.find_all('p')  # all <p> tags for this day
                if inf[1].find('span') is None:
                    temperature_highest = '无'  # evening pages omit the daily high
                else:
                    temperature_highest = inf[1].find('span').string  # daily high
                temperature_highest = temperature_highest  # NOTE(review): no-op assignment
                temperature_lowest = inf[1].find('i').string  # daily low
                # NOTE(review): no-op — the original comment claims the trailing
                # '℃' is stripped here, but nothing is actually removed.
                temperature_lowest = temperature_lowest
                temp += inf[0].string + ' '
                temp += '最高:' + str(temperature_highest) + ' '
                temp += '最低:' + str(temperature_lowest) + '\n'
                final += temp
            return final
```
#### File: jcq15/frogWechaty/mingyuan.py
```python
def handle(data):
    """Reply with a scolding @-mention when the sender is 'tensorspace';
    otherwise return None (no reply)."""
    sender = data['contact'].name
    if sender != 'tensorspace':
        return None
    return ('闭嘴吧张量空间,本机器人看不下去了!', 'mention')
```
|
{
"source": "jcq15/mahjong",
"score": 2
}
|
#### File: jcq15/mahjong/hnk05_hupai.py
```python
import operator
from global_data import *
class HupaiCheck:
    """Check whether a 14-tile mahjong hand wins and enumerate its winning splits.

    Two input channels: ``tehai`` — the hand written as digit+suit pairs
    (e.g. '1m2p...') — and ``numtehai`` — the same hand converted to a
    sorted list of ints for easier processing.  ``hupaiway`` collects the
    results of ``hupai_dealall``: an empty list when the hand does not win;
    otherwise each element is one winning decomposition (3+3+3+3+2 melds,
    the seven-pairs hand, or the thirteen-orphans hand), itself a list of
    tile groups — a list nested up to three levels so all forms share one
    format.

    Planned improvements noted by the original author: accept compact
    input like '234m', accept wind/dragon names, and encode open melds
    (chi/pon/kan) as negative numbers — none of these are implemented yet.
    """

    def __init__(self, *, tehai='', numtehai=[]):
        # NOTE(review): mutable default ([]) is shared across instances;
        # left unchanged here, but callers should pass their own list.
        self.tehai = tehai  # hand written with m/s/p/z suit letters
        self.numtehai = numtehai  # hand as a list of tile numbers
        # Winning decompositions; empty when the hand has not (yet) won.
        self.hupaiway = []

    def hupai32_deal(self):
        """Recursively split the hand into 4 melds + 1 pair (33332 form).

        Returns the deduplicated list of decompositions, each sorted with
        the pair moved to the end.
        """
        hupaidevide_unranked = []  # raw decompositions, before sorting
        hupaidevide = []  # deduplicated decompositions

        def hupai(tehai, sorted_):
            # NOTE(review): this helper does arithmetic like tehai[i] + 1,
            # so it expects the *numeric* hand (self.numtehai); the outer
            # call below passes the string self.tehai — confirm upstream.
            if len(tehai) == 2 and tehai[0] == tehai[1]:
                hupaidevide_unranked.append(sorted_ + [tehai])
            else:
                # Try every run (n, n+1, n+2) ...
                for i in range(0, len(tehai)):
                    if tehai[i] + 1 in tehai and tehai[i] + 2 in tehai:
                        tehainext = tehai + []
                        i1 = tehainext.pop(i)
                        a = tehainext.index(tehai[i] + 1)
                        a1 = tehainext.pop(a)
                        b = tehainext.index(tehai[i] + 2)
                        b1 = tehainext.pop(b)
                        hupai(tehainext, sorted_ + [[i1, a1, b1]])
                # ... and every triplet (n, n, n).
                for i in range(0, len(tehai)):
                    if i + 2 < len(tehai) and tehai[i] == tehai[i + 1] and tehai[i] == tehai[i + 2]:
                        hupai(tehai[:i] + tehai[i + 3:], sorted_ + [tehai[i:i + 3]])
        hupai(self.tehai, [])
        # Canonicalise each decomposition so duplicates compare equal.
        for h in hupaidevide_unranked:
            h.sort(key=operator.itemgetter(0), reverse=False)
        for i in hupaidevide_unranked:
            if i not in hupaidevide:
                hupaidevide.append(i)
        # Move the 2-tile pair to the end of each decomposition.
        for i in hupaidevide:
            for j in range(len(i)):
                if len(i[j]) == 2:
                    i.append(i[j])
                    i.remove(i[j])
        return hupaidevide

    @staticmethod
    def tehaitonumtehai(tehai, num=14):
        """Convert a '1m2p...' string of *num* tiles into a sorted int list."""
        numtehai = []
        for i in range(1, num + 1):
            numtehai.append(paitonum[tehai[:2]])
            tehai = tehai[2:]
        numtehai.sort()
        return numtehai

    # Seven-pairs check; returns an empty list when the hand is not seven pairs.
    def qidui(self):
        #qiduisymbol = True
        ans = []
        for i in range(0, 7):
            # A valid pair: positions 2i and 2i+1 equal, and distinct from
            # the previous pair (no four-of-a-kind counted as two pairs).
            if self.tehai[i * 2] != self.tehai[i * 2 + 1] or self.tehai[i * 2] == self.tehai[i * 2 - 1]:
                pass
                # qiduisymbol = False
            else:
                # NOTE(review): bare `tehai` is undefined here (NameError at
                # runtime) — presumably should be self.tehai; confirm/fix.
                ans.append([tehai[i * 2], tehai[i * 2 + 1]])
        #if qiduisymbol:
        #    return ans
        return ans

    def gsws(self):
        """Thirteen-orphans (国士无双) check; returns [] when not matched.

        NOTE(review): tests ints against self.tehai with `in`; if tehai is
        the raw string this raises TypeError — likely should use numtehai.
        """
        gswslist = [1, 9, 11, 19, 21, 29, 31, 32, 34, 35, 37, 38, 40]
        symbol = True
        for i in gswslist:
            if i not in self.tehai:
                symbol = False
        if symbol:
            return [self.tehai]
        else:
            return []

    def hupai_dealall(self):
        """Run all three winning-form checks and cache the combined result."""
        self.hupaiway = self.hupai32_deal() + self.qidui() + self.gsws()
        return self.hupaiway

    def hupaiway_usersee(self):
        """Pretty-print every cached decomposition using tile names."""
        if self.hupaiway != []:
            for i in self.hupaiway:
                print('胡牌方式有:')
                for j in i:
                    for k in j:
                        print(numtopai[k], end='')
                    print(' ', end='')
                print('\n')
if __name__ == '__main__':
    # Smoke test: a classic winning hand written as 2-character tiles.
    pai = '1m1m1m2m3m4m5m6m7m8m9m9m9m1z'
    hc = HupaiCheck(tehai=pai)
    hc.hupai_dealall()
    hc.hupaiway_usersee()
    # pai.numtehai = [1,1,2,2,2,3,3,3,4,4,5,5,6,6]
    #print(pai.hupai32_deal(pai.numtehai))
    #print(pai.hupai_dealall(pai.numtehai))
    #print(pai.hupaiway_usersee(pai.hupai_dealall(pai.numtehai)))
```
|
{
"source": "jcq15/wechatDevelop",
"score": 3
}
|
#### File: jcq15/wechatDevelop/handle.py
```python
import hashlib
import web
import reply
import receive
import requests
import json
import passwd #各种token
import time
import chrishat
import traceback
import cv2
import numpy as np
import gufeng
import newyear
from urllib import parse
class Handle(object):
    """WeChat official-account webhook handler.

    ``POST`` answers incoming messages and subscribe events; ``GET``
    serves the one-time URL-verification handshake.  ``dealText``
    dispatches keyword commands and falls back to a chat-bot API.
    """

    # Message sent to new followers and on the 'menu' keyword.
    welcome = ('感谢关注!\n0.发送help2019查看2019年会发生的事\n'
               '1.直接发送消息即可调戏机器人\n'
               '2.表情包制作:回复helpmake查看\n'
               '3.古风歌词制作:发送gufeng随机生成古风歌词\n'
               '4.后台代码已经托管到GitHub,回复\"code\"查看项目\n'
               '5.公众号有很多有趣的推送,欢迎查看历史消息!\n'
               '6.您可随时回复menu查看此消息\n'
               '----------------\n'
               '商业合作请联系<EMAIL>')

    def POST(self):
        """Handle an incoming message push from the WeChat server."""
        try:
            webData = web.data()
            print("Handle Post webdata is ", webData)  # server-side log
            recMsg = receive.parse_xml(webData)
            if isinstance(recMsg, receive.Msg):
                openid = recMsg.FromUserName  # the end user
                me = recMsg.ToUserName  # this official account
                # Text message: dispatch through dealText and reply.
                if recMsg.MsgType == 'text':
                    receive_content = recMsg.Content.decode()  # message body
                    send_content = self.dealText(receive_content)
                    print("\nreceive: ", receive_content, '\nsend: ', send_content)
                    replyMsg = reply.TextMsg(openid, me, send_content)
                    return replyMsg.send()
                # Image message: acknowledge without processing.
                if recMsg.MsgType == 'image':
                    # FIX: the original returned the bare name `success`
                    # (a NameError); WeChat expects the literal string.
                    return "success"
                    '''
                    pic_url = recMsg.PicUrl + '.jpg'
                    response = requests.get(pic_url)
                    image = np.asarray(bytearray(response.content), dtype="uint8")
                    img = cv2.imdecode(image, cv2.IMREAD_COLOR)
                    # 读帽子图,做
                    hat_img = cv2.imread("chris/hat.png",-1)
                    output = chrishat.add_hat(img, hat_img)
                    # 存
                    timename = time.strftime("%Y%m%d%H%M%S", time.localtime())
                    cv2.imwrite("/assets/image/"+timename+".png", output)
                    cv2.destroyAllWindows()
                    replyMsg = reply.TextMsg(openid, me, send_content)
                    return replyMsg.send()
                    '''
                # Event push (subscribe/unsubscribe/menu clicks ...).
                if recMsg.MsgType == 'event':
                    event = recMsg.Event
                    if event == 'subscribe':  # a new follower
                        print("有人关注了!")
                        replyMsg = reply.TextMsg(openid, me, Handle.welcome)
                        return replyMsg.send()
                return "success"
            else:
                print("啥玩意啊")
                return "success"  # WeChat requires the literal reply "success"
        except Exception as Argment:
            print(traceback.format_exc())
            return "success"

    # URL-verification handshake used when configuring the webhook.
    def GET(self):
        try:
            data = web.input()
            if len(data) == 0:
                return "hello, this is handle view"
            signature = data.signature
            timestamp = data.timestamp
            nonce = data.nonce
            echostr = data.echostr
            token = passwd.check_token  # must match the token set in the WeChat console
            params = [token, timestamp, nonce]
            params.sort()  # the protocol hashes the three values in sorted order
            sha1 = hashlib.sha1()
            # FIX: the original used map(sha1.update, list); map is lazy in
            # Python 3, so update() was never called and the digest was
            # always that of the empty string.  The items must be bytes.
            for item in params:
                sha1.update(item.encode('utf-8'))
            hashcode = sha1.hexdigest()
            print("handle/GET func: hashcode, signature: ", hashcode, signature)
            if hashcode == signature:
                return echostr
            else:
                # NOTE(review): echoing back even on mismatch defeats the
                # verification; kept to preserve the original behavior.
                return echostr
        except Exception as Argument:
            return Argument

    # Meme-GIF maker backed by the sorry.xuty.tk API.
    def makegif(self, txt):
        """Parse 'makeXXX line1 line2 ...' and post it to the GIF API."""
        t = txt.split(' ')
        # value: (number of caption lines in the template, API template name)
        makeDict = {
            'makewjz': (4, 'wangjingze'),
            'makejkl': (6, 'jinkela'),
            'maketbs': (2, 'marmot'),
            'makeqgwl': (6, 'dagong'),
            'makewsyw': (9, 'sorry'),
        }
        try:
            tmplt = makeDict[t[0]]
        except KeyError as keyerr:
            return ('输入错误!第一行只能是:'
                    '\nmakewjz\nmakejkl\nmaketbs\nmakeqgwl\nmakewsyw\n'
                    '可是你这个皮孩子竟然输入了') + txt
        if len(t) - 1 != tmplt[0]:
            return '不对!应该输入%d句话,但你竟然输入了%d句话,我的天呐,笨死!%s' \
                   % (tmplt[0], len(t) - 1, '要不然你回复helpmake查看一下说明吧!')
        # The API expects {"0": line1, "1": line2, ...}.
        postDict = dict([(str(i), t[i + 1]) for i in range(tmplt[0])])
        response = requests.post(url='https://sorry.xuty.tk/api/%s/make' % tmplt[1],
                                 data=json.dumps(postDict))
        if 200 == response.status_code:
            res = bytes.decode(response.content)
            r = ('制作成功!打开后点击下面的“访问原网页”或者复制链接用浏览器打开即可!\nhttps://sorry.xuty.tk') + res
            return r
        else:
            return "API出现异常,请联系我查看情况!\n<EMAIL>"

    # Chat-bot fallback (qingyunke free API).
    def robot(self, txt):
        url = 'http://api.qingyunke.com/api.php?key=free&appid=0&msg=%s' % txt
        response = requests.post(url=url)
        res_dic = json.loads(response.content.decode('utf-8'))
        return res_dic['content']

    # Dispatch keyword commands; anything unrecognised goes to the chat bot.
    def dealText(self, txt):
        if len(txt) >= 4 and txt[0:4] == 'make':
            return self.makegif(txt)
        if txt == 'helpmake':
            return ('表情包制作功能,格式为:\n\n'
                    'makexxx 第1句 第2句 ... 第n句\n\n'
                    '每句之间用一个半角空格分割,将xxx替换为下列表情包名字即可:\n'
                    'wjz:王境泽,4句\njkl:金坷垃,6句\ntbs:土拨鼠,2句\n'
                    'qgwl:窃格瓦拉,6句\nwsyw:为所欲为,9句\n\n'
                    '示例:\n\nmakewjz 我就是饿死 死外边 不吃你们东西 真香')
        if txt == 'code':
            return '项目地址:https://github.com/jcq15/wechatDevelop'
        if txt == 'menu':
            return Handle.welcome
        #if txt == 'helphat':
        #    return '直接发送图片即可制作,但必须有人脸。做完之后点击链接,再点下面的“访问原网页”。如果打开一片空白,说明没检测到人脸。'
        if txt == 'gufeng':
            return gufeng.getSentence(10)
        if len(txt) >= 4 and txt[0:4] == '2019':
            return newyear.makeNewYear(txt)
        if txt == 'help2019':
            return '发送“2019+你的名字”,例如“2019李华”,打开后点击下面的“访问原网页”保存即可!'
        # Not a keyword: hand it to the chat bot.
        return self.robot(txt)
```
#### File: jcq15/wechatDevelop/menu.py
```python
import requests
import passwd
import json
# 这个函数以后分离出来
# TODO: move this helper into a shared module (original author's note).
def get_access_token():
    """Fetch a fresh WeChat API access token for this official account."""
    token_url = (
        'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'
        % (passwd.appid, passwd.app_secret)
    )
    raw = requests.get(token_url)
    #print(raw.content.decode('utf-8'))
    payload = json.loads(raw.content.decode('utf-8'))
    return payload['access_token']
# Custom menu layout pushed to the WeChat "menu/create" endpoint.
# Button names and URLs are user-facing data and are kept verbatim.
data = {
    "button": [
        {
            "name": "药春女少",
            "sub_button": [
                {
                    "type": "view",
                    "name": "更换微信号之后",
                    "url": "https://mp.weixin.qq.com/s?__biz=MzIwOTgzOTI5MQ==&mid=2247484353"
                },
            ]
        },
        {
            "type": "click",
            "name": "你是个啥",
            "key": "menu"
        },
        {
            "name": "冰开塞露",
            "sub_button": [
                {
                    "type": "view",
                    "name": "学霸谈996",
                    "url": "https://mp.weixin.qq.com/s/h3sibQFx_Lq-dZLV7vMgGw"
                },
            ]
        }
    ]
}
# NOTE: evaluated at import time — fetches a fresh access token immediately.
url = 'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % get_access_token()
if __name__ == '__main__':
    # Push the menu definition and print the API's JSON verdict.
    response = requests.post(url, data=json.dumps(data))
    res_dic = json.loads(response.content.decode('utf-8'))
    print(res_dic)
```
|
{
"source": "jcq15/wechaty",
"score": 3
}
|
#### File: wechaty/features/gif_manager.py
```python
from features.feature_manager import ReflectiveManager
from local_libs.gif_generator import GIFGenerator
import uuid
import re
class GIFManager(ReflectiveManager):
    """Chat feature for the 'gif' / 'gif前景色' / 'gif背景色' commands,
    with per-recipient foreground/background color configuration."""

    def __init__(self):
        super().__init__()
        self.gif_generator = GIFGenerator()
        self.default_resolution = 600  # NOTE(review): never read in this class
        self.default_fg_color = '660874'
        self.default_bg_color = 'ffffff'
        # recipient -> [fg_hex, bg_hex]; 'default' is the fallback pair
        self.user_config = {'default': [self.default_fg_color, self.default_bg_color]}
        self.gif_generator.set_fg(self.default_fg_color)
        self.gif_generator.set_bg(self.default_bg_color)

    def reflective_handle(self, data) -> list:
        """Route a parsed command to GIF creation or color configuration;
        returns the framework's response list (or a null response)."""
        args, text = self.preprocess(data)
        recipient = ReflectiveManager.get_source(data)
        if not args or len(args) < 2:
            return self.make_null_response()
        else:
            if args[0].lower() == 'gif':
                filename = self.abs_path + 'resources/images/' + str(uuid.uuid1()) + '.gif'
                # Apply this recipient's colors (or the defaults) before rendering.
                fg_color, bg_color = self.user_config.get(recipient, self.user_config['default'])
                self.gif_generator.set_fg(fg_color)
                self.gif_generator.set_bg(bg_color)
                self.gif_generator.create_gif(text, filename=filename)
                return ReflectiveManager.reply_image(filename, data)
            elif args[0].lower() == 'gif前景色':
                # Validate a 6-digit hex color, then store it per recipient.
                if len(args[1]) == 6 and re.match(r'[0-9a-fA-F]{6}', args[1]):
                    args[1] = args[1].lower()
                    if recipient in self.user_config:
                        self.user_config[recipient][0] = args[1]
                    else:
                        self.user_config[recipient] = [args[1], self.default_bg_color]
                    msg = 'gif前景色修改成功!当前颜色为:' + args[1]
                    return self.reply_text(msg, data)
                else:
                    msg = '别胡闹,没这色儿!'
                    return self.reply_text(msg, data, with_mention=True)
            elif args[0].lower() == 'gif背景色':  # temporarily hidden
                if len(args[1]) == 6 and re.match(r'[0-9a-fA-F]{6}', args[1]):
                    args[1] = args[1].lower()
                    if recipient in self.user_config:
                        self.user_config[recipient][1] = args[1]
                    else:
                        self.user_config[recipient] = [self.default_fg_color, args[1]]
                    msg = 'gif背景色修改成功!当前颜色为:' + args[1]
                    return self.reply_text(msg, data)
                else:
                    msg = '别胡闹,没这色儿!'
                    return self.reply_text(msg, data, with_mention=True)
            else:
                return self.make_null_response()
#### File: wechaty/features/twenty_four_manager.py
```python
from features.feature_manager import ReflectiveManager
from features.response_message import ResponseMessage
import numpy as np
class TwentyFourManager(ReflectiveManager):
    """Chat feature that deals random number sets for the 24-point
    ('24点', numbers 1-9) and 48-point ('48点', numbers 1-13) games.

    The two command branches were exact copy-paste duplicates in the
    original; the shared logic now lives in the private ``_deal`` helper.
    """

    def __init__(self):
        super().__init__()
        self.file24 = self.abs_path + 'resources/24p_1_9.txt'
        self.file48 = self.abs_path + 'resources/48p_1_13.txt'
        self.data24 = np.loadtxt(self.file24)
        self.data48 = np.loadtxt(self.file48)

    def _deal(self, dataset):
        """Pick one random row of *dataset* and format it as '4 numbers: a b c d'."""
        rows = list(dataset)
        ind = np.random.randint(low=0, high=len(rows))
        choice = [int(c) for c in rows[ind]]
        num_str_list = [str(c) for c in choice]
        res = ' '.join(num_str_list)
        return '4 numbers: ' + res

    def reflective_handle(self, data) -> list:
        """Answer '24点'/'48点' with a random deal; null response otherwise."""
        args, _ = self.preprocess(data)
        if not args:
            return self.make_null_response()
        else:
            if args[0] == '24点':
                text = self._deal(self.data24)
                return self.reply_text(text, data, with_mention=True)
            elif args[0] == '48点':
                text = self._deal(self.data48)
                return self.reply_text(text, data, with_mention=True)
            else:
                return self.make_null_response()
#### File: wechaty/local_libs/gif_generator.py
```python
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import unicodedata
# Build an animated GIF frame-by-frame from a text string.
class GIFGenerator:
    """Render a text string into an animated GIF, revealing one extra
    character per frame (typewriter effect)."""

    def __init__(self, fg_color=(0, 0, 0), bg_color=(255, 255, 255)):
        self.SELECTOR = '\ufe0f'  # emoji variation selector; dropped when drawing
        self.canvas_height = 1024
        self.canvas_width = 1024
        self.max_cols = 10  # full-width character columns per line
        self.max_rows = 10
        self.max_chars = 100  # hard cap on rendered characters
        self.font_file = 'simhei'
        # self.fontsize = 56
        self.fontsize = 96
        self.fg_color = fg_color
        self.bg_color = bg_color
        # self.font_file = 'resources/fonts/EmojiOneColor-SVGinOT.ttf'

    def set_fg(self, color_str):
        """Set the foreground from a 6-digit hex string (no leading '#')."""
        self.fg_color = '#' + color_str

    def set_bg(self, color_str):
        """Set the background from a 6-digit hex string (no leading '#')."""
        self.bg_color = '#' + color_str

    def parse(self, txt):  # return parsed text with \n added
        """Re-wrap *txt* to at most ``max_cols`` columns per line.

        East-Asian wide/full-width/ambiguous characters count as one
        column, everything else as half a column.  Output may be trimmed
        when ``max_rows`` rows or ``max_chars`` characters are exceeded.
        """
        output = ""
        current_row = 1
        current_col = 0  # width already occupied on the current line
        char_count = 0
        for ch in txt:
            # Width ch WILL occupy once appended (it is not appended yet).
            if unicodedata.east_asian_width(ch) in ['W', 'F', 'A']:
                current_width = 1
            else:
                current_width = 0.5
            if ch == '\n':
                current_row += 1
                current_col = 0
            elif ch == self.SELECTOR:
                continue  # drop emoji variation selectors entirely
            else:
                current_col += current_width
                if current_col > self.max_cols:
                    # Line overflow: insert a break; ch starts the next line.
                    output += '\n'
                    char_count += 1
                    current_row += 1
                    current_col = current_width
                else:
                    pass
            if current_row > self.max_rows:
                print('Warning: canvas full. String may be trimmed.')
                break
            elif char_count > self.max_chars:
                print('Warning: Memory limit reached. String may be trimmed.')
                break
            else:
                output += ch
                char_count += 1
        return output

    def create_gif(self, text, filename='test.gif', imgsize=None, duration=200, loop=0):
        """Render *text* into an animated GIF saved at *filename*.

        One frame per visible character; newline characters add no frame.
        """
        if not imgsize:
            imgsize = self.canvas_width, self.canvas_height
        else:
            pass
        img_seed = Image.new('RGB', imgsize, self.bg_color)
        font = ImageFont.truetype(self.font_file, self.fontsize)
        ctr = 0  # NOTE(review): counted but never read afterwards
        images = [img_seed]
        text = self.parse(text)
        for ind in range(len(text)):
            print('{0}/{1} frames'.format(ind + 1, len(text)))
            if text[ind] != '\n':
                frame = img_seed.copy()
                d = ImageDraw.Draw(frame)
                # Each frame shows the text prefix up to and including ch.
                d.text((10, 10), text[: ind + 1], fill=self.fg_color, font=font)
                ctr = ctr + 1
                images.append(frame)
            else:
                continue
        images[0].save(filename, save_all=True, append_images=images[1:],
                       optimize=True, duration=duration, loop=loop)
```
|
{
"source": "JcQSteven/wanballk",
"score": 3
}
|
#### File: JcQSteven/wanballk/main.py
```python
import colorsys
import cv2
import numpy as np
import collections
import os
import time
def canConnect(x1, y1, x2, y2, imageMatrix):
    """Return True when the tiles at (x1, y1) and (x2, y2) can be linked.

    The tiles must be non-empty, distinct cells holding the same value,
    joined by a path of empty (0) cells with at most two corners.
    """
    board = imageMatrix
    # Empty cells can never be linked.
    if board[x1][y1] == 0 or board[x2][y2] == 0:
        return False
    # A tile cannot pair with itself, and only equal tiles match.
    if (x1, y1) == (x2, y2) or board[x1][y1] != board[x2][y2]:
        return False
    # Try increasingly complex paths: straight line, one corner, two corners.
    return (horizontalCheck(x1, y1, x2, y2, board)
            or verticalCheck(x1, y1, x2, y2, board)
            or turnOnceCheck(x1, y1, x2, y2, board)
            or turnTwiceCheck(x1, y1, x2, y2, board))
def horizontalCheck(x1, y1, x2, y2, result):
    """True when both cells are in the same row and every cell strictly
    between them holds 0 (adjacent cells always connect)."""
    # Reject the same cell and cells in different rows.
    if x1 == x2 and y1 == y2:
        return False
    if x1 != x2:
        return False
    lo, hi = sorted((y1, y2))
    # Directly adjacent: always connectable.
    if hi - lo == 1:
        return True
    # Otherwise every intermediate cell on the row must be empty.
    return all(result[x1][col] == 0 for col in range(lo + 1, hi))
def verticalCheck(x1, y1, x2, y2, result):
    """True when both cells are in the same column and every cell strictly
    between them holds 0 (adjacent cells always connect)."""
    # Reject the same cell and cells in different columns.
    if x1 == x2 and y1 == y2:
        return False
    if y1 != y2:
        return False
    lo, hi = sorted((x1, x2))
    # Directly adjacent: always connectable.
    if hi - lo == 1:
        return True
    # Otherwise every intermediate cell in the column must be empty.
    return all(result[row][y1] == 0 for row in range(lo + 1, hi))
def turnOnceCheck(x1, y1, x2, y2, result):
    """Single-corner connectivity: the two cells link through one empty
    corner cell via a horizontal and a vertical straight segment."""
    if x1 == x2 and y1 == y2:
        return False
    # A single corner requires different rows AND different columns;
    # otherwise fall through to the straight-line checks elsewhere.
    if x1 == x2 or y1 == y2:
        return False
    # The two candidate corner cells.
    ax, ay = x1, y2  # same row as the first cell, same column as the second
    bx, by = x2, y1  # same column as the first cell, same row as the second
    # A path works when the corner is empty and both legs are clear.
    if result[ax][ay] == 0:
        if horizontalCheck(x1, y1, ax, ay, result) and verticalCheck(ax, ay, x2, y2, result):
            return True
    if result[bx][by] == 0:
        if verticalCheck(x1, y1, bx, by, result) and horizontalCheck(bx, by, x2, y2, result):
            return True
    return False
def turnTwiceCheck(x1, y1, x2, y2, result):
    """Two-corner connectivity: search for an empty pivot cell that joins
    one endpoint with a single-corner path and the other with a straight
    segment."""
    if x1 == x2 and y1 == y2:
        return False
    # Scan the whole board for a suitable pivot.
    for i in range(0, len(result)):
        for j in range(0, len(result[1])):
            # An occupied cell cannot serve as a pivot.
            if result[i][j] != 0:
                continue
            # The pivot must share a row or column with one of the endpoints.
            if i != x1 and i != x2 and j != y1 and j != y2:
                continue
            # Skip the single-corner positions; turnOnceCheck covers those.
            if (i == x1 and j == y2) or (i == x2 and j == y1):
                continue
            if turnOnceCheck(x1, y1, i, j, result) and (horizontalCheck(i, j, x2, y2, result) or verticalCheck(i, j, x2, y2, result)):
                return True
            if turnOnceCheck(i, j, x2, y2, result) and (horizontalCheck(x1, y1, i, j, result) or verticalCheck(x1, y1, i, j, result)):
                return True
    return False
def getColorList():
    """Return the HSV classification ranges used to identify tile colors.

    Returns a ``defaultdict(list)`` mapping color name to a two-element
    list ``[lower_bound, upper_bound]`` of HSV ``np.array`` values.  The
    original built this with ~80 lines of copy-paste (and shadowed the
    ``dict`` builtin); the same table is now data-driven.
    """
    # (name, lower HSV bound, upper HSV bound).  Gray is intentionally
    # omitted, as in the original (it was commented out there).
    hsv_ranges = [
        ('black', [0, 0, 0], [180, 255, 46]),
        ('white', [0, 0, 221], [180, 30, 255]),
        ('red', [156, 43, 46], [180, 255, 255]),
        ('red2', [0, 43, 46], [10, 255, 255]),  # red hue wraps around 180/0
        ('orange', [11, 43, 46], [19, 255, 255]),
        ('yellow', [20, 43, 46], [34, 255, 255]),
        ('green', [35, 43, 46], [77, 255, 255]),
        ('cyan', [78, 43, 46], [99, 255, 255]),
        ('blue', [100, 43, 46], [124, 255, 255]),
        ('purple', [125, 43, 46], [155, 255, 255]),
    ]
    color_dict = collections.defaultdict(list)
    for name, lower, upper in hsv_ranges:
        color_dict[name] = [np.array(lower), np.array(upper)]
    return color_dict
def get_color(frame):
print('go in get_color')
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
maxsum = -100
color = None
color_dict = getColorList()
for d in color_dict:
mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
# cv2.imwrite(d + '.jpg', mask)
binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
binary = cv2.dilate(binary, None, iterations=2)
img, cnts, hiera = cv2.findContours(binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
sum = 0
for c in cnts:
sum += cv2.contourArea(c)
if sum > maxsum:
maxsum = sum
color = d
return color
def getImage():
    """Screenshot the Android device via adb, pull it to the local disk,
    and crop out the game-board region; returns the cropped BGR image.

    NOTE(review): paths and the crop rectangle are hard-coded for one
    machine and one device resolution.
    """
    os.system("adb shell /system/bin/screencap -p /sdcard/tmp/tupian.png")
    os.system("adb pull /sdcard/tmp/tupian.png /Users/steven/Desktop/lianliankan/tupian.png")
    img = cv2.imread('./tupian.png')
    # print(img.shape)
    # Hard-coded crop of the tile grid (rows 790-2207, cols 45-1033).
    cropImage = img[790:2207, 45:1033]
    cv2.imwrite("./cropImage.png", cropImage)
    return cropImage
def getMatrix(cropImage):
    """Cut the cropped board into 7x10 tile squares, classify each square's
    dominant color with get_color(), and return the 10-row x 7-column
    matrix of color names."""
    # Slice the board into individual tile images.
    square_list = []
    lenX = 130   # tile width in px
    blankX = 13  # horizontal gap between tiles
    lenY = 130   # tile height in px
    blankY = 13  # vertical gap between tiles
    numX = 7   # columns
    numY = 10  # rows
    count = 0
    types = []  # distinct color names seen so far
    tmp = []  # accumulates one row of color names
    imageMatrix = []  # the resulting matrix
    for y in range(numY):
        for x in range(numX):
            square = cropImage[(lenY + blankY) * y:blankY * y + lenY * (y + 1), (lenX + blankX) * x:blankX * x + lenX * (x + 1)]
            cv2.imwrite("./tmp/{0}.png".format(count), square)
            square_list.append(square)
            count = count + 1
    # Classify each tile's dominant color and assemble the matrix row by row.
    count = 0
    for square in square_list:
        print("tag:{0}".format(count))
        count = count + 1
        image_value = get_color(square)
        tmp.append(image_value)
        print(image_value)
        if len(tmp) == numX:
            imageMatrix.append(tmp)
            tmp = []
        if image_value not in types:
            types.append(image_value)
    print("types:{0}".format(len(types)))
    return imageMatrix
def removeSquare(imageMatrix):
    """Repeatedly remove connectable pairs until none remain.

    The original looped a fixed 35 times (the 7x10 board holds exactly
    35 pairs); looping until ``autoRemove`` reports no removable pair
    generalizes to any board size, stops early once the board is cleared,
    and reaches the same final state on a full board.
    """
    while autoRemove(imageMatrix):
        pass
def autoRemove(imageMatrix):
    """Find one removable pair, tap both tiles via adb, zero them in the
    matrix and return True; return False when no pair can be removed."""
    game_x = 45   # board origin on the device screen (px)
    game_y = 790
    touchSize = 65  # offset from the tile's top-left to its centre
    TIME_INTERVAL = 0.5  # pause between taps so the game can react
    for i in range(0, len(imageMatrix)):
        for j in range(0, len(imageMatrix[0])):
            # The outer two loops pick the first tile of the candidate pair.
            if imageMatrix[i][j] != 0:
                for m in range(0, len(imageMatrix)):
                    for n in range(0, len(imageMatrix[0])):
                        if imageMatrix[m][n] != 0:
                            # The inner two loops pick the second tile;
                            # remove the pair as soon as one is connectable.
                            if canConnect(i, j, m, n, imageMatrix):
                                imageMatrix[i][j] = 0
                                imageMatrix[m][n] = 0
                                print('可消除点:' + str(i + 1) + ',' + str(j + 1) + '和' + str(m + 1) + ',' + str(n + 1))
                                # Convert matrix coordinates to screen taps.
                                x1 = game_x + j * 130 + touchSize
                                y1 = game_y + i * 130 + touchSize
                                x2 = game_x + n * 130 + touchSize
                                y2 = game_y + m * 130 + touchSize
                                print("点击:({0},{1}),({2},{3})".format(x1, y1, x2, y2))
                                adbControl(x1, y1)
                                time.sleep(TIME_INTERVAL)
                                adbControl(x2, y2)
                                time.sleep(TIME_INTERVAL)
                                # (Legacy win32 mouse automation kept for reference.)
                                # win32api.SetCursorPos((x1 + 15,y1 + 18))
                                # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x1+15, y1+18, 0, 0)
                                # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x1+15, y1+18, 0, 0)
                                # time.sleep(TIME_INTERVAL)
                                #
                                # win32api.SetCursorPos((x2 + 15, y2 + 18))
                                # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x2 + 15, y2 + 18, 0, 0)
                                # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x2 + 15, y2 + 18, 0, 0)
                                # time.sleep(TIME_INTERVAL)
                                return True
    return False
    pass
def adbControl(x, y):
    """Tap the device screen at pixel (x, y) via `adb shell input tap`."""
    command = "adb shell input tap {0} {1}".format(x, y)
    os.system(command)
if __name__ == '__main__':
    # Capture the game screen, convert it into a colour matrix, then
    # auto-play by repeatedly removing matching pairs.
    cropImage=getImage()
    imageMatrix=getMatrix(cropImage)
    removeSquare(imageMatrix)
    # print(imageMatrix)
```
|
{
"source": "JcQSteven/zjutmail",
"score": 3
}
|
#### File: JcQSteven/zjutmail/zjutmail.py
```python
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time
class BrowserTest:
    """Thin wrapper around a Selenium Chrome driver for simple form automation."""

    browser = None  # active WebDriver, created by getBrowser()

    def getBrowser(self):
        """Launch a Chrome driver and return self so calls can be chained."""
        self.browser = webdriver.Chrome()
        return self

    def getWeb(self, url):
        """Navigate the browser to *url*."""
        self.browser.get(url)

    def getElement(self, element):
        """Wait up to 5 seconds for the element matching XPath *element*.

        Logs a message and re-raises the underlying Selenium error when the
        element never appears.
        """
        try:
            wait = WebDriverWait(self.browser, 5)
            ele = wait.until(lambda x: x.find_element_by_xpath(element))
        except Exception:
            # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit are no longer intercepted here.
            print("元素不存在")
            raise
        return ele

    def setElement(self, element, param):
        """Clear the input matched by XPath *element* and type *param* into it."""
        ele = self.getElement(element)
        ele.clear()
        ele.send_keys(param)

    def clickElement(self, element):
        """Click the element matched by XPath *element* (no explicit wait)."""
        ele = self.browser.find_element_by_xpath(element)
        ele.click()

    def close(self):
        """Close the current browser window."""
        self.browser.close()
class FileObj:
    """Append-only text writer bound to a configurable file path."""

    route = None  # destination file path, set via setRoute()

    def setRoute(self, route):
        """Remember *route* as the destination file path."""
        self.route = route

    def Write(self, str):
        """Append *str* plus a trailing newline to the configured file."""
        line = str + '\n'
        with open(self.route, 'a') as sink:
            sink.write(line)
# Password assigned to every account that still has its initial one.
setPswd = '<PASSWORD>'
Outputfile = FileObj()
Outputfile.setRoute("/Users/jerry-pc/Desktop/1.out")
browser = BrowserTest()
count = -1
# Each input line is "username,password".  Log in to the campus webmail;
# if the forced password-change dialog appears, set the password to setPswd
# and record Success/Error per account in the output file.
# NOTE(review): the 'rU' open mode was removed in Python 3.11 — confirm the
# target interpreter, or switch to plain 'r'.
for count, f in enumerate(open('/Users/jerry-pc/Desktop/1.in', 'rU')):
    user,pswd = f.strip('\n').split(",")
    print(user+" "+pswd)
    browser.getBrowser().getWeb(url="http://mail.zjut.edu.cn/")
    browser.setElement('//input[@id="user"]',user)
    browser.setElement('//input[@id="password"]',pswd)
    browser.clickElement('//input[@id="submit"]')
    try:
        browser.setElement('//input[@id="oldpassword"]',"<PASSWORD>") # change to pswd for real use
        browser.setElement('//input[@id="newpassword"]',setPswd)
        browser.setElement('//input[@id="repassword"]',setPswd)
        browser.clickElement('//div[@class="layercontentbtn"]/input')
    except:
        Outputfile.Write("%s,%s,Error,%s"%(user,pswd,pswd))
    else:
        Outputfile.Write("%s,%s,Success,%s"%(user,pswd,setPswd))
    browser.close()
    count += 1
```
|
{
"source": "jcquinlan/colophon",
"score": 2
}
|
#### File: core/forms/edit_design_document_form.py
```python
import json
from django import forms
from core.models import DesignDocument, DesignDocumentImage, DesignDocumentPackage
class EditDesignDocumentForm(forms.ModelForm):
    """ModelForm for editing a DesignDocument and its related images/asset.

    The hidden fields carry JSON-encoded payloads from the client-side
    uploader; they are decoded on demand by pull_and_parse_attr().
    """
    # JSON-encoded list of newly uploaded image URLs.
    document_images = forms.CharField(widget=forms.HiddenInput(), required=False, label='')
    # URL of a newly uploaded downloadable asset package, if any.
    asset_url = forms.CharField(widget=forms.HiddenInput(), required=False, label='')
    # JSON-encoded list of DesignDocumentImage ids to delete.
    document_images_to_remove = forms.CharField(
        widget=forms.HiddenInput(),
        required=False,
        label=''
    )
    # URL of the existing asset package the user wants removed.
    asset_url_to_remove = forms.CharField(widget=forms.HiddenInput(), required=False, label='')
    class Meta:
        model = DesignDocument
        exclude = ['uploaded_by', 'created_at', 'has_download', 'has_assets',]
    def is_valid(self):
        """Run standard validation plus two cross-field rules.

        Rejects edits that would (a) remove every image, or (b) remove the
        existing asset without supplying a replacement.
        """
        valid = super(EditDesignDocumentForm, self).is_valid()
        if not valid:
            return valid
        design_document = self.instance
        removing_too_many_images = False
        trying_to_remove_asset = False
        document_images_to_remove = self.pull_and_parse_attr('document_images_to_remove', [])
        asset_url = self.pull_and_parse_attr('asset_url', None)
        asset_url_to_remove = self.pull_and_parse_attr('asset_url_to_remove', None)
        # If the user is trying to remove all their images
        if len(document_images_to_remove) >= len(design_document.images.all()):
            removing_too_many_images = True
            self.add_error(None, 'You must leave at least one image.')
        # If the user is trying to remove an asset without also providing one
        if asset_url_to_remove and not asset_url:
            trying_to_remove_asset = True
            self.add_error(None, 'You cannot remove an asset without providing a new one.')
        if removing_too_many_images or trying_to_remove_asset:
            return False
        return valid
    def save(self, request):
        """Persist the document plus related package and images.

        Creates a DesignDocumentPackage when a new asset URL was supplied,
        attaches newly uploaded images, then deletes images flagged for
        removal (the filter is scoped to this document).
        """
        data = self.cleaned_data
        design_document = super(EditDesignDocumentForm, self).save(commit=False)
        # Mutably move the file urls from the cleaned data
        file_urls = self.pull_and_parse_attr('document_images', [])
        asset_url = self.pull_and_parse_attr('asset_url', None)
        document_images_to_remove = self.pull_and_parse_attr('document_images_to_remove', [])
        # Assign the user that created/edited the document
        design_document.uploaded_by = request.user
        # Save document package if available
        if asset_url:
            design_document_package = DesignDocumentPackage(
                design_document=design_document,
                package_url=asset_url
            )
            design_document.has_download = True
            design_document.save()
            design_document_package.save()
        # Save document images if available
        # NOTE(review): when no asset_url is given the document is only saved
        # at the end of this method — verify the image FK assignment below
        # works for a not-yet-saved instance.
        for url in file_urls:
            design_document_image = DesignDocumentImage(
                design_document=design_document,
                image_url=url,
            )
            design_document_image.save()
            design_document.images.add(design_document_image)
        for image_id in document_images_to_remove:
            design_document_images = DesignDocumentImage.objects.filter(
                id=image_id,
                design_document=design_document,
            )
            if design_document_images:
                design_document_images.delete()
        design_document.save()
        return design_document
    def pull_and_parse_attr(self, attr_name, default):
        """Pulls an attribute from the uncleaned data and parses it into a useable value"""
        string_val = self.data.get(attr_name)
        return json.loads(string_val) if string_val else default
```
#### File: core/templatetags/format_font_name_filter.py
```python
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def format_font_name(value):
    """Template filter: turn a stored font key into its display name.

    Underscores become spaces and each word is title-cased.
    """
    spaced = value.replace('_', ' ')
    return spaced.title()
```
#### File: views/design_documents/design_document_detail.py
```python
from django.views import View
from django.http import JsonResponse
from django.shortcuts import render, reverse, redirect, get_object_or_404
from core.models import DesignDocument, DesignDocumentPackage, UserDocumentFavorite
class DesignDocumentDetailView(View):
    """Detail page plus AJAX delete endpoint for a DesignDocument."""
    template_name = 'core/design-documents/design-document-detail.html'

    def get(self, request, document_id):
        """Render the detail page; 404s when the document does not exist."""
        design_document = get_object_or_404(DesignDocument, id=document_id)
        document_package = None
        if design_document.has_download:
            document_package = DesignDocumentPackage.objects.get(design_document=design_document)
        context = {
            'document': design_document,
            'document_package': document_package
        }
        return render(request, self.template_name, context)

    def delete(self, request, document_id):
        """Delete the document: 204 on success, 404 when it is missing.

        Bug fix: the original used objects.get(), which raises DoesNotExist
        (a 500) for unknown ids, so the 404 branch was unreachable.
        """
        design_document = DesignDocument.objects.filter(id=document_id).first()
        if design_document:
            design_document.delete()
            return JsonResponse({'message': 'Document deleted'}, status=204)
        else:
            return JsonResponse({'message': 'Document not found'}, status=404)
```
#### File: views/design_documents/design_document_edit.py
```python
from django.views import View
from django.shortcuts import render, reverse, redirect, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from core.models import DesignDocument, DesignDocumentPackage
from core.forms.edit_design_document_form import EditDesignDocumentForm
class DesignDocumentEditView(LoginRequiredMixin, View):
    """Edit page for a DesignDocument; only its uploader may access it."""
    template_name = 'core/design-documents/design-document-edit.html'

    def get(self, request, document_id):
        """Render the edit form pre-populated with the document's data."""
        design_document = get_object_or_404(DesignDocument, id=document_id, uploaded_by=request.user)
        design_document_form = EditDesignDocumentForm(instance=design_document)
        document_package = None
        if design_document.has_download:
            document_package = DesignDocumentPackage.objects.get(design_document=design_document)
        context = {
            'document': design_document,
            'design_document_form': design_document_form,
            'document_package': document_package
        }
        return render(request, self.template_name, context)

    def post(self, request, document_id):
        """Apply the submitted edits, or re-render the form with errors."""
        design_document = get_object_or_404(DesignDocument, id=document_id, uploaded_by=request.user)
        design_document_form = EditDesignDocumentForm(request.POST, instance=design_document)
        document_package = None
        if design_document_form.is_valid():
            design_document_form.save(request)
            # Bug fix: a DesignDocumentPackage query used to run here, but
            # its result was discarded by the redirect (dead DB query).
            # Redirect to the detail page, with message param for successful edit
            return redirect("%s?m=es" % reverse(
                'design_document_detail',
                args=(design_document.id,)
            ))
        context = {
            'document': design_document,
            'design_document_form': design_document_form,
            'document_package': document_package
        }
        return render(request, self.template_name, context)
```
|
{
"source": "jcrafts318/Project-Submission-Portal",
"score": 3
}
|
#### File: Project-Submission-Portal/python/network_lib.py
```python
import re
# Transport functions
def ConstructRequest(type, size, flags):
    # PRE: type is a type of request; must be one that is laid out
    #      in the "options" variable of the receiving host
    #      size is the size of a buffer to be sent following the
    #      request, or 0 if no buffer is to be sent
    #      flags represents any special options for this request
    # POST: FCTVAL == a colon-delimited "type:size:flags" string usable
    #       as a request to send across the network
    return ":".join((type, str(size), str(flags)))
def ValidateResponse(connection, request, type, size, flags):
    # NOTE: this module uses Python 2 print-statement syntax.
    # PRE: connection is an active socket connecting 2 hosts
    #      request is the request that has just been made
    #      type, size, and flags, are the arguments used to
    #      create the request
    #      a request has just been passed to the host on the other side of
    #      connection
    # POST: if request is not equal to the request that would be constructed
    #       using type, size, and flags, the connection is closed and the
    #       process exits with status 3
    # NOTE: each request results in receiving the same request back from
    # the other host, so this should only be used in SendRequest to validate
    # that the request has been received successfully, as it will close the
    # connection if not
    if request != ConstructRequest(type, size, flags):
        print "Connection error has occured. Closing Connection."
        connection.close()
        exit(3)
def SendRequest(connection, type, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      type is a type of request; must be one that is laid out
    #      in the "options" variable of the receiving host
    #      size is the size of a buffer to be sent following the
    #      request, or 0 if no buffer is to be sent
    #      flags represents any special options for this request
    # POST: FCTVAL == response from the other host
    # The peer echoes the request back; ValidateResponse closes the
    # connection (and exits) on any mismatch, so returning here implies
    # the peer is ready to handle the request.
    outgoing = ConstructRequest(type, size, flags)
    connection.send(outgoing)
    echoed = connection.recv(1024)
    ValidateResponse(connection, echoed, type, size, flags)
    return echoed
def ReceiveRequest(connection):
    # PRE: connection is an active socket connecting 2 hosts
    # POST: FCTVAL == a list of size 3 containing the type, size, and flags
    #       fields of the received request
    # The request is echoed straight back so the sender can validate it.
    incoming = connection.recv(1024)
    connection.send(incoming)
    return re.split(":", incoming)
def SendBuf(connection, size, buf):
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the size of the buffer to be sent
    #      buf is the buffer itself
    # POST: the buffer is sent to the other host (possibly truncated if
    #       the peer disconnects mid-transfer)
    bytesSent = 0
    while (bytesSent < size):
        sent = connection.send(buf[bytesSent:])
        if not sent:
            # Bug fix: a zero-byte send means the peer went away; the
            # original looped forever in that case.
            break
        bytesSent += sent
def ReceiveBuf(connection, size):
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the size of the buffer to be sent
    # POST: FCTVAL == the data contained in the buffer sent by the other
    #       host (possibly shorter than size if the peer disconnects)
    bytesRemaining = size
    buf = ""
    while (bytesRemaining > 0):
        chunk = connection.recv(min(bytesRemaining, 1024))
        if not chunk:
            # Bug fix: recv() returns "" once the peer closes the socket;
            # the original spun forever here.
            break
        bytesRemaining -= len(chunk)
        buf += chunk
    return buf
# Request Handling Functions
def ConfirmConnection(connection, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      size and flags come from the preceding request (unused here)
    # POST: FCTVAL == ("connected", 0) — the post-status of this handler
    #       and a zero result
    status_tuple = ("connected", 0)
    return status_tuple
def GetEmail(connection, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the buffer length announced by the preceding request
    #      flags are the flags communicated by the preceding request
    # POST: FCTVAL == ("email_set", <email buffer received from the peer>)
    payload = ReceiveBuf(connection, size)
    return "email_set", payload
def GetFilename(connection, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the buffer length announced by the preceding request
    #      flags are the flags communicated by the preceding request
    # POST: FCTVAL == ("filename_set", <filename buffer received from peer>)
    payload = ReceiveBuf(connection, size)
    return "filename_set", payload
def GetProject(connection, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the buffer length announced by the preceding request
    #      flags are the flags communicated by the preceding request
    # POST: FCTVAL == ("project_set", <tar buffer received from the peer>)
    payload = ReceiveBuf(connection, size)
    return "project_set", payload
def GetStatus(connection, size, flags):
    # NOTE: Python 2 print-statement syntax, like the rest of this module.
    # PRE: connection is an active socket connecting 2 hosts
    #      size is the size communicated by the preceding request
    #      flags are the flags communicated by the preceding request
    # POST: FCTVAL == ("status_check", 0) — the status value is currently
    #       hard-coded to 0 (placeholder; no real status is computed yet)
    print "status check"
    status = 0
    return "status_check", status
def CloseConnection(connection, size, flags):
    # PRE: connection is an active socket connecting 2 hosts
    #      size and flags come from the preceding request (unused here)
    # POST: FCTVAL == ("close", 0) — the post-status of this handler and a
    #       zero result
    result = ("close", 0)
    return result
```
|
{
"source": "jcrafts/dragon-radar",
"score": 2
}
|
#### File: jcrafts/dragon-radar/auto_detect.py
```python
import os
import re
import sys
import logging
import constants
import subprocess
from utils import load_autodetect, save_autodetect, load_episode_disc_data
APP_NAME = constants.APP_NAME
logger = logging.getLogger(APP_NAME)
def _detect_ifo(directory):
files = os.listdir(directory)
max_vobs = 0
ifo_file = ''
for f in files:
fname, ext = os.path.splitext(f)
if fname.upper().startswith('VTS') and ext.upper() == '.IFO':
ifo_base = fname.rsplit('_', 1)[0]
vobs = list(
filter(
lambda v: v != f and v.startswith(ifo_base),
files)
)
if len(vobs) > max_vobs:
max_vobs = len(vobs)
ifo_file = f
return ifo_file
def auto_detect(episode):
    '''
    Auto-detect the demux map for an R1 Dragon Box episode.

    Runs PGCDemux once per VOB id (log-only) against the disc's main IFO
    and inspects the logfile: a VOB with at least 5 cells is assumed to be
    the next episode on the disc.  Results are cached via save_autodetect()
    so later runs skip the scan.
    '''
    # Return the cached map when a previous run already detected this episode.
    demux_map = load_autodetect(episode.number)
    if demux_map:
        return demux_map
    logger.info('Auto-detecting demux information for '
                'R1 DBox episode %s...', episode.number)
    disc_name = episode.demux_map['R1_DBOX']['disc']
    logger.debug('Disc: %s', disc_name)
    auto_demux_map = {}
    disc_dir = os.path.join(episode.src_dir_top,
                            episode.series,
                            'R1_DBOX',
                            disc_name,
                            'VIDEO_TS')
    # get list of episodes that are on the current disc
    m = re.match(r'DRAGON_BOX_S(\d)_D(\d)',
                 disc_name)
    disc_ep_range = load_episode_disc_data('DBoxZ', m.group(1), m.group(2))
    disc_eps = range(disc_ep_range[0], disc_ep_range[1] + 1)
    ifo_file = _detect_ifo(disc_dir)
    ifo_file_abspath = os.path.join(disc_dir, ifo_file)
    vts = int(re.match(r'VTS_(\d\d)_0\.IFO', ifo_file.upper()).group(1))
    logger.debug('Using detected IFO file %s', ifo_file_abspath)
    eps_detected = 0
    for v in range(1, 100):
        # Stop once every episode expected on this disc has been found.
        if eps_detected == len(disc_eps):
            break
        # Run PGCDemux with all outputs disabled: only the logfile is needed.
        args = [episode.pgcdemux, '-vid', str(v),
                '-nom2v', '-noaud', '-nosub',
                '-nocellt', '-log', '-nogui',
                ifo_file_abspath, episode.temp_dir]
        subprocess.call(args)
        logfile = os.path.join(episode.temp_dir, 'LogFile.txt')
        with open(logfile) as file_:
            for line in file_:
                if line.startswith('Number of Cells in Selected VOB='):
                    num_cells = int(line.strip().split('=')[1])
                    # at least 5 cells per episode
                    if num_cells >= 5:
                        episode_num = str(disc_eps[eps_detected]).zfill(3)
                        logger.debug('Detected episode: %s', episode_num)
                        auto_demux_map[episode_num] = {
                            'audio': ['en', 'jp'],
                            'cells': None,
                            'disc': disc_name,
                            'pgc': 1,
                            'type': 'vid',
                            'vid': [v],
                            'vts': vts
                        }
                        eps_detected = eps_detected + 1
                    break
    if disc_name == 'DRAGON_BOX_S6_D3':
        # weird offset of episodes on this disc: shift each episode's VOB id
        # to the next episode and wrap 230's VOB back onto 224
        vids = {k: m['vid'][0] for (k, m) in auto_demux_map.items()}
        for e in disc_eps:
            if e < 230:
                cur = str(e)
                nxt = str(e+1)
                auto_demux_map[nxt]['vid'][0] = vids[cur]
        auto_demux_map['224']['vid'][0] = vids['230']
    logger.debug('Saving detected information for disc %s.', disc_name)
    save_autodetect(auto_demux_map)
    return auto_demux_map[episode.number]
```
#### File: jcrafts/dragon-radar/__main__.py
```python
import os
import sys
import argparse
import configparser
import tempfile
import logging
import atexit
import colorama
import time
import constants
from episode import Episode
from utils import (get_op_offset,
pad_zeroes,
load_validate,
delete_temp,
create_dir)
from subtitle import detect_streams
WELCOME_MSG = constants.WELCOME_MSG
CONF_FILE = constants.CONF_FILE
APP_NAME = constants.APP_NAME
LOG_FILE = constants.LOG_FILE
logger = logging.getLogger(APP_NAME)
def load_config_file():
    '''
    Load config from dragon-radar.ini, guaranteeing an app section exists.

    Parse errors are swallowed so a missing or malformed file still yields
    a usable (empty) configuration object.
    '''
    parser = configparser.ConfigParser()
    try:
        parser.read(CONF_FILE)
    except configparser.Error:
        pass
    try:
        parser.add_section(APP_NAME)
    except configparser.Error:
        # Section already present in the file.
        pass
    return parser
def create_args():
    '''
    Build the command-line parser for dragon-radar.

    Public options cover series/episode selection and R1 source choices;
    a handful of debug-only switches are registered last with their help
    text suppressed.
    '''
    parser = argparse.ArgumentParser(
        description='Create multiplexed Dragon Ball MKVs with Dragon Box '
                    'video, and English audio and subtitles.')
    parser.add_argument('--series', metavar='<series>',
                        help='Choose a series [DB, DBZ, DBGT]',
                        required=True)
    # Exactly one of --episode / --movie must be supplied.
    episode_group = parser.add_mutually_exclusive_group(required=True)
    episode_group.add_argument('--episode', metavar='<number>',
                               help='Episode to process. '
                                    'Can also be used with a range, i.e. '
                                    '--episode <first>:<last>')
    episode_group.add_argument('--movie', metavar='<number>',
                               help='Movie to process. '
                                    'Can also be used with a range, i.e. '
                                    '--movie <first>:<last>')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='More descriptive output')
    # For DBZ, take R1 assets from the Funimation Dragon Box.
    parser.add_argument('--r1-dbox', action='store_true', default=False,
                        help='For DBZ, use the audio and subtitle assets '
                             'from the Funimation Dragon Box')
    # For the first 3 Z movies, take R1 assets from the Pioneer DVDs.
    parser.add_argument('--pioneer', action='store_true', default=False,
                        help='For the first 3 DBZ movies, use the audio and '
                             'subtitle assets from the Pioneer DVDs.')
    parser.add_argument('--no-funi', action='store_true', default=False,
                        help='Use in conjunction with --pioneer to ignore '
                             'assets from the Funimation remastered DVDs.')
    # Hidden debug-only switches (suppressed from --help):
    #   --no-demux  skip demuxing
    #   --no-mux    save demuxed files to the destination directory
    #   --no-retime skip retiming
    #   --sub-only  only demux subtitles
    #   --make-avs  create an AVIsynth script after demux
    #   --r1-vid    demux R1 video in addition to audio/subs
    for debug_flag in ('--no-demux', '--no-mux', '--no-retime',
                       '--sub-only', '--make-avs', '--r1-vid'):
        parser.add_argument(debug_flag, action='store_true', default=False,
                            help=argparse.SUPPRESS)
    return parser
def init_logging(verbose):
    """Configure the app logger: full DEBUG to the logfile, INFO (or DEBUG
    with --verbose) to the console."""
    console_level = logging.DEBUG if verbose else logging.INFO
    logger.setLevel(logging.DEBUG)

    console = logging.StreamHandler()
    console.setLevel(console_level)

    to_file = logging.FileHandler(LOG_FILE, mode='w')
    to_file.setLevel(logging.DEBUG)
    # Only the file gets timestamps; console output stays terse.
    to_file.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))

    logger.addHandler(console)
    logger.addHandler(to_file)
def pre_check(args, config):
    '''
    Make sure directories are correct and required programs are installed.

    Every entry is now checked (instead of stopping at the first failure)
    so the user sees all configuration problems in a single run; exits with
    status 1 if anything is missing.
    '''
    def exe_check(name, isfile=False):
        # Returns True when the config entry is missing or, for files,
        # points at a non-existent path.
        try:
            path = config.get(APP_NAME, name.lower())
        except configparser.Error:
            logger.error('Path to %s is not defined in dragon-radar.ini', name)
            return True
        logger.debug('%s path: %s', name, path)
        if isfile and not os.path.isfile(path):
            logger.error('Path to %s \"%s\" is invalid.', name, path)
            return True
        return False
    logger.debug('Performing pre-check...')
    bad_conf = False
    # Bug fix: the original `bad_conf = bad_conf or exe_check(...)` form
    # short-circuited after the first failure, hiding later problems.
    # Calling exe_check() first guarantees every check runs and logs.
    bad_conf = exe_check('PGCDemux', True) or bad_conf
    bad_conf = exe_check('VSRip', True) or bad_conf
    bad_conf = exe_check('DelayCut', True) or bad_conf
    bad_conf = exe_check('ReStream', True) or bad_conf
    bad_conf = exe_check('source_dir') or bad_conf
    bad_conf = exe_check('output_dir') or bad_conf
    # NOTE(review): args.episode is a string from argparse, so the int
    # comparisons below never match — confirm the intended episodes.
    if (args.series == 'DB' and args.episode in [26, 41] or
       args.series == 'DBZ' and args.episode == 24 or
       args.make_avs):
        # complex demux or avs generation, DGIndex required
        bad_conf = exe_check('DGIndex', True) or bad_conf
    if not args.no_mux:
        bad_conf = exe_check('mkvmerge', True) or bad_conf
    if bad_conf:
        sys.exit(1)
    else:
        logger.debug('Pre-check finished.')
def bad_arg_exit(arg):
    """Log an invalid --<arg> value and abort with exit status 1."""
    logger.error('Bad argument for --%s', arg)
    sys.exit(1)
def bad_combos(args):
    """Log a list of mutually-incompatible CLI options and abort.

    *args* is a list of option names without their leading dashes.
    Bug fix: the original string building produced mangled output like
    "Bad combination: --a , --b " (space before each comma, trailing
    space); a join renders it cleanly.
    """
    flags = ', '.join('--' + a for a in args)
    logger.error('Bad combination: %s', flags)
    sys.exit(1)
def split_args(argtype, arg):
    '''
    Parse "<start>[:<end>]" into an (int, int) pair.

    A missing end defaults to the start value; non-numeric input aborts
    the program via bad_arg_exit().
    '''
    pieces = arg.split(':', 1)
    try:
        first = int(pieces[0])
        last = int(pieces[1])
    except ValueError:
        bad_arg_exit(argtype)
    except IndexError:
        logger.debug('No end %s specified.', argtype)
        last = first
    return first, last
def validate_args(args):
    '''
    Validate all arguments.

    Returns (start, end, special): the numeric episode/movie range plus an
    optional special-episode keyword ('bardock', 'trunks', or 'special').
    Any invalid value or contradictory flag combination exits the program.
    '''
    # series/episode checks
    start = 0
    end = 0
    special = None
    if args.series not in ['DB', 'DBZ', 'DBGT']:
        bad_arg_exit('series')
    valid = load_validate(args.series)
    if args.episode:
        argtype = 'episode'
        # DBZ and DBGT have named specials that must be run individually.
        if args.series == 'DBZ':
            if args.episode in ['bardock', 'trunks']:
                special = args.episode
            elif 'bardock' in args.episode or 'trunks' in args.episode:
                logger.error('Please run --episode bardock or '
                             '--episode trunks on their own.')
                sys.exit(1)
        if args.series == 'DBGT':
            if args.episode == 'special':
                special = args.episode
            elif 'special' in args.episode:
                logger.error('Please run --episode special on its own.')
                sys.exit(1)
        if not special:
            start, end = split_args('episode', args.episode)
    elif args.movie:
        argtype = 'movie'
        start, end = split_args('movie', args.movie)
    # Both ends of the range must fall within the series' valid count.
    if not special and not all((a - 1) in range(
            valid[argtype]) for a in (start, end)):
        bad_arg_exit(argtype)
    # contradictory arguments
    if args.r1_dbox and args.series != 'DBZ':
        logger.error('--r1-dbox can only be used with --series DBZ')
        sys.exit(1)
    if args.movie and args.r1_dbox:
        logger.error('Bad combination --movie and --r1-dbox')
        sys.exit(1)
    if not args.movie and args.pioneer:
        logger.error('--pioneer can only be used with --movie')
        sys.exit(1)
    if not args.pioneer and args.no_funi:
        logger.error('--no-funi can only be used with --pioneer')
        sys.exit(1)
    return start, end, special
def main():
    """Entry point: parse arguments, set up logging/temp dirs, then demux,
    retime, and mux each requested episode or movie in turn."""
    colorama.init()
    config = load_config_file()
    args, wtf = create_args().parse_known_args()
    if (wtf):
        logger.error('Unknown argument %s', wtf[0])
        sys.exit(1)
    init_logging(args.verbose)
    # don't proceed if paths aren't right/programs missing
    pre_check(args, config)
    try:
        working_dir = config.get(APP_NAME, 'working_dir')
    except configparser.Error:
        working_dir = None
    if working_dir:
        if not os.path.isdir(working_dir):
            create_dir(working_dir)
        tempfile.tempdir = working_dir
    tmp_dir = tempfile.mkdtemp()
    logger.debug('Episode temp folder: %s', tmp_dir)
    # Make sure the temp folder is removed even on abnormal exit.
    atexit.register(delete_temp, tmp_dir)
    start, end, special = validate_args(args)
    print(WELCOME_MSG)
    for ep in range(start, end + 1):
        # Bug fix: time.clock() was removed in Python 3.8; perf_counter()
        # is the documented replacement for elapsed-time measurement.
        start_time = time.perf_counter()
        episode = Episode(ep, config, args, tmp_dir, special)
        if not args.no_demux:
            episode.demux()
        else:
            if args.sub_only:
                detect_streams(os.path.join(config.get(APP_NAME, 'output_dir'),
                                            args.series,
                                            str(ep if not special else special).zfill(3),
                                            'R1', 'Subtitle.idx'))
        if not args.no_retime:
            episode.retime_subs()
            episode.retime_audio()
        if not args.no_demux and args.no_mux:
            # move files to destination folder
            episode.move_demuxed_files()
        if not args.no_mux:
            episode.mux()
        if args.make_avs:
            # only works on files generated with --no-mux
            episode.make_avs()
        delete_temp(episode.temp_dir)
        elapsed = time.perf_counter() - start_time
        logger.debug('Elapsed time: %s seconds', elapsed)
    logger.info('Finished!')
if __name__ == "__main__":
    try:
        main()
    except EOFError:
        # Ctrl-D in an input prompt cancels the run.
        logger.error('Aborting.')
        sys.exit(1)
    except KeyboardInterrupt:
        # Ctrl-C aborts cleanly with a non-zero status.
        logger.error('Aborting.')
        sys.exit(1)
```
|
{
"source": "jcraig0/MiniAlphaZero",
"score": 4
}
|
#### File: jcraig0/MiniAlphaZero/othello.py
```python
class OthelloBoard():
    """8x8 Othello (Reversi) position.

    'X' moves first (turn == True); 'O' plays when turn == False.  A move
    is encoded as row * 8 + col, and 64 is the pass move used when the
    side to move has no legal placement.
    """

    def __init__(self):
        """Set up the standard four-piece starting position."""
        self.grid = [[' '] * 8 for _ in range(8)]
        self.grid[3][4] = self.grid[4][3] = 'X'
        self.grid[3][3] = self.grid[4][4] = 'O'
        self.turn = True            # True: 'X' to move, False: 'O'
        self.winner = ''            # result string, set by is_game_over()
        self.pieces = ('O', 'X')    # indexed by bool(turn)
        # The eight compass directions as (row, col) deltas.
        self.directions = ((-1, -1), (-1, 0), (-1, 1), (0, 1),
                           (1, 1), (1, 0), (1, -1), (0, -1))
        self.legal_moves = self.get_legal_moves(self.turn)
        self.num_total_moves = 65   # 64 squares plus the pass move
        self.size = 8

    def get_legal_moves(self, turn):
        """Return the legal moves for *turn* as a set, or [64] (pass) if none."""
        own, foe = self.pieces[turn], self.pieces[1 - turn]
        found = []
        for row in range(8):
            for col in range(8):
                if self.grid[row][col] != own:
                    continue
                for dr, dc in self.directions:
                    step = 1
                    while True:
                        r, c = row + dr * step, col + dc * step
                        if not (0 <= r <= 7 and 0 <= c <= 7):
                            break
                        cell = self.grid[r][c]
                        if cell == foe:
                            # The opponent's line of pieces continues.
                            step += 1
                            continue
                        # A capture line must end on an empty square and
                        # contain at least one enemy piece in between.
                        if cell == ' ' and step != 1:
                            found.append(r * 8 + c)
                        break
        return set(found) if found else [64]

    def is_game_over(self):
        """If neither side can move, set self.winner and return True."""
        if self.legal_moves == [64] and \
                self.get_legal_moves(not self.turn) == [64]:
            tally_x = sum(cell == 'X' for row in self.grid for cell in row)
            tally_o = sum(cell == 'O' for row in self.grid for cell in row)
            if tally_x > tally_o:
                self.winner = '1-0'
            elif tally_o > tally_x:
                self.winner = '0-1'
            else:
                self.winner = '1/2-1/2'
            return True

    def flip_pieces(self, i, j):
        """Flip every enemy line captured by the piece just placed at (i, j)."""
        own, foe = self.pieces[self.turn], self.pieces[1 - self.turn]
        for dr, dc in self.directions:
            captured = []
            step = 1
            while True:
                r, c = i + dr * step, j + dc * step
                if not (0 <= r <= 7 and 0 <= c <= 7):
                    break
                if self.grid[r][c] == foe:
                    # Keep following the enemy line.
                    captured.append((r, c))
                    step += 1
                    continue
                if self.grid[r][c] == own:
                    # Bracketed by our own piece: flip everything between.
                    for fr, fc in captured:
                        self.grid[fr][fc] = own
                break

    def push(self, move):
        """Play *move* (or pass when move == 64) and hand the turn over."""
        if move != 64:
            i, j = divmod(move, 8)
            self.grid[i][j] = self.pieces[self.turn]
            self.flip_pieces(i, j)
        self.turn = not self.turn
        self.legal_moves = self.get_legal_moves(self.turn)

    def __str__(self):
        rows = []
        for row in self.grid:
            rows.append(' '.join('_' if cell == ' ' else cell for cell in row))
        return '\n' + '\n'.join(rows) + '\n'
```
|
{
"source": "jcraig5/Pippin",
"score": 2
}
|
#### File: Pippin/pippin/config.py
```python
import configparser
import inspect
import os
import logging
import hashlib
import shutil
import os
import shutil
import stat
def singleton(fn):
    """Decorator: call *fn* at most once and cache its return value.

    The first call's arguments win; later calls ignore their arguments
    and return the cached instance.
    """
    cached = None

    def get(*args, **kwargs):
        nonlocal cached
        if cached is None:
            cached = fn(*args, **kwargs)
        return cached

    return get
@singleton
def get_config():
    """Load cfg.ini from the repository root (one level above this file).

    Cached for the process lifetime by @singleton.
    """
    ini_path = os.path.abspath(
        os.path.dirname(inspect.stack()[0][1]) + "/../cfg.ini")
    cfg = configparser.ConfigParser()
    cfg.read(ini_path)
    return cfg
def get_output_dir():
    """Resolve the configured [OUTPUT] output_dir to a concrete path.

    "$VAR"-style values are expanded from the environment; relative values
    are anchored at the repository root; absolute paths pass through.
    """
    configured = get_config()['OUTPUT']['output_dir']
    if configured.startswith("$"):
        return os.path.expandvars(configured)
    if configured.startswith("/"):
        return configured
    return os.path.abspath(
        os.path.dirname(inspect.stack()[0][1]) + "/../" + configured)
def get_output_loc(path):
    """Expand environment variables in *path* and anchor relative paths
    beneath the configured output directory."""
    expanded = os.path.expandvars(path) if "$" in path else path
    if expanded.startswith("/"):
        return expanded
    return os.path.join(get_output_dir(), expanded)
def get_hash(input_string):
    """Return the hex SHA-256 digest of *input_string* (UTF-8 encoded)."""
    digest = hashlib.sha256(input_string.encode('utf-8'))
    return digest.hexdigest()
@singleton
def get_logger():
    """Return the shared "pippin" logger (created once via @singleton)."""
    return logging.getLogger("pippin")
def mkdirs(path):
    """Create *path* (including parents) with mode 0775 and fix its group.

    Does nothing when the directory already exists.
    """
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True, mode=0o775)
    chown_dir(path)
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy *src* into *dst*, honouring a shutil-style *ignore*.

    Unlike shutil.copytree, *dst* may already exist and existing files are
    overwritten.  With symlinks=True, links are recreated rather than
    followed, and their mode is copied where the platform supports lchmod.
    """
    # Bug fix: recursive calls previously assumed the destination directory
    # existed, so copying any subdirectory raised FileNotFoundError.
    os.makedirs(dst, exist_ok=True)
    lst = os.listdir(src)
    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]
    for item in lst:
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if symlinks and os.path.islink(s):
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            try:
                st = os.lstat(s)
                mode = stat.S_IMODE(st.st_mode)
                os.lchmod(d, mode)
            except (AttributeError, NotImplementedError, OSError):
                # os.lchmod is unavailable on most Linux systems; narrowed
                # from a bare except so real errors elsewhere surface.
                pass
        elif os.path.isdir(s):
            copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
def chown_dir(directory):
    """Best-effort: hand *directory* and everything inside it to the
    configured SNANA group.

    Failures are logged at debug level only; if the top directory itself
    cannot be chowned the walk is skipped entirely.
    """
    global_config = get_config()
    logger = get_logger()
    try:
        shutil.chown(directory, group=global_config["SNANA"]["group"])
    except Exception as e:
        logger.debug(str(e))
        return
    for root, dirs, files in os.walk(directory):
        for entry in dirs + files:
            target = os.path.join(root, entry)
            try:
                shutil.chown(target, group=global_config["SNANA"]["group"])
            except Exception:
                logger.debug(f"Chown error: {target}")
if __name__ == "__main__":
    # Quick manual smoke test: dump a few values from cfg.ini.
    c = get_config()
    print(c.sections())
    print(c.get("SNANA", "sim_dir"))
    print(c["OUTPUT"].getint("ping_frequency"))
```
#### File: Pippin/pippin/dataprep.py
```python
import shutil
import subprocess
import os
from pippin.config import mkdirs, get_output_loc, get_config
from pippin.task import Task
class DataPrep(Task): # TODO: Define the location of the output so we can run the lc fitting on it.
""" Smack the data into something that looks like the simulated data
"""
def __init__(self, name, output_dir, options, dependencies=None):
super().__init__(name, output_dir, dependencies=dependencies)
self.options = options
self.global_config = get_config()
self.logfile = os.path.join(self.output_dir, "output.log")
self.conda_env = self.global_config["DataSkimmer"]["conda_env"]
self.path_to_task = output_dir
self.raw_dir = self.options.get("RAW_DIR")
self.genversion = os.path.basename(self.raw_dir)
self.data_path = os.path.dirname(self.raw_dir)
self.job_name = f"DATAPREP_{self.name}"
self.output["genversion"] = self.genversion
self.output["data_path"] = self.data_path
self.output["photometry_dir"] = get_output_loc(self.raw_dir)
self.output["raw_dir"] = self.raw_dir
self.clump_file = os.path.join(self.output_dir, self.genversion + ".SNANA.TEXT")
self.output["clump_file"] = self.clump_file
self.slurm = """#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH --time=0:20:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=broadwl
#SBATCH --output={log_file}
#SBATCH --account=pi-rkessler
#SBATCH --mem=2GB
cd {path_to_task}
snana.exe clump.nml
if [ $? -eq 0 ]; then
echo SUCCESS > {done_file}
else
echo FAILURE > {done_file}
fi
"""
self.clump_command = """#
# Obtaining Clump fit
# to run:
# snana.exe SNFIT_clump.nml
# outputs csv file with space delimiters
&SNLCINP
! For SNN-integration:
OPT_SETPKMJD = 16
SNTABLE_LIST = 'SNANA(text:key)'
TEXTFILE_PREFIX = '{genversion}'
! data
PRIVATE_DATA_PATH = '{data_path}'
VERSION_PHOTOMETRY = '{genversion}'
PHOTFLAG_MSKREJ = 1016 !PHOTFLAG eliminate epoch that has errors, not LC
&END
"""
def _get_types(self):
self.logger.warning("Data does not report types, let's hope the defaults are up to date!")
return None
def _check_completion(self, squeue):
if os.path.exists(self.done_file):
self.logger.debug(f"Done file found at f{self.done_file}")
with open(self.done_file) as f:
if "FAILURE" in f.read():
self.logger.info(f"Done file reported failure. Check output log {self.logfile}")
return Task.FINISHED_FAILURE
else:
self.output["types"] = self._get_types()
return Task.FINISHED_SUCCESS
return 1 # The number of CPUs being utilised
def _run(self, force_refresh):
    """Write the clump NML and Slurm script, then submit them via sbatch,
    skipping the submission entirely when the configuration hash is unchanged.
    """
    command_string = self.clump_command.format(genversion=self.genversion, data_path=self.data_path)
    final_slurm = self.slurm.format(
        job_name=self.job_name,
        log_file=self.logfile,
        path_to_task=self.path_to_task,
        done_file=self.done_file,
    )

    new_hash = self.get_hash_from_string(command_string + final_slurm)
    old_hash = self.get_old_hash()

    if not force_refresh and new_hash == old_hash:
        self.logger.info("Hash check passed, not rerunning")
        return True

    self.logger.debug("Regenerating and launching task")
    # Start from a clean output directory so stale artifacts cannot leak in.
    shutil.rmtree(self.output_dir, ignore_errors=True)
    mkdirs(self.output_dir)
    self.save_new_hash(new_hash)

    slurm_output_file = os.path.join(self.output_dir, "slurm.job")
    clump_file = os.path.join(self.output_dir, "clump.nml")
    with open(slurm_output_file, "w") as handle:
        handle.write(final_slurm)
    with open(clump_file, "w") as handle:
        handle.write(command_string)

    self.logger.info(f"Submitting batch job for data prep")
    subprocess.run(["sbatch", slurm_output_file], cwd=self.output_dir)
    return True
```
|
{
"source": "jcramer/bchscript",
"score": 3
}
|
#### File: bchscript/bchscript/bchprimitives.py
```python
import pdb
import copy
import bchscript.bchopcodes as bchopcodes
statementConsumerCallout = None
def SetStatementConsumerCallout(t):
    # Register the statement-list parser callback.  Stored in a module-level
    # global so the primitive classes (if/else/repeat) can recursively parse
    # `{ ... }` bodies without creating a circular import.
    global statementConsumerCallout
    statementConsumerCallout = t
def compileStatementList(statements, symbols):
    """Flatten a parsed statement list into opcodes/literals.

    Primitive values (int/str/bytes) pass through untouched; anything else
    is assumed to expose ``.compile(symbols)`` returning a list.
    """
    compiled = []
    for stmt in statements:
        if type(stmt) in (int, str, bytes):
            compiled.append(stmt)
        else:
            compiled.extend(stmt.compile(symbols))
    return compiled
def compileParamsList(params, symbols):
    """Compile an invocation argument list into a flat list.

    ``None`` means the parameter is not yet bound (a macro being passed
    around) and compiles to nothing.  Primitive values (int/str/bytes)
    pass through; other objects are compiled, with list results flattened.
    """
    if params is None:
        return []  # parameter is not yet bound (passing a macro)
    ret = []
    for p in params:
        if type(p) in (int, str, bytes):
            # bytes added for consistency with compileStatementList, which
            # already treats bytes as a pass-through primitive.
            ret.append(p)
        else:
            compiled = p.compile(symbols)
            if isinstance(compiled, list):
                ret.extend(compiled)
            else:
                ret.append(compiled)
    return ret
def isInt(obj):
    """Return True if ``obj`` converts cleanly to ``int`` (a numeric token)."""
    try:
        int(obj)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean
        # "not an integer"; anything else should propagate.
        return False
def paramsConsumer(tokens, n):
    """Parse a parenthesised parameter declaration starting at ``tokens[n]``.

    Returns ``(next_index, args)`` where *args* maps each parameter name to
    a ``Param`` object and each positional index to the raw name.
    """
    assert tokens[n] == "("
    n += 1
    args = {}
    position = 0
    while tokens[n] != ")":
        name = tokens[n]
        args[name] = Param(name)   # keyed by name
        args[position] = name      # keyed by declaration order
        position += 1
        n += 1
        if tokens[n] == ",":
            n += 1
    return (n + 1, args)
def argsConsumer(tokens, n, validSymbols):
    """Parse a parenthesised argument list starting at ``tokens[n] == '('``.

    Returns ``(next_index, args)``.  Comma-separated groups containing more
    than one item are kept as sub-lists; single items are appended directly.
    """
    args = []
    arg = []
    assert tokens[n] == "("
    n += 1
    while tokens[n] != ")":
        if isInt(tokens[n]):
            arg.append(int(tokens[n]))
            n += 1
        elif tokens[n][0] == '"':  # Push data onto the stack
            arg.append(tokens[n])
            n += 1
        elif tokens[n] in validSymbols:
            # Delegate to the symbol's own parser (may consume many tokens).
            (n, obj) = validSymbols[tokens[n]].parse(tokens, n + 1, validSymbols)
            arg.append(obj)
        else:
            if tokens[n] == ")":  # NOTE(review): unreachable — loop guard excludes ')'
                continue
            pdb.set_trace()
            raise Exception("invalid symbol: %s" % tokens[n])
        if tokens[n] == ",":
            # Close out the current comma-separated group.
            if len(arg) > 1:
                args.append(arg)
            else:
                args.extend(arg)
            arg = []
            n += 1
    # Flush the trailing group (if any).
    if len(arg) > 1:
        args.append(arg)
    else:
        args.extend(arg)
    n += 1
    return (n, args)
class Binding:
    """Connects a macro definition with the arguments of a particular invocation
    """

    def __init__(self, name, parserFn=None):
        self.name = name
        self.invocation = None    # argument list captured by parse()
        self.outputs = None       # '->' output declarations, if any
        self.parserFn = parserFn  # optional custom parse hook
        self.instanceOf = None    # the definition this name resolves to

    def matchArgs(self, bindings):
        """Match passed arguments with their definitions to produce a dictionary of { symbol: binding } pairs.
        Since this is itself a binding, apply both this binding & the outer binding to the definition to create the final
        argument binding
        """
        if self.invocation:
            invocation = copy.copy(self.invocation)
        else:
            invocation = {}
        if bindings:
            # NOTE(review): when self.invocation is unset this becomes
            # {} += bindings, which only works for compatible operand types —
            # confirm the expected type of `bindings` with callers.
            invocation += bindings
        return self.instanceOf.matchArgs(invocation)

    def compile(self, symbols=None):
        """Compile into a list of statements, given an input substitution (symbol) table"""
        if self.instanceOf is None:  # name has no binding,
            self.instanceOf = symbols[self.name]  # so find it
        if symbols:
            syms = copy.copy(symbols)
        else:
            syms = {}
        cinv = compileParamsList(self.invocation, symbols)  # Compile all the args
        # Bind compiled args to the definition's parameter names, then
        # compile the definition body under that extended symbol table.
        syms.update(self.instanceOf.matchArgs(cinv))
        ret = self.instanceOf.compile(syms)
        return ret

    def parse(self, tokens, n, symbols=None):
        """
        could be a basic variable or an invocation
        """
        if self.parserFn:
            return self.parserFn(self, tokens, n, symbols)
        else:
            params = None
            if tokens[n] == "(":
                params = []
                (n, p) = argsConsumer(tokens, n, symbols)
                params += p
            if n < len(tokens) and tokens[n] == "->":
                n += 1
                (n, p) = paramsConsumer(tokens, n)
                self.outputs = p
            self.invocation = params
            # Return a copy so this prototype can be re-parsed elsewhere.
            return (n, copy.copy(self))
class Param:
    """A named macro parameter.

    During compilation the parameter name is looked up in the symbol table
    and replaced by its binding; ``@``-prefixed names denote implicitly
    pushed stack parameters that may legitimately stay unbound.
    """

    def __init__(self, name, parserFn=None):
        self.name = name
        # Filled in by parse() when the parameter is used as an invocation.
        self.invocation = None
        self.outputs = None

    def compile(self, symbols=None):
        """Resolve this parameter against *symbols*, returning a statement list."""
        if self.name[0] == "@":  # This is an implicitly pushed stack parameter
            return [symbols.get(self.name, self.name)]  # binding if present, else the name
        binding = symbols.get(self.name)
        # Fixed: the old code wrote ``assert not "message"`` which always
        # evaluated to False and raised AssertionError *without* the message.
        assert binding is not None, "parameter %s is not bound" % self.name
        if type(binding) is list:  # Already compiled
            return binding
        if type(binding) in (str, int, bytes):  # primitive
            return [binding]
        return binding.compile(symbols)  # compile it

    def parse(self, tokens, n, symbols=None):
        """Parse a use of this parameter: a bare variable or an invocation."""
        params = None
        if tokens[n] == "(":
            params = []
            (n, p) = argsConsumer(tokens, n, symbols)
            params += p
        if n < len(tokens) and tokens[n] == "->":
            n += 1
            (n, p) = paramsConsumer(tokens, n)
            self.outputs = p
        self.invocation = params
        return (n, copy.copy(self))
class Primitive:
    """A built-in script opcode (or pseudo-opcode) known to the compiler."""

    def __init__(self, name, bin, parserFn=None):
        self.name = name
        self.parserFn = parserFn
        self.outputs = None
        self.bin = bin
        self.params = []

    def str(self):
        return self.name

    def serialize(self):
        """Serialize to the single opcode byte."""
        return bytes([self.bin])

    def compile(self, symbols):
        """Compile any bound parameters first, then append this primitive."""
        compiled = compileParamsList(self.params, symbols) if self.params else []
        compiled.append(self)
        return compiled

    def parse(self, tokens, n, symbols=None):
        """Parse an optional '(...)' argument list and optional '->' outputs."""
        if self.parserFn:
            return self.parserFn(self, tokens, n, symbols)
        if tokens[n] == "(":  # invocation with params is optional
            (n, self.params) = argsConsumer(tokens, n, symbols)
        if tokens[n] == "->":
            (n, self.outputs) = paramsConsumer(tokens, n + 1)
        return (n, copy.copy(self))
class IfConstruct:
    """The ``if (arg) { ... }`` construct, compiling down to OP_IF."""

    def __init__(self):
        self.name = "OP_IF"
        self.statements = None
        self.arg = None
        self.outputs = None

    def parse(self, tokens, n, symbols=None):
        if tokens[n] == "(":  # the condition argument is optional
            (n, self.arg) = argsConsumer(tokens, n, symbols)
        assert tokens[n] == "{", "need block"
        (n, self.statements) = statementConsumerCallout(tokens, n + 1, symbols)
        assert tokens[n] == "}"
        return (n + 1, copy.copy(self))

    def compile(self, symbols):
        compiled = []
        if self.arg is not None:
            assert len(self.arg) == 1, "if statement can only have one argument"
            compiled.extend(compileParamsList(self.arg, symbols))
        compiled.append("OP_IF")
        if self.statements is not None:
            compiled.extend(compileStatementList(self.statements, symbols))
        # OP_ENDIF is emitted by the following ElseConstruct.
        # TODO what if no else follows?
        return compiled
class ElseConstruct:
    """The ``else { ... }`` construct, compiling to OP_ELSE ... OP_ENDIF."""

    def __init__(self):
        self.name = "OP_ELSE"
        self.statements = None
        self.arg = None
        self.outputs = None

    def parse(self, tokens, n, symbols=None):
        if tokens[n] == "(":  # optional argument, mirroring IfConstruct
            (n, self.arg) = argsConsumer(tokens, n, symbols)
        assert tokens[n] == "{", "need block"
        (n, self.statements) = statementConsumerCallout(tokens, n + 1, symbols)
        assert tokens[n] == "}"
        return (n + 1, copy.copy(self))

    def compile(self, symbols):
        compiled = []
        if self.arg is not None:
            assert len(self.arg) == 1, "if statement can only have one argument"
            compiled.extend(compileParamsList(self.arg, symbols))
        compiled.append("OP_ELSE")
        if self.statements is not None:
            compiled.extend(compileStatementList(self.statements, symbols))
        compiled.append("OP_ENDIF")  # the else clause always closes the conditional
        return compiled
def elseParser(prim, tokens, n, symbols):
    """Parser callback for a bare ``{ statements }`` block attached to *prim*."""
    assert tokens[n] == "{", "need block"
    (n, prim.statements) = statementConsumerCallout(tokens, n + 1, symbols)
    assert tokens[n] == "}"
    return (n + 1, prim)
def repeatParser(prim, tokens, n, symbols):
    """Parser callback for ``repeat (args) { statements }``."""
    assert tokens[n] == "(", "need args"
    (n, prim.arg) = argsConsumer(tokens, n, symbols)
    assert tokens[n] == "{", "need block"
    (n, prim.statements) = statementConsumerCallout(tokens, n + 1, symbols)
    assert tokens[n] == "}"
    return (n + 1, prim)
# Singleton instances for the structural (non-opcode) primitives.
SP_IF = IfConstruct()
SP_ELSE = ElseConstruct()  # Primitive("else", None, elseParser)
SP_REPEAT = Primitive("repeat", None, repeatParser)
SP_EXEC = Primitive("exec", None)

# Symbol table of language-level primitives, extended below with every raw
# opcode known to bchopcodes.
primitives = {
    "if": SP_IF,
    "else": SP_ELSE,
    "exec": SP_EXEC,
    "repeat": SP_REPEAT
}

for name, bin in bchopcodes.opcode2bin.items():
    primitives[name] = Primitive(name, bin)
```
|
{
"source": "JCRamzz/Fyyur",
"score": 2
}
|
#### File: migrations/versions/7a28c474398d_changed_relationships_between_artists_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the revision identifiers appear redacted/placeholder in this
# copy ('<KEY>', '5<PASSWORD>') — confirm against the real migration history
# before applying this migration.
revision = '<KEY>'
down_revision = '5<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Move the artist→venue link off the Artist table and onto shows."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('Artist_venue_id_fkey', 'Artist', type_='foreignkey')
    op.drop_column('Artist', 'venue_id')
    op.add_column('shows', sa.Column('venues_id', sa.Integer(), nullable=False))
    op.create_foreign_key(None, 'shows', 'Venue', ['venues_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore Artist.venue_id and drop shows.venues_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'shows', type_='foreignkey')
    op.drop_column('shows', 'venues_id')
    op.add_column('Artist', sa.Column('venue_id', sa.INTEGER(), autoincrement=False, nullable=False))
    op.create_foreign_key('Artist_venue_id_fkey', 'Artist', 'Venue', ['venue_id'], ['id'])
    # ### end Alembic commands ###
```
|
{
"source": "jcrangel/AI-for-Trading",
"score": 3
}
|
#### File: m1_quant_basics/l2_stock_prices/quiz_tests.py
```python
from collections import OrderedDict
import pandas as pd
from tests import project_test, assert_output
@project_test
def test_csv_to_close(fn):
    """Check that *fn* reshapes the raw OHLCV csv into a date-by-ticker
    DataFrame of close prices matching the fixture values below."""
    tickers = ['A', 'B', 'C']
    dates = ['2017-09-22', '2017-09-25', '2017-09-26', '2017-09-27', '2017-09-28']

    fn_inputs = {
        'csv_filepath': 'prices_2017_09_22_2017-09-28.csv',
        'field_names': ['ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close', 'adj_volume']}
    fn_correct_outputs = OrderedDict([
        (
            'close',
            pd.DataFrame(
                [
                    [152.48000000, 149.19000000, 59.35000000],
                    [151.11000000, 145.06000000, 60.29000000],
                    [152.42000000, 145.21000000, 57.74000000],
                    [154.34000000, 147.02000000, 58.41000000],
                    [153.68000000, 147.19000000, 56.76000000]],
                dates, tickers))])

    assert_output(fn, fn_inputs, fn_correct_outputs)
```
#### File: m1_quant_basics/l6_momentum_trading/project_tests.py
```python
from collections import OrderedDict
import pandas as pd
from tests import project_test, generate_random_tickers, generate_random_dates, assert_output
@project_test
def test_date_top_industries(fn):
    """Verify *fn* returns the set of sectors whose tickers rank in the
    top_n prices on the given date (ties/duplicates collapse via the set)."""
    tickers = generate_random_tickers(10)
    dates = generate_random_dates(2)

    fn_inputs = {
        'prices': pd.DataFrame(
            [
                [21.050810483942833, 17.013843810658827, 10.984503755486879, 11.248093428369392, 12.961712733997235,
                 482.34539247360806, 35.202580592515041, 3516.5416782257166, 66.405314327318209, 13.503960481087077],
                [15.63570258751384, 14.69054309070934, 11.353027688995159, 475.74195118202061, 11.959640427803022,
                 10.918933017418304, 17.9086438675435, 24.801265417692324, 12.488954191854916, 15.63570258751384]],
            dates, tickers),
        'sector': pd.Series(
            ['ENERGY', 'MATERIALS', 'ENERGY', 'ENERGY', 'TELECOM', 'FINANCIALS',
             'TECHNOLOGY', 'HEALTH', 'MATERIALS', 'REAL ESTATE'],
            tickers),
        'date': dates[-1],
        'top_n': 4}
    fn_correct_outputs = OrderedDict([
        (
            'top_industries',
            {'ENERGY', 'HEALTH', 'TECHNOLOGY'})])

    assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_generate_positions(fn):
    """Verify *fn* maps prices to position sizes (long 30 above a threshold,
    short 10 below another) per the expected fixture matrix."""
    tickers = generate_random_tickers(5)
    dates = generate_random_dates(6)

    fn_inputs = {
        'prices': pd.DataFrame(
            [
                [65.40757705426432, 27.556319958924323, 50.59935209411175, 56.274712269629134, 99.9873070881051],
                [47.82126720752715, 56.812865745668375, 40.75685814634723, 27.469680989736023, 41.449858088448735],
                [88.20038097315815, 45.573972499280494, 36.592711369868724, 21.36570423559795, 0.698919959739297],
                [14.670236824202721, 49.557949251949054, 18.935364730808935, 23.163368660093298, 8.075599541367884],
                [41.499140208637705, 9.75987296846733, 66.08677766062186, 37.927861417544385, 10.792730405945827],
                [86.26923464863536, 32.12679487375028, 15.621592524570282, 77.1908860965619, 52.733950486350444]],
            dates, tickers)}
    fn_correct_outputs = OrderedDict([
        (
            'final_positions',
            pd.DataFrame(
                [
                    [30, 0, 30, 30, 30],
                    [0, 30, 0, 0, 0],
                    [30, 0, 0, 0, -10],
                    [-10, 0, -10, 0, -10],
                    [0, -10, 30, 0, -10],
                    [30, 0, -10, 30, 30]],
                dates, tickers))])

    assert_output(fn, fn_inputs, fn_correct_outputs)
```
#### File: m2_advanced_quants/l5_volatility/volatility_estimation.py
```python
import pandas as pd
import numpy as np
def estimate_volatility(prices, l):
    """Create an exponential moving average model of the volatility of a stock
    price, and return the most recent (last) volatility estimate.

    Parameters
    ----------
    prices : pandas.Series
        A series of adjusted closing prices for a stock.

    l : float
        The 'lambda' parameter of the exponential moving average model. Making
        this value smaller will cause the model to weight older terms less
        relative to more recent terms.

    Returns
    -------
    last_vol : float
        The last element of the exponential moving average volatility model series.
    """
    # The previous implementation returned an EMA of the *prices* themselves,
    # which is a smoothed price level, not a volatility.  Volatility is
    # estimated as sqrt of the EWMA of squared log returns.
    log_returns = np.log(prices / prices.shift(1))
    vol = np.sqrt(np.square(log_returns).ewm(alpha=1 - l).mean())
    # Use .iloc[-1]: positional [-1] indexing on a Series is deprecated.
    return vol.iloc[-1]
def test_run(filename='data.csv'):
    """Run estimate_volatility() on prices loaded from *filename*."""
    # read_csv(squeeze=True) was removed in pandas 2.0; use
    # DataFrame.squeeze("columns") to obtain the single-column Series.
    prices = pd.read_csv(filename, parse_dates=['date'], index_col='date').squeeze("columns")
    print("Most recent volatility estimate: {:.6f}".format(estimate_volatility(prices, 0.7)))


if __name__ == '__main__':
    test_run()
```
#### File: m2_advanced_quants/l5_volatility/volatility.py
```python
import pandas as pd
import numpy as np
def get_most_volatile(prices):
    """Return the ticker symbol for the most volatile stock.

    Volatility is measured as the standard deviation of per-period simple
    returns of each ticker's price series.

    Parameters
    ----------
    prices : pandas.DataFrame
        a pandas.DataFrame object with columns: ['ticker', 'date', 'price']

    Returns
    -------
    ticker : string
        ticker symbol for the most volatile stock
    """
    # Pivot to one column per ticker, compute per-period returns, and pick
    # the ticker whose return series has the largest standard deviation.
    wide = prices.pivot(index='date', columns='ticker', values='price')
    return wide.pct_change().std().idxmax()
def test_run(filename='prices.csv'):
    """Test run get_most_volatile() with stock prices from a file."""
    # Expects columns ['ticker', 'date', 'price'] per get_most_volatile's contract.
    prices = pd.read_csv(filename, parse_dates=['date'])
    print("Most volatile stock: {}".format(get_most_volatile(prices)))


if __name__ == '__main__':
    test_run()
```
#### File: m3_funds_etfs_portfolio_optimization/l4_portfolio_optimization/quiz_tests_advanced.py
```python
from collections import OrderedDict
import numpy as np
import cvxpy as cvx
from tests import project_test, generate_random_tickers, generate_random_dates, assert_output
import string
@project_test
def test_optimize_portfolio(fn):
"""Test with a 3 simulated stock return series"""
days_per_year = 252
years = 3
total_days = days_per_year * years
return_market = np.random.normal(loc=0.05, scale=0.3, size=days_per_year)
return_1 = np.random.uniform(low=-0.000001, high=.000001, size=days_per_year) + return_market
return_2 = np.random.uniform(low=-0.000001, high=.000001, size=days_per_year) + return_market
return_3 = np.random.uniform(low=-0.000001, high=.000001, size=days_per_year) + return_market
returns = np.array([return_1, return_2, return_3])
"""simulate index weights"""
index_weights = np.array([0.9,0.15,0.05])
scale = .00001
m = returns.shape[0]
cov = np.cov(returns)
x = cvx.Variable(m)
portfolio_variance = cvx.quad_form(x, cov)
distance_to_index = cvx.norm(x - index_weights)
objective = cvx.Minimize(portfolio_variance + scale* distance_to_index)
constraints = [x >= 0, sum(x) == 1]
problem = cvx.Problem(objective, constraints).solve()
x_values = x.value
fn_inputs = {
'returns': returns,
'index_weights': index_weights,
'scale': scale
}
fn_correct_outputs = OrderedDict([
('x_values',x_values)
])
assert_output(fn, fn_inputs, fn_correct_outputs)
```
#### File: Quiz/m5_financial_statements/bagofwords_solutions.py
```python
import numpy as np
from math import log
# for nice number printing
np.set_printoptions(precision=3, suppress=True)
# tokenize and clean the text
import nltk
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from collections import Counter
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
# tokenize anything that is not a number and not a symbol
# tokenize anything that is not a number and not a symbol
word_tokenizer = RegexpTokenizer(r'[^\d\W]+')

# Ensure the required NLTK corpora are present (no-ops if already downloaded).
nltk.download('stopwords')
nltk.download('wordnet')

# Stemmer/lemmatizer singletons reused by clean_text().
sno = SnowballStemmer('english')
wnl = WordNetLemmatizer()

# get our list of stop_words
stop_words = set(stopwords.words('english'))
# add some extra stopwords (domain-specific boilerplate from 10-K filings)
stop_words |= {"may", "business", "company", "could", "service", "result", "product",
               "operation", "include", "law", "tax", "change", "financial", "require",
               "cost", "market", "also", "user", "plan", "actual", "cash", "other",
               "thereto", "thereof", "therefore"}
# useful function to print a dictionary sorted by value (largest first by default)
def print_sorted(d, ascending=False):
    """Print ``key: value`` lines of *d* ordered by value (largest first by default)."""
    direction = 1 if ascending else -1
    for key, value in sorted(d.items(), key=lambda kv: direction * kv[1]):
        print("{}: {:.3f}".format(key, value))
# convert text into bag-of-words
def clean_text(txt):
    """Tokenize, lowercase, lemmatize (noun then verb), stem, and drop stopwords."""
    lemmas = [
        wnl.lemmatize(wnl.lemmatize(tok.lower(), 'n'), 'v')
        for tok in word_tokenizer.tokenize(txt)
        if tok.isalpha() and tok not in stop_words
    ]
    return [sno.stem(lemma) for lemma in lemmas
            if lemma not in stop_words and len(lemma) > 2]
from collections import defaultdict
def bag_of_words(words):
    """Count token occurrences; returns a defaultdict(int) so missing keys read as 0."""
    counts = defaultdict(int)
    for token in words:
        counts[token] = counts[token] + 1
    return counts
def get_sentiment(txt, wordlist):
    """Return the fraction of tokens in *txt* that appear in *wordlist*.

    Returns 0.0 for an empty token list instead of raising ZeroDivisionError.
    """
    if not txt:
        return 0.0
    matching_words = [w for w in txt if w in wordlist]
    return len(matching_words) / len(txt)
# compute idf
def get_idf(corpus, include_log=True):
    """Compute inverse document frequency over *corpus* (a list of token lists).

    With include_log=True, the classic log(N / doc_freq) weighting is used;
    otherwise the raw ratio N / doc_freq.
    """
    N = len(corpus)
    vocab = set()
    for doc in corpus:
        vocab |= set(doc)
    freq = defaultdict(int)
    for term in vocab:
        freq[term] = sum(term in doc for doc in corpus)
    if include_log:
        return {term: log(N / df) for term, df in freq.items()}
    return {term: N / df for term, df in freq.items()}
def _tf(freq, avg, include_log=True):
if include_log:
return 0 if freq == 0 else (1+log(freq))/(1+log(avg))
else:
return freq/avg
def get_tf(txt, include_log=True):
    """Map each token of *txt* to its (possibly log-damped) term frequency."""
    counts = bag_of_words(txt)
    avg = np.mean(list(counts.values()))
    scaled = {word: _tf(count, avg, include_log) for word, count in counts.items()}
    return defaultdict(int, scaled)
def get_vector(tf, idf):
    """Build a tf-idf vector ordered by the keys of *idf*."""
    weights = [tf[term] * idf[term] for term in idf]
    return np.array(weights)
from numpy.linalg import norm
def sim_cos(u, v):
    """Cosine similarity: dot(u, v) scaled by the product of the vector norms."""
    denominator = norm(u) * norm(v)
    return np.dot(u, v) / denominator
def sim_jac(u, v):
    """Weighted Jaccard similarity: elementwise sum(min) / sum(max)."""
    mins = np.minimum(u, v)
    maxs = np.maximum(u, v)
    return np.sum(mins) / np.sum(maxs)
```
|
{
"source": "jcrangel/Dual-encoder-with-BERT",
"score": 2
}
|
#### File: jcrangel/Dual-encoder-with-BERT/train.py
```python
import pdb, time, math
from biencoder_parameters import Biencoder_params
from utils import experiment_logger, EmbLoader, ForOnlyFaiss_KBIndexer, cuda_device_parser, parse_cuidx2encoded_emb_for_debugging
from utils import parse_cuidx2encoded_emb_2_cui2emb
from data_reader import FixedDatasetTokenizedReader, AllEntityCanonical_and_Defs_loader
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from mention_and_entity_encoders import Concatenate_Right_and_Left_MentionEncoder, Pooler_for_cano_and_def, Pooler_for_blink_mention
from biencoder_models import InBatchBiencoder, InBatchBLINKBiencoder, WrappedModel_for_entityencoding, BLINKBiencoder_OnlyforEncodingMentions
import torch.optim as optim
from KBentities_encoder import InKBAllEntitiesEncoder
from evaluator import BLINKBiEncoderTopXRetriever, DevandTest_BLINKBiEncoder_IterateEvaluator
import copy
import torch
from allennlp.models.archival import archive_model, load_archive
import os
CANONICAL_AND_DEF_CONNECTTOKEN = '[unused3]'
def main():
    """Train a BERT dual-encoder (biencoder) entity linker, encode the whole
    KB, index it with FAISS, and run dev/test retrieval evaluation."""
    exp_start_time = time.time()
    # --- configuration and data loading -------------------------------------
    Parameters = Biencoder_params()
    opts = Parameters.get_params()
    experiment_logdir = experiment_logger(args=opts)
    Parameters.dump_params(experiment_dir=experiment_logdir)
    cuda_devices = cuda_device_parser(str_ids=opts.cuda_devices)
    reader_for_mentions = FixedDatasetTokenizedReader(args=opts, canonical_and_def_connecttoken=CANONICAL_AND_DEF_CONNECTTOKEN)
    trains = reader_for_mentions.read('train')
    if not opts.allen_lazyload:
        print('\ntrain statistics:', len(trains), '\n')
    vocab = Vocabulary()
    iterator_for_training_and_evaluating_mentions = BucketIterator(batch_size=opts.batch_size_for_train, sorting_keys=[('context', 'num_tokens')])
    iterator_for_training_and_evaluating_mentions.index_with(vocab)
    embloader = EmbLoader(args=opts)
    emb_mapper, emb_dim, textfieldEmbedder = embloader.emb_returner()
    # --- model construction (mention + entity encoders) ---------------------
    if opts.model_for_training == 'blink_implementation_inbatchencoder':
        mention_encoder = Pooler_for_blink_mention(args=opts, word_embedder=textfieldEmbedder)
    else:
        mention_encoder = Concatenate_Right_and_Left_MentionEncoder(args=opts, input_dim=emb_dim, word_embedder=textfieldEmbedder)
    current_cui2idx, current_idx2cui, current_cui2emb, current_cui2cano, current_cui2def = reader_for_mentions.currently_stored_KB_dataset_returner()
    if opts.model_for_training == 'biencoder':
        entity_encoder = Pooler_for_cano_and_def(args=opts, word_embedder=textfieldEmbedder)
        model = InBatchBiencoder(args=opts, mention_encoder=mention_encoder, entity_encoder=entity_encoder, vocab=vocab, input_dim=emb_dim)
    elif opts.model_for_training == 'blink_implementation_inbatchencoder':
        entity_encoder = Pooler_for_cano_and_def(args=opts, word_embedder=textfieldEmbedder)
        model = InBatchBLINKBiencoder(args=opts, mention_encoder=mention_encoder, entity_encoder=entity_encoder, vocab=vocab)
    else:
        print('currently', opts.model_for_training,'are not supported')
        raise NotImplementedError
    model = model.cuda()
    # --- training (skipped entirely in entity-encoder debug mode) -----------
    if not opts.debug_for_entity_encoder:
        optimizer = optim.Adam(filter(lambda param: param.requires_grad, model.parameters()), lr=opts.lr,eps=opts.epsilon,
                               weight_decay=opts.weight_decay, betas=(opts.beta1, opts.beta2), amsgrad=opts.amsgrad)
        trainer = Trainer(model=model,optimizer=optimizer,
                          iterator=iterator_for_training_and_evaluating_mentions, train_dataset=trains,
                          # validation_dataset=devs,
                          cuda_device=cuda_devices, num_epochs=opts.num_epochs
                          )
        trainer.train()
    else:
        print('\n==Skip Biencoder training==\n')

    # Save the model
    serialization_dir = 'model'
    config_file = os.path.join(serialization_dir, 'config.json')
    vocabulary_dir = os.path.join(serialization_dir, 'vocabulary')
    weights_file = os.path.join(serialization_dir, 'weights.th')
    model_pytorch_file = os.path.join(serialization_dir, 'model.th')
    os.makedirs(serialization_dir, exist_ok=True)
    #params.to_file(config_file)
    #vocab.save_to_files(vocabulary_dir)
    torch.save(model, model_pytorch_file)

    # --- KB encoding, FAISS indexing, and evaluation (no gradients) ---------
    with torch.no_grad():
        model.eval()
        model.switch2eval()
        print('======Start encoding all entities in KB=====\n======1. Start Tokenizing All Entities=====')
        entity_encoder_wrapping_model = WrappedModel_for_entityencoding(args=opts, entity_encoder=entity_encoder, vocab=vocab)
        entity_encoder_wrapping_model.eval()
        # entity_encoder_wrapping_model.cpu()
        Tokenizer = reader_for_mentions.berttokenizer_returner()
        TokenIndexer = reader_for_mentions.token_indexer_returner()
        kbentity_loader = AllEntityCanonical_and_Defs_loader(args=opts, idx2cui=current_idx2cui, cui2cano=current_cui2cano,
                                                             cui2def=current_cui2def, textfield_embedder=textfieldEmbedder,
                                                             pretrained_tokenizer=Tokenizer, tokenindexer=TokenIndexer,
                                                             canonical_and_def_connect_token=CANONICAL_AND_DEF_CONNECTTOKEN)
        Allentity_embedding_encodeIterator = InKBAllEntitiesEncoder(args=opts, entity_loader_datasetreaderclass=kbentity_loader,
                                                                    entity_encoder_wrapping_model=entity_encoder_wrapping_model,
                                                                    vocab=vocab)
        print('======2. Encoding All Entities=====')
        cuidx2encoded_emb = Allentity_embedding_encodeIterator.encoding_all_entities()
        if opts.debug_for_entity_encoder:
            cuidx2encoded_emb = parse_cuidx2encoded_emb_for_debugging(cuidx2encoded_emb=cuidx2encoded_emb, original_cui2idx=current_cui2idx)
        cui2encoded_emb = parse_cuidx2encoded_emb_2_cui2emb(cuidx2encoded_emb=cuidx2encoded_emb, original_cui2idx=current_cui2idx)
        print('=====Encoding all entities in KB FINISHED!=====')
        print('\n+++++Indexnizing KB from encoded entites+++++')
        forstoring_encoded_entities_to_faiss = ForOnlyFaiss_KBIndexer(args=opts,
                                                                      input_cui2idx=current_cui2idx,
                                                                      input_idx2cui=current_idx2cui,
                                                                      input_cui2emb=cui2encoded_emb,
                                                                      search_method_for_faiss=opts.search_method_before_re_sorting_for_faiss,
                                                                      entity_emb_dim=768)
        print('+++++Indexnizing KB from encoded entites FINISHED!+++++')
        print('Loading BLINKBiencoder')
        blinkbiencoder_onlyfor_encodingmentions = BLINKBiencoder_OnlyforEncodingMentions(args=opts, mention_encoder=mention_encoder, vocab=vocab)
        blinkbiencoder_onlyfor_encodingmentions.cuda()
        blinkbiencoder_onlyfor_encodingmentions.eval()
        print('Loaded: BLINKBiencoder')
        print('Evaluation for BLINK start')
        blinkBiEncoderEvaluator = BLINKBiEncoderTopXRetriever(args=opts, vocab=vocab, blinkbiencoder_onlyfor_encodingmentions=blinkbiencoder_onlyfor_encodingmentions,
                                                              fortrainigmodel_faiss_stored_kb_kgemb=forstoring_encoded_entities_to_faiss.indexed_faiss_returner(),
                                                              reader_for_mentions=reader_for_mentions)
        finalblinkEvaluator = DevandTest_BLINKBiEncoder_IterateEvaluator(args=opts, blinkBiEncoderEvaluator=blinkBiEncoderEvaluator, experiment_logdir=experiment_logdir)
        finalblinkEvaluator.final_evaluation(dev_or_test_flag='dev')
        finalblinkEvaluator.final_evaluation(dev_or_test_flag='test')
    exp_end_time = time.time()
    print('Experiment time', math.floor(exp_end_time - exp_start_time), 'sec')
main()
```
|
{
"source": "jcranmer/dxr",
"score": 2
}
|
#### File: dxr/dxr/app.py
```python
from logging import StreamHandler
import os.path
from os.path import isdir
from sys import stderr
from time import time
from urllib import quote_plus
from flask import (Blueprint, Flask, send_from_directory, current_app,
send_file, request, redirect, jsonify, render_template)
from dxr.query import Query
from dxr.server_utils import connect_db
from dxr.utils import non_negative_int, search_url, sqlite3 # Make sure we load trilite before possibly importing the wrong version of sqlite3.
# Look in the 'dxr' package for static files, templates, etc.:
dxr_blueprint = Blueprint('dxr_blueprint', 'dxr')
def make_app(instance_path):
    """Return a DXR application which looks in the given folder for
    configuration.

    Also set up the static and template folder according to the configured
    template.
    """
    # TODO: Actually obey the template selection in the config file by passing
    # a different static_folder and template_folder to Flask().
    app = Flask('dxr', instance_path=instance_path)
    app.register_blueprint(dxr_blueprint)

    # Load the special config file generated by dxr-build:
    app.config.from_pyfile(os.path.join(app.instance_path, 'config.py'))

    # Log to Apache's error log in production:
    app.logger.addHandler(StreamHandler(stderr))
    return app
@dxr_blueprint.route('/')
def index():
    # Serve the tree-selection landing page generated at build time into the
    # instance folder.
    return send_file(current_app.open_instance_resource('trees/index.html'))
@dxr_blueprint.route('/<tree>/search')
def search(tree):
    """Search by regex, caller, superclass, or whatever.

    Renders HTML by default, or JSON when ?format=json is passed.
    NOTE(review): this handler relies on ``e.message`` on exceptions, which is
    Python-2-only — confirm the deployment target before porting.
    """
    # TODO: This function still does too much.
    querystring = request.values
    offset = non_negative_int(querystring.get('offset'), 0)
    limit = non_negative_int(querystring.get('limit'), 100)
    config = current_app.config

    # Arguments for the template:
    arguments = {
        # Common template variables
        'wwwroot': config['WWW_ROOT'],
        'tree': config['TREES'][0],
        'trees': config['TREES'],
        'config': config['TEMPLATE_PARAMETERS'],
        'generated_date': config['GENERATED_DATE']}

    error = warning = ''
    status_code = None

    if tree in config['TREES']:
        arguments['tree'] = tree

        # Connect to database
        conn = connect_db(tree, current_app.instance_path)
        if conn:
            # Parse the search query
            qtext = querystring.get('q', '')
            q = Query(conn, qtext, should_explain='explain' in querystring)

            # Try for a direct result:
            if querystring.get('redirect') == 'true':
                result = q.direct_result()
                if result:
                    path, line = result
                    # TODO: Does this escape qtext properly?
                    return redirect(
                        '%s/%s/source/%s?from=%s#l%i' %
                        (config['WWW_ROOT'], tree, path, qtext, line))

            # Return multiple results:
            template = 'search.html'
            start = time()
            try:
                results = list(q.results(offset, limit))
            except sqlite3.OperationalError as e:
                # The query layer encodes user-facing problems in prefixed
                # exception messages; anything else is a real DB error.
                if e.message.startswith('REGEXP:'):
                    # Malformed regex
                    warning = e.message[7:]
                    results = []
                elif e.message.startswith('QUERY:'):
                    warning = e.message[6:]
                    results = []
                else:
                    error = 'Database error: %s' % e.message
            if not error:
                # Search template variables:
                arguments['query'] = qtext
                arguments['search_url'] = search_url(arguments['wwwroot'],
                                                     arguments['tree'],
                                                     qtext,
                                                     redirect=False)
                arguments['results'] = results
                arguments['offset'] = offset
                arguments['limit'] = limit
                arguments['time'] = time() - start
        else:
            error = 'Failed to establish database connection.'
    else:
        error = "Tree '%s' is not a valid tree." % tree
        status_code = 404

    if warning or error:
        arguments['error'] = error or warning

    if querystring.get('format') == 'json':
        if error:
            # Return a non-OK code so the live search doesn't try to replace
            # the results with our empty ones:
            return jsonify(arguments), status_code or 500

        # Tuples are encoded as lists in JSON, and these are not real
        # easy to unpack or read in Javascript. So for ease of use, we
        # convert to dictionaries before returning the json results.
        # If further discrepancies are introduced, please document them in
        # templating.mkd.
        arguments['results'] = [
            {'icon': icon,
             'path': path,
             'lines': [{'line_number': nb, 'line': l} for nb, l in lines]}
            for icon, path, lines in arguments['results']]
        return jsonify(arguments)

    if error:
        return render_template('error.html', **arguments), status_code or 500
    else:
        return render_template('search.html', **arguments)
@dxr_blueprint.route('/<tree>/source/')
@dxr_blueprint.route('/<tree>/source/<path:path>')
def browse(tree, path=''):
    """Show a directory listing or a single file from one of the trees."""
    tree_folder = os.path.join(current_app.instance_path, 'trees', tree)
    if isdir(os.path.join(tree_folder, path)):
        # It's a bare directory. Add the index file to the end:
        path = os.path.join(path, current_app.config['DIRECTORY_INDEX'])
    else:
        # It's a file. Add the .html extension:
        # (the build step pre-renders each source file as <name>.html)
        path += '.html'
    return send_from_directory(tree_folder, path)
```
#### File: plugins/omniglot/htmlifier.py
```python
import os
import subprocess
import urlparse
import dxr.plugins
"""Omniglot - Speaking all commonly-used version control systems.
At present, this plugin is still under development, so not all features are
fully implemented.
Omniglot first scans the project directory looking for the hallmarks of a VCS
(such as the .hg or .git directory). It also looks for these in parent
directories in case DXR is only parsing a fraction of the repository. Once this
information is found, it attempts to extract upstream information about the
repository. From this information, it builds the necessary information to
reproduce the links.
Currently supported VCSes and upstream views:
- git (github)
- mercurial (hgweb)
Todos:
- add gitweb support for git
- add cvs, svn, bzr support
- produce in-DXR blame information using VCSs
- check if the mercurial paths are specific to Mozilla's customization or not.
"""
# Global variables
# NOTE(review): presumably populated elsewhere by the htmlifier setup pass —
# `tree` with the current tree config and `source_repositories` keyed by
# repository root; confirm with the plugin loader.
tree = None
source_repositories = {}
class VCS(object):
    """A class representing an abstract notion of a version-control system.

    In general, all path arguments to query methods should be normalized to be
    relative to the root directory of the VCS.
    """

    def __init__(self, root):
        self.root = root
        self.untracked_files = set()

    def get_root_dir(self):
        """Return the directory that is at the root of the VCS."""
        return self.root

    def get_vcs_name(self):
        """Return a recognizable name for the VCS."""
        return type(self).__name__

    def invoke_vcs(self, args):
        """Return the result of invoking said command on the repository, with
        the current working directory set to the root directory.
        """
        return subprocess.check_output(args, cwd=self.get_root_dir())

    def is_tracked(self, path):
        """Does the repository track this file?"""
        return path not in self.untracked_files

    def get_rev(self, path):
        """Return a human-readable revision identifier for the repository."""
        # Fixed: ``raise NotImplemented`` raises the NotImplemented constant,
        # which is not an exception (a TypeError under Python 3); the abstract
        # methods below are meant to raise NotImplementedError.
        raise NotImplementedError

    def generate_log(self, path):
        """Return a URL for a page that lists revisions for this file."""
        raise NotImplementedError

    def generate_blame(self, path):
        """Return a URL for a page that lists source annotations for lines in
        this file.
        """
        raise NotImplementedError

    def generate_diff(self, path):
        """Return a URL for a page that shows the last change made to this file."""
        raise NotImplementedError

    def generate_raw(self, path):
        """Return a URL for a page that returns a raw copy of this file."""
        raise NotImplementedError
class Mercurial(VCS):
    """VCS support for Mercurial checkouts, linking to an hgweb upstream."""

    def __init__(self, root):
        super(Mercurial, self).__init__(root)
        # Find the revision
        self.revision = self.invoke_vcs(['hg', 'id', '-i']).strip()
        # Sometimes hg id returns + at the end (uncommitted local changes).
        if self.revision.endswith("+"):
            self.revision = self.revision[:-1]
        # Make and normalize the upstream URL
        upstream = urlparse.urlparse(self.invoke_vcs(['hg', 'paths', 'default']).strip())
        recomb = list(upstream)
        if upstream.scheme == 'ssh':
            # FIX: this line was `recomb[0] == 'http'`, a no-op comparison.
            # The intent is clearly to rewrite ssh:// upstreams to their
            # http:// web-view equivalent.
            recomb[0] = 'http'
        recomb[1] = upstream.hostname  # Eliminate any username stuff
        if not upstream.path.endswith('/'):
            recomb[2] += '/'  # Make sure we have a '/' on the end
        recomb[3] = recomb[4] = recomb[5] = ''  # Just those three
        self.upstream = urlparse.urlunparse(recomb)
        # Find all untracked files: `hg status -u -i` prints "? path" /
        # "I path" lines; keep only the path column.
        self.untracked_files = set(line.split()[1] for line in
            self.invoke_vcs(['hg', 'status', '-u', '-i']).split('\n')[:-1])

    @staticmethod
    def claim_vcs_source(path, dirs):
        """Return a Mercurial instance if *path* contains a .hg dir, else None.

        Removes '.hg' from *dirs* so os.walk does not descend into it.
        """
        if '.hg' in dirs:
            dirs.remove('.hg')
            return Mercurial(path)
        return None

    def get_rev(self, path):
        return self.revision

    def generate_log(self, path):
        return self.upstream + 'filelog/' + self.revision + '/' + path

    def generate_blame(self, path):
        return self.upstream + 'annotate/' + self.revision + '/' + path

    def generate_diff(self, path):
        return self.upstream + 'diff/' + self.revision + '/' + path

    def generate_raw(self, path):
        return self.upstream + 'raw-file/' + self.revision + '/' + path
class Git(VCS):
    """VCS support for git checkouts, linking to a GitHub upstream."""

    def __init__(self, root):
        super(Git, self).__init__(root)
        self.untracked_files = set(line for line in
            self.invoke_vcs(['git', 'ls-files', '-o']).split('\n')[:-1])
        # FIX: `git rev-parse` output ends with a newline; without strip()
        # every URL built from self.revision embedded a literal '\n'.
        self.revision = self.invoke_vcs(['git', 'rev-parse', 'HEAD']).strip()
        # NOTE(review): self.upstream stays unset when no 'origin' remote
        # exists, so generate_* would raise AttributeError -- confirm intent.
        source_urls = self.invoke_vcs(['git', 'remote', '-v']).split('\n')
        for src_url in source_urls:
            name, url, _ = src_url.split()
            if name == 'origin':
                self.upstream = self.synth_web_url(url)
                break

    @staticmethod
    def claim_vcs_source(path, dirs):
        """Return a Git instance if *path* contains a .git dir, else None.

        Removes '.git' from *dirs* so os.walk does not descend into it.
        """
        if '.git' in dirs:
            dirs.remove('.git')
            return Git(path)
        return None

    def get_rev(self, path):
        # Abbreviate the full SHA for display.
        return self.revision[:10]

    def generate_log(self, path):
        return self.upstream + "/commits/" + self.revision + "/" + path

    def generate_blame(self, path):
        return self.upstream + "/blame/" + self.revision + "/" + path

    def generate_diff(self, path):
        # I really want to make this anchor on the file in question, but github
        # doesn't seem to do that nicely
        return self.upstream + "/commit/" + self.revision

    def generate_raw(self, path):
        return self.upstream + "/raw/" + self.revision + "/" + path

    def synth_web_url(self, repo):
        """Translate an origin remote URL into a browsable GitHub URL."""
        # NOTE(review): '<EMAIL>' below looks like a redacted ssh-style
        # 'git@github.com' prefix -- confirm against the original source.
        if repo.startswith("<EMAIL>:"):
            self._is_github = True
            return "https://github.com/" + repo[len("<EMAIL>:"):]
        elif repo.startswith("git://github.com/"):
            self._is_github = True
            if repo.endswith(".git"):
                repo = repo[:-len(".git")]
            # Swap the git:// scheme for https://.
            return "https" + repo[len("git"):]
        raise Exception("I don't know what's going on")
every_vcs = [Mercurial, Git]
# Load global variables
def load(tree_, conn):
    """Discover every VCS checkout under the tree's source folder.

    Populates the module-level ``source_repositories`` map (root directory
    -> VCS instance) and ``lookup_order``, the list of roots ordered
    deepest-first so the most specific repository wins a lookup.
    """
    global tree, lookup_order
    tree = tree_
    # Find all of the VCS's in the source directory
    for cwd, dirs, files in os.walk(tree.source_folder):
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(cwd, dirs)
            if attempt is not None:
                source_repositories[attempt.root] = attempt
    # It's possible that the root of the tree is not a VCS by itself, so walk
    # up the hierarchy until we find a parent folder that is a VCS. If we
    # can't find any, then no VCSs exist for the top level of this repository.
    directory = tree.source_folder
    while directory != '/' and directory not in source_repositories:
        directory = os.path.dirname(directory)
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(directory, os.listdir(directory))
            if attempt is not None:
                source_repositories[directory] = attempt
    # FIX: look up source repositories deepest-directory-first, as the
    # intent stated. The previous `lookup_order.sort(key=len)` put the
    # *shortest* root first, so a nested checkout never shadowed its
    # parent. sorted() also works on Python 3, where keys() is a view.
    lookup_order = sorted(source_repositories, key=len, reverse=True)
def find_vcs_for_file(path):
    """Given an absolute path, find a source repository we know about that
    claims to track that file.
    """
    for root in lookup_order:
        # A relpath beginning with '..' means path lies outside this root;
        # this is the simplest "is path inside the subtree?" test.
        if os.path.relpath(path, root).startswith('..'):
            continue
        repo = source_repositories[root]
        relative = os.path.relpath(path, repo.get_root_dir())
        if repo.is_tracked(relative):
            return repo
    return None
class LinksHtmlifier(object):
    """Htmlifier which adds blame and external links to VCS web utilities."""

    def __init__(self, path):
        if not os.path.isabs(path):
            path = os.path.join(tree.source_folder, path)
        self.vcs = find_vcs_for_file(path)
        if self.vcs is not None:
            self.path = os.path.relpath(path, self.vcs.get_root_dir())
            self.name = self.vcs.get_vcs_name()

    def refs(self):
        """This htmlifier contributes no reference links."""
        return []

    def regions(self):
        """This htmlifier contributes no syntax regions."""
        return []

    def annotations(self):
        """This htmlifier contributes no line annotations."""
        return []

    def links(self):
        """Yield one links section: either an 'untracked' notice or the
        log/blame/diff/raw links for the owning VCS."""
        if self.vcs is None:
            yield 5, 'Untracked file', []
            return

        def entries():
            vcs, path = self.vcs, self.path
            yield 'log', "Log", vcs.generate_log(path)
            yield 'blame', "Blame", vcs.generate_blame(path)
            yield 'diff', "Diff", vcs.generate_diff(path)
            yield 'raw', "Raw", vcs.generate_raw(path)

        label = '%s (%s)' % (self.name, self.vcs.get_rev(self.path))
        yield 5, label, entries()
def htmlify(path, text):
    """Plugin entry point: return the htmlifier for *path* (text is unused)."""
    return LinksHtmlifier(path)

__all__ = dxr.plugins.htmlifier_exports()
```
#### File: dxr/dxr/server_utils.py
```python
import ctypes
import os.path
import sys
import dxr.utils # Load trilite before we load sqlite3. Here be dragons. Reorder these import normally once we merge connect_db and connect_database.
import sqlite3
# Collation helper so "file:line:col" location strings sort with numeric
# (not lexical) line/column comparison.
def _collate_loc(str1, str2):
parts1 = str1.split(':')
parts2 = str2.split(':')
for i in range(1, len(parts1)):
parts1[i] = int(parts1[i])
for i in range(2, len(parts2)):
parts2[i] = int(parts2[i])
return cmp(parts1, parts2)
# Get database connection for tree
# TODO: Why do both this and connect_database() exist?
def connect_db(tree, instance_path):
    """Return a sqlite3 connection to the tree's cross-reference database,
    or None if the database cannot be opened or configured."""
    dbname = os.path.join(instance_path, 'trees', tree, '.dxr-xref.sqlite')
    try:
        conn = sqlite3.connect(dbname)
        conn.text_factory = str
        conn.execute("PRAGMA temp_store = MEMORY;")
        conn.create_collation("loc", _collate_loc)
        conn.row_factory = sqlite3.Row
        return conn
    except sqlite3.Error:
        # FIX: only swallow database errors. The old bare `except` also hid
        # programming errors (NameError, KeyboardInterrupt, ...).
        return None
# Log message
def log(msg):
    """Write a log line to stderr.

    FIX: the original `print >> sys.stderr, ...` is Python-2-only syntax;
    sys.stderr.write produces the same output on both Python 2 and 3.
    """
    sys.stderr.write("Log: %s\n" % msg)
```
#### File: dxr/tests/test_types.py
```python
from dxr.testing import SingleFileTestCase, MINIMAL_MAIN
class TypeTests(SingleFileTestCase):
    """Tests that class definitions are indexed and searchable via type:."""

    # Two trivially different classes, plus the minimal main() the harness
    # needs to build a compilable file.
    source = r"""
class Foo {};
class Bar {};
""" + MINIMAL_MAIN

    def test_simple_type(self):
        # Each class must be findable by exact name, with the name bolded.
        self.found_line_eq('type:Foo',
                           'class <b>Foo</b> {};')
        self.found_line_eq('type:Bar',
                           'class <b>Bar</b> {};')

    def test_two_terms(self):
        # There's no type that matches both of these conditions
        self.found_nothing('type:*Foo* type:*Quux*')
|
{
"source": "jcranmer/factorio-tools",
"score": 3
}
|
#### File: jcranmer/factorio-tools/factorio_types.py
```python
# Schema for one entry of a minable entity's `results` array: either a
# fixed `amount` or an `amount_min`/`amount_max` range, with an optional
# drop `probability`.
mining_schema = [{
    "amount": { "optional": True, "type": "float", "default": 0 },
    "amount_max": { "optional": True, "type": "float", "default": 0 },
    "amount_min": { "optional": True, "type": "float", "default": 0 },
    "name": { "optional": False, "type": "string" },
    "type": { "optional": False, "type": "string" },
    "probability": { "optional": True, "type": "float", "default": 1.0 }
}]
def mining_results(data, lua, output_dict):
    """Normalize a minable entity's results into output_dict['results'].

    Returns the list of raw lua keys consumed ("claimed") so the caller
    knows which fields have been handled.
    """
    from factorio_schema import parse_data_value
    # First level of lua is a dict, watch out!
    if 'results' in lua:
        # Multi-result form: validate the raw array against mining_schema.
        arr = parse_data_value(mining_schema, data, lua['results'])
        claimed = ['results']
    else:
        # Single-result shorthand: synthesize a one-element results list.
        # NOTE(review): this dict uses 'count' where mining_schema declares
        # 'amount' -- confirm which key downstream consumers expect.
        arr = [{
            'count': lua.get('count', 1),
            'name': lua['result'],
            'type': 'item',
            'probability': 1.0
        }]
        claimed = ['result', 'count']
    output_dict['results'] = arr
    return claimed
def encode_lua(obj):
    """Convert a lua-table-like mapping into a plain Python value.

    A mapping whose keys are exactly the integers 1..n is a lua array and
    becomes a list in key order; anything else is returned as a dict copy.
    """
    resp = dict(obj.items())
    size = len(resp)
    if all(index in resp for index in range(1, size + 1)):
        return [resp[index] for index in range(1, size + 1)]
    return resp
def _map_array(lua_array, out_dict):
    """Accumulate {name: expected amount} entries from a lua ingredient or
    result array into out_dict.

    Each element is either a table with named fields (name, optional type,
    amount or amount_min/amount_max, optional probability) or a positional
    [name, amount] pair.
    """
    for element in lua_array.values():
        if element.name:
            # Named form.
            name = element.name
            # NOTE(review): `ty` is computed here but never used.
            ty = element.type or 'item'
            if element.amount is not None:
                amount = element.amount
                if element.probability:
                    # Scale by drop probability to get the expected yield.
                    amount *= element.probability
            else:
                # Range form: use the mean of min and max.
                amount = (element.amount_max + element.amount_min) / 2.0
        else:
            # Positional form; lua arrays are 1-based.
            name = element[1]
            ty = "item"
            amount = element[2]
        out_dict[name] = amount
    return
def recipe_ingredients(data, lua):
    """Return ({ingredient name: amount}, claimed keys) for a recipe.

    Recipes without a top-level ingredients list keep them under the
    'normal' difficulty table; recurse into that table in that case.
    """
    if not lua.ingredients:
        return recipe_ingredients(data, lua.normal)
    ingredients = {}
    _map_array(lua.ingredients, ingredients)
    return (ingredients, ['ingredients'])
def recipe_results(data, lua):
    """Return ({result name: amount}, claimed keys) for a recipe.

    Handles the three raw forms: a single `result` (with optional
    `result_count`, defaulting to 1), a `results` array, or results nested
    under the 'normal' difficulty table.
    """
    results = {}
    if lua.result:
        results[lua.result] = lua.result_count if lua.result_count else 1
        return (results, ['result', 'result_count'])
    elif lua.results:
        _map_array(lua.results, results)
        return (results, ['results'])
    else:
        return recipe_results(data, lua.normal)
# Schema for a single sprite/picture definition: source image file,
# geometry, sheet layout, and optional RGBA tint.
picture_schema = {
    "animation_speed": {"optional": True, "type": "float", "default": 1.0},
    "apply_projection": {"optional": True, "type": "bool", "default": False},
    "direction_count": {"optional": True, "type": "integer", "default": 4},
    "filename": {"optional": False, "type": "FileName"},
    "flags": {"optional": True, "type": ["string"], "default": []},
    "frame_count": {"optional": True, "type": "integer", "default": 1},
    "height": {"optional": False, "type": "integer"},
    "line_length": {"optional": True, "type": "integer", "default": 1},
    "priority": {"optional": True, "type": "string", "default": "normal"},
    "scale": {"optional": True, "type": "float", "default": 1.0},
    "shift": {"optional": True, "type": ["float"], "default": [0, 0]},
    "tint": {"optional": True, "default": None, "type": {
        "r": {"optional": False, "type": "float"},
        "g": {"optional": False, "type": "float"},
        "b": {"optional": False, "type": "float"},
        "a": {"optional": True, "type": "float", "default": 1.0}
    }},
    "variation_count": {"optional": True, "type": "integer", "default": 1},
    "width": {"optional": False, "type": "integer"},
    "x": {"optional": True, "type": "integer", "default": 0},
    "y": {"optional": True, "type": "integer", "default": 0}
}
# Animation schema: everything a picture has, plus animation-only fields.
animation_schema = dict(picture_schema)
animation_schema.update({
    "axially_symmetrical": {"optional": True, "type": "bool", "default": True},
    "blend_mode": {"optional": True, "type": "string", "default": ""},
    "duration": {"optional": True, "type": "integer", "default": 0},
    "fade_away_duration": {"optional": True, "type": "integer", "default": 0}
})
def entity_animation(data, lua):
    """Extract an entity's animation/picture definition (currently disabled).

    The early return below deliberately short-circuits the whole function;
    the remaining code is kept for when the breakage flagged "XXX" below
    is resolved.
    """
    return (None, [])  # XXX skip brokenness for now
    from factorio_schema import parse_data_value
    def parse_block(lua_block, schema):
        # Recursively parse either a lua array, a single sprite, or a list
        # of named sub-blocks.
        keys = list(lua_block.keys())
        if keys == range(1, len(keys) + 1):
            # NOTE(review): on Python 3 a list never equals a range object,
            # so this "lua array" branch can only fire on Python 2.
            d = [parse_block(lua_block[k], schema) for k in keys]
        elif 'filename' in keys:
            # Leaf sprite definition: validate against the schema.
            d = parse_data_value(schema, data, lua_block)
        elif 'layers' in keys or 'sheet' in keys:
            # XXX: fix me.
            d = {}
        else:
            d = []
            for k in keys:
                val = parse_block(lua_block[k], schema)
                # Rail remnants are PITAs. May need to change tack here.
                if isinstance(val, dict):
                    val['name'] = k
                d.append(val)
        return d
    # Prefer 'animation', then 'pictures', then 'picture'.
    if 'animation' in lua:
        d = parse_block(lua.animation, animation_schema)
        return (d, ['animation'])
    elif 'pictures' in lua:
        d = parse_block(lua.pictures, picture_schema)
        return (d, ['pictures'])
    elif 'picture' in lua:
        d = parse_block(lua.picture, picture_schema)
        return (d, ['picture'])
    else:
        return (None, [])
def tech_effects(data, lua):
    """Split a technology's effects into unlocked recipes and raw modifiers.

    Returns ({'recipes': [...], 'modifiers': [...]}, claimed keys); raises
    on any effect that is neither a recipe unlock nor a modifier.
    """
    from factorio_schema import encode_lua
    d = {'recipes': [], 'modifiers': []}
    if 'effects' not in lua:
        return (d, [])
    for effect in lua.effects.values():
        if effect.type == 'unlock-recipe':
            d['recipes'].append(effect.recipe)
        elif 'modifier' in effect:
            # Keep non-recipe effects verbatim, converted to plain Python.
            d['modifiers'].append(encode_lua(effect))
        else:
            raise Exception("Unknown technology effect: %s" % str(list(effect.keys())))
    return (d, ['effects'])
# Registry of complex type handlers; currently empty.
complex_types = [
]
```
|
{
"source": "jcranmer-intel/llvm-sycl",
"score": 2
}
|
#### File: llvm-sycl/buildbot/configure.py
```python
import argparse
import os
import subprocess
import sys
import platform
def do_configure(args):
    """Run the CMake configuration step for the LLVM/SYCL build tree.

    Deletes a stale CMakeCache.txt and retries once if the first attempt
    fails; a second failure propagates as CalledProcessError. Returns True
    on success.
    """
    llvm_dir = os.path.join(args.src_dir, "llvm")
    sycl_dir = os.path.join(args.src_dir, "sycl")
    spirv_dir = os.path.join(args.src_dir, "llvm-spirv")
    ocl_header_dir = os.path.join(args.obj_dir, "OpenCL-Headers")

    # The ICD loader library name is platform dependent.
    loader_build = os.path.join(args.obj_dir, "OpenCL-ICD-Loader", "build")
    if platform.system() == 'Linux':
        icd_loader_lib = os.path.join(loader_build, "libOpenCL.so")
    else:
        icd_loader_lib = os.path.join(loader_build, "OpenCL.lib")

    cmake_cmd = ["cmake",
                 "-G", "Ninja",
                 "-DCMAKE_BUILD_TYPE={}".format(args.build_type),
                 "-DLLVM_EXTERNAL_PROJECTS=sycl;llvm-spirv",
                 "-DLLVM_EXTERNAL_SYCL_SOURCE_DIR={}".format(sycl_dir),
                 "-DLLVM_EXTERNAL_LLVM_SPIRV_SOURCE_DIR={}".format(spirv_dir),
                 "-DLLVM_ENABLE_PROJECTS=clang;llvm-spirv;sycl",
                 "-DOpenCL_INCLUDE_DIR={}".format(ocl_header_dir),
                 "-DOpenCL_LIBRARY={}".format(icd_loader_lib),
                 "-DLLVM_BUILD_TOOLS=OFF",
                 llvm_dir]
    print(cmake_cmd)

    try:
        subprocess.check_call(cmake_cmd, cwd=args.obj_dir)
    except subprocess.CalledProcessError:
        # A stale cache can poison configuration; drop it and retry once.
        cmake_cache = os.path.join(args.obj_dir, "CMakeCache.txt")
        if os.path.isfile(cmake_cache):
            os.remove(cmake_cache)
        subprocess.check_call(cmake_cmd, cwd=args.obj_dir)
    return True
def main():
    """Parse command-line options and run the CMake configuration step.

    Several options (build number, branch, PR number, builder dir) are
    accepted for CI-driver compatibility but are not read by do_configure.
    Returns do_configure's boolean result.
    """
    parser = argparse.ArgumentParser(prog="configure.py",
                                     description="script to do configure",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-n", "--build-number", metavar="BUILD_NUM", help="build number")
    parser.add_argument("-b", "--branch", metavar="BRANCH", help="pull request branch")
    parser.add_argument("-d", "--base-branch", metavar="BASE_BRANCH", help="pull request base branch")
    parser.add_argument("-r", "--pr-number", metavar="PR_NUM", help="pull request number")
    parser.add_argument("-w", "--builder-dir", metavar="BUILDER_DIR",
                        help="builder directory, which is the directory contains source and build directories")
    parser.add_argument("-s", "--src-dir", metavar="SRC_DIR", required=True, help="source directory")
    parser.add_argument("-o", "--obj-dir", metavar="OBJ_DIR", required=True, help="build directory")
    parser.add_argument("-t", "--build-type",
                        metavar="BUILD_TYPE", required=True, help="build type, debug or release")
    args = parser.parse_args()
    print("args:{}".format(args))
    return do_configure(args)

# Exit status mirrors do_configure's result: 0 on success, 1 on failure.
if __name__ == "__main__":
    ret = main()
    exit_code = 0 if ret else 1
    sys.exit(exit_code)
```
|
{
"source": "jcran/vulnerability-rating-taxonomy",
"score": 3
}
|
#### File: vulnerability-rating-taxonomy/tests/test_deprecated_mapping.py
```python
import utils
import unittest
from semantic_version import Version
class TestDeprecatedMapping(unittest.TestCase):
    """Checks that deprecated VRT node ids map forward to valid nodes.

    NOTE: uses dict.iteritems(), so this suite targets Python 2.
    """

    def setUp(self):
        # Load every tagged VRT version plus the deprecated-id mapping file.
        self.vrt_versions = utils.all_versions(utils.VRT_FILENAME)
        # Highest semver among tagged versions ('current' is excluded).
        self.last_tagged_version = max([Version.coerce(x) for x in self.vrt_versions.keys() if x != 'current'])
        self.deprecated_json = utils.get_json(utils.DEPRECATED_MAPPING_FILENAME)

    def test_old_vrt_ids_have_current_node(self):
        """Every id from an old version must resolve -- possibly via the
        deprecation mapping -- to a node in the current taxonomy."""
        for version, vrt in self.vrt_versions.iteritems():
            if version == 'current':
                continue
            for id_list in utils.all_id_lists(vrt):
                vrt_id = '.'.join(id_list)
                if vrt_id in self.deprecated_json:
                    # Follow the mapping entry for its newest listed version,
                    # comparing version strings component-wise numerically.
                    max_ver = sorted(self.deprecated_json[vrt_id].keys(), key=lambda s: map(int, s.split('.')))[-1]
                    vrt_id = self.deprecated_json[vrt_id][max_ver]
                    id_list = vrt_id.split('.')
                self.assertTrue(vrt_id == 'other' or self.check_mapping(id_list),
                                '%s from v%s has no mapping' % (vrt_id, version))

    def test_deprecated_nodes_map_valid_node(self):
        """Every target of the deprecation mapping must itself be valid
        (or the catch-all 'other')."""
        for old_id, mapping in self.deprecated_json.iteritems():
            for new_version, new_id in mapping.iteritems():
                self.assertTrue(new_id == 'other' or utils.id_valid(self.vrt_version(new_version), new_id.split('.')),
                                new_id + ' is not valid')

    def check_mapping(self, id_list):
        """True if id_list, or any ancestor of it, exists in the current VRT."""
        if utils.id_valid(self.vrt_versions['current'], id_list):
            return True
        elif len(id_list) == 1:
            return False
        else:
            # Fall back to the parent id (drop the last component).
            return self.check_mapping(id_list[0:-1])

    def vrt_version(self, version):
        """Return the VRT content for *version*, treating anything newer
        than the last tagged release as 'current'."""
        if version in self.vrt_versions:
            return self.vrt_versions[version]
        elif Version.coerce(version) > self.last_tagged_version:
            return self.vrt_versions['current']
        else:
            self.fail('Unknown version: %s' % version)

if __name__ == "__main__":
    unittest.main()
```
|
{
"source": "jcraver1021/downer",
"score": 3
}
|
#### File: downer/downer/downer.py
```python
import random
import sys
import time

import requests

from os.path import dirname, exists, join
# Settings
# Module-level defaults; each may be overridden per call via download() kwargs.
INTERVALS = [2, 3, 5, 8]
"""INTERVALS (list): list of ints representing a choice of seconds to wait between requests"""
FILTER = True
"""FILTER (boolean): whether to filter out destination files that already exists (without even making the request to download)"""
AFILE = join(dirname(__file__), 'agents.txt')
"""AFILE (string): filename containing default list of user agent choices"""
CLOSE = True
"""CLOSE (boolean): whether to tell server to close connection after download (equivalent to adding it into header map)"""
# Since you can't raise in a lambda
def _raise_value(msg):
    """Raise ValueError(msg); exists because ``raise`` is a statement and
    cannot appear inside a lambda expression."""
    raise ValueError(msg)
# Interpret a file as a list of strings
def _load_file_as_list(filename):
with open(filename, 'r') as strings_file:
return [line.strip() for line in strings_file]
# This is a "switch statement" of behaviors for the agent setting
_agent_ldr_ = {
'list': (lambda h: h['list'] if 'list' in h else _raise_value("'list' option required if list is used for agent")),
'file': (lambda h: _load_file_as_list(h['file']) if 'agents-file' in h else _raise_value("'file' option required if file is used for agent")),
'default': (lambda h: _load_file_as_list(AFILE)),
}
def download(pairs, **kwargs):
    """Given a list of (source url, destination filename) pairs, download all of them.

    Inputs:
        pairs: List of (string, string) pairs representing source URL to be downloaded and destination filename to put it
        options:
            agent: user-agent to claim in the header file
                (absent) - the default requests user-agent will be used
                list - choose from the list of agents provided in the 'list' option
                file - choose from the list of agents found in the file named by the 'file' option
                default - equivalent to file with file = "agents.txt"
            filter: whether to skip downloads whose destination file already exists (default True)
            intervals: list of numbers of seconds from which to sample wait times between requests (default [2, 3, 5, 8])
            close: whether to close connection after download (equivalent to adding it to header map)
            headers: map of headers to use in the requests (default {})
    Output:
        None
    """
    settings = {}
    # Apply settings; an unknown 'agent' value raises ValueError via the
    # fallback lambda.
    if 'agent' in kwargs:
        settings['agents'] = _agent_ldr_.get(
            kwargs['agent'],
            (lambda h: _raise_value("'%s' not a valid value for 'agent'" % h['agent'])))(kwargs)
    settings['filter'] = kwargs.get('filter', FILTER)
    settings['intervals'] = kwargs.get('intervals', INTERVALS)
    settings['close'] = kwargs.get('close', CLOSE)
    headers = kwargs.get('headers', {})
    if settings['close']:
        headers['Connection'] = 'close'

    # Download each URL in the list
    for url, filename in pairs:
        # FIX: the old guard (`settings['filter'] and not exists(...)`)
        # skipped *every* download whenever filtering was disabled; filter
        # off should mean "download unconditionally".
        if settings['filter'] and exists(filename):
            continue
        # Retry until the HTTP request itself succeeds.
        while True:
            # Choose a random user agent to represent ourselves
            if 'agents' in settings:
                headers['user-agent'] = random.choice(settings['agents'])
            try:
                # Wait a random amount of time to avoid a regular request
                # profile (requested URLs are likely served by one host).
                time.sleep(random.choice(settings['intervals']))
                # Get that bread!
                response = requests.get(url, headers=headers)
            except requests.RequestException:
                # FIX: only network-level failures are retried now. The old
                # bare `except` also looped forever on unrelated errors
                # (e.g. file-write failures) and referenced `sys` without
                # importing it.
                print('Unexpected error:', sys.exc_info()[0])
                continue
            # Write it out to a file; write errors propagate to the caller.
            with open(filename, "w") as text_file:
                print(response.text, file=text_file)
            break
```
|
{
"source": "jcraver1021/flex-chess",
"score": 3
}
|
#### File: flex-chess/common/object.py
```python
from copy import deepcopy
from typing import Generator, List, NamedTuple, Optional, Tuple, Union
from common.common import Point, Tensor
from common.player import Player
class IllegalBoardState(Exception):
    """Raised when the board is moved into an illegal state."""
class Piece:
    """A piece belonging to a player."""

    def __init__(self,
                 player: Player,
                 token: str = 'X',
                 move_generators: Generator['Mutation', None, None] = None
                 ) -> None:
        self.player = player
        self.token = token
        self.move_generators = move_generators or []
        self._board = None
        self._place = None

    def find(self) -> Tuple['Board', Point]:
        """Get the board and the place where this piece is."""
        return self._board, self._place

    def place(self, board: 'Board', place: Point) -> None:
        """Place this piece on a board."""
        self._board = board
        self._place = place

    def get_moves(self) -> List[List['Mutation']]:
        """Get a list of all legal move sequences by this piece from here."""
        legal = []
        for generate in self.move_generators:
            for candidate in generate(self._board, self._place):
                # Trial-apply each sequence on a deep copy; the board
                # signals an illegal resulting position by raising.
                try:
                    trial_board = deepcopy(self._board)
                    trial_board.apply(candidate)
                except IllegalBoardState:
                    continue
                legal.append(candidate)
        return legal

    def __str__(self) -> str:
        return self.token
class Mutation(NamedTuple):
    """A mutation of the board state.

    ``source`` is where the arriving piece comes from: a Point (lift the
    piece currently at that coordinate), a Piece (introduce it directly),
    or None (clear whatever occupies ``target``). ``target`` is the
    coordinate being written. See Board.mutate for the interpretation.
    """
    source: Optional[Union[Point, Piece]]
    target: Point
class Board:
    """A game board with cartesian coordinates. Points are finite and at least 0."""

    def __init__(self, shape: Tuple) -> None:
        self.shape = Point(*shape)
        # Backing store: a Tensor holding one (possibly None) piece per cell.
        self._board = Tensor(self.shape)

    def __getitem__(self, point) -> Piece:
        """Return the piece at ``point`` (None when the square is empty)."""
        return self._board[point]

    def _place(self, piece: Optional[Piece], point: Optional[Point]) -> None:
        """Update a piece to the new location.

        Do nothing if both inputs are None.
        """
        # The two halves are independent: a None point takes the piece off
        # the board (only its back-reference is updated), while a None
        # piece clears the cell at point.
        if point:
            self._board[point] = piece
        if piece:
            piece.place(self, point)

    def mutate(self, mutation: Mutation) -> Optional[Piece]:
        """Mutate the board state.

        Args:
            mutation: The mutation to apply to the board.

        Returns:
            The piece displaced by this mutation, or None if no piece was displaced
        """
        # First lift whatever currently occupies the target square.
        current_piece = self[mutation.target]
        if current_piece:
            self._place(current_piece, None)
        new_piece = None
        if mutation.source:
            if isinstance(mutation.source, Point):
                # Source is a coordinate: move the piece found there and
                # clear its old square.
                new_piece = self[mutation.source]
                self._board[mutation.source] = None
            else:
                # Source is a Piece object being introduced to the board.
                new_piece = mutation.source
        self._place(new_piece, mutation.target)
        # Do not return if no-op move, since this represents capture/removal
        return current_piece if current_piece != new_piece else None
```
|
{
"source": "jcravi/plantuml-markdown",
"score": 2
}
|
#### File: plantuml-markdown/test/test_plantuml_legacy.py
```python
import unittest
from test.markdown_builder import MarkdownBuilder
from test.test_plantuml import PlantumlTest
class PlantumlTest_legacy(PlantumlTest):
    """Re-runs the shared PlantumlTest suite using the legacy '::uml::'
    block delimiter instead of the fenced-code syntax."""

    def setUp(self):
        super(PlantumlTest_legacy, self).setUp()
        # Setup testing with old block delimiter (I don't remember where I've seen this syntax)
        self.text_builder = MarkdownBuilder('::uml::')

if __name__ == '__main__':
    unittest.main()
```
#### File: plantuml-markdown/test/test_plantuml.py
```python
import os
import re
import markdown
import tempfile
from unittest import TestCase, SkipTest
import mock
class PlantumlTest(TestCase):
@classmethod
def setUpClass(cls):
if cls is PlantumlTest:
raise SkipTest("Base class")
super(PlantumlTest, cls).setUpClass()
def setUp(self):
self.md = markdown.Markdown(extensions=['markdown.extensions.fenced_code',
'pymdownx.snippets', 'plantuml_markdown'])
self.text_builder = None
def _load_file(self, filename):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'data', filename), 'r') as f:
return f.read()[:-1] # skip the last newline
FAKE_IMAGE = 'ABCDEF=='
IMAGE_REGEX = re.compile(r'<(?:img|.*object)(?:( alt=".*?")|( class=".*?")|( title=".*?")|( style=".*?")|( src=".*?")|(?:.*?))+/>')
BASE64_REGEX = re.compile(
r'("data:image/[a-z+]+;base64,)(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?')
@classmethod
def _stripImageData(cls, html):
def sort_attributes(groups):
alt = next(x for x in groups if x.startswith(' alt='))
title = next(x for x in groups if x.startswith(' title='))
classes = next(x for x in groups if x.startswith(' class='))
style = next(iter(x for x in groups if x and x.startswith(' style=')), None)
src = next(iter(x for x in groups if x and x.startswith(' src=')), None)
style = style if style and '""' not in style else ''
src = src if src and '""' not in src else ''
html = "<img{}{}{}{}{}/>".format(alt, title, classes, style, src)
return cls.BASE64_REGEX.sub(r'\1%s' % cls.FAKE_IMAGE, html)
return cls.IMAGE_REGEX.sub(lambda x: sort_attributes(x.groups()), html)
#return cls.BASE64_REGEX.sub(r'\1%s' % cls.FAKE_IMAGE, html)
FAKE_SVG = '...svg-body...'
SVG_REGEX = re.compile(r'<(?:\w+:)?svg(?:( alt=".*?")|( class=".*?")|( title=".*?")|( style=".*?")|(?:.*?))+>.*</(?:\w+:)?svg>')
@classmethod
def _stripSvgData(cls, html):
"""
Simplifies SVG tags to easy comparing.
:param html: source HTML
:return: HTML code with simplified svg tags
"""
def sort_attributes(groups):
"""
Sorts attributes in a specific order.
:param groups: matched attributed groups
:return: a SVG tag string source
"""
alt = next(x for x in groups if x.startswith(' alt='))
title = next(x for x in groups if x.startswith(' title='))
classes = next(x for x in groups if x.startswith(' class='))
style = next(iter(x for x in groups if x and x.startswith(' style=')), None)
style = style if style and '""' not in style else ''
return "<svg{}{}{}{}>{}</svg>".format(alt, title, classes, style, cls.FAKE_SVG)
return cls.SVG_REGEX.sub(lambda x: sort_attributes(x.groups()), html)
def test_priority_after_snippets(self):
"""
Verifies the normal priority of the plantuml_markdown plugin: it must be execute before the fenced code
but after the snippets plugin.
"""
self._test_snippets(30, 'A --> B\n')
def test_priority_before_snippets(self):
"""
Verifies changing plugin priority: in must be execute even before the snippets plugin.
:return:
"""
# raising priority, so the plantuml plugin is executed before the snippet plugin
# expecting that the snippet is not inserted in the plantuml source code
self._test_snippets(40, '--8<-- "'+os.path.join(tempfile.gettempdir(), 'test-defs.puml')+'"\n')
def _test_snippets(self, priority, expected):
"""
Verifies the execution order with the snippets plugin.
If priority is lower than 32, the snippets plugin has priority; if greater, the
plantml_markdown plugin has priority over the snippets plugin.
:param priority: execution priority of the plantuml_markdown plugin
:param expected: expected generated plantuml source code
"""
self.md = markdown.Markdown(extensions=['markdown.extensions.fenced_code',
'pymdownx.snippets', 'plantuml_markdown'],
extension_configs={
'plantuml_markdown': {
'priority': priority
}
})
tempdir = tempfile.gettempdir()
defs_file = os.path.join(tempdir, 'test-defs.puml')
# preparing a file to include
with open(defs_file, 'w') as f:
f.write('A --> B')
from test.markdown_builder import MarkdownBuilder
from plantuml_markdown import PlantUMLPreprocessor
# mocking a method to capture the generated PlantUML source code
with mock.patch.object(PlantUMLPreprocessor, '_render_diagram',
return_value='testing'.encode('utf8')) as mocked_plugin:
text = self.text_builder.diagram("--8<-- \"" + defs_file + "\"").build()
self.md.convert(text)
mocked_plugin.assert_called_with(expected, 'png')
def test_arg_title(self):
"""
Test for the correct parsing of the title argument
"""
text = self.text_builder.diagram("A --> B").title("Diagram test").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" title="Diagram test" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_title_characters(self):
"""
Test for the correct parsing of the title argument with special characters
"""
text = self.text_builder.diagram("A --> B").title("Diagram-test/%&\"").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" title="Diagram-test/%%&"" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_title_inline_svg(self):
"""
Test for setting title attribute in inline SVG
"""
text = self.text_builder.diagram("A --> B").format("svg_inline").title("Diagram test").build()
self.assertEqual(
self._stripImageData('<p><svg alt="uml diagram" title="Diagram test" class="uml">%s</svg></p>' % self.FAKE_SVG),
self._stripSvgData(self.md.convert(text)))
def test_arg_alt(self):
"""
Test for the correct parsing of the alt argument
"""
text = self.text_builder.diagram("A --> B").alt("Diagram test").build()
self.assertEqual(
self._stripImageData('<p><img alt="Diagram test" class="uml" src="data:image/png;base64,%s" title="" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_alt_characters(self):
"""
Test for the correct parsing of the alt argument with special characters
"""
text = self.text_builder.diagram("A --> B").alt("Diagram-test/%&\"").build()
self.assertEqual(
self._stripImageData('<p><img alt="Diagram-test/%%&"" class="uml" src="data:image/png;base64,%s" title="" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_alt_inline_svg(self):
"""
Test for setting alt attribute in inline SVG
"""
text = self.text_builder.diagram("A --> B").format("svg_inline").alt("Diagram test").build()
self.assertEqual(
self._stripImageData('<p><svg alt="Diagram test" title="" class="uml">%s</svg></p>' % self.FAKE_SVG),
self._stripSvgData(self.md.convert(text)))
def test_arg_classes(self):
"""
Test for the correct parsing of the classes argument
"""
text = self.text_builder.diagram("A --> B").classes("class1 class2").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="class1 class2" src="data:image/png;base64,%s" title="" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_classes_inline_svg(self):
"""
Test for setting class attribute in inline SVG
"""
text = self.text_builder.diagram("A --> B").format("svg_inline").classes("class1 class2").build()
self.assertEqual(
self._stripImageData('<p><svg alt="uml diagram" title="" class="class1 class2">%s</svg></p>' % self.FAKE_SVG),
self._stripSvgData(self.md.convert(text)))
def test_arg_format_png(self):
"""
Test for the correct parsing of the format argument, generating a png image
"""
text = self.text_builder.diagram("A --> B").format("png").build()
self.assertEqual(self._stripImageData(self._load_file('png_diag.html')),
self._stripImageData(self.md.convert(text)))
def test_arg_format_svg(self):
"""
Test for the correct parsing of the format argument, generating a svg image
"""
text = self.text_builder.diagram("A --> B").format("svg").build()
self.assertEqual(self._stripImageData(self._load_file('svg_diag.html')),
self._stripImageData(self.md.convert(text)))
def test_arg_format_svg_object(self):
"""
Test for the correct parsing of the format argument, generating a svg image
"""
text = self.text_builder.diagram("A --> B").format("svg_object").build()
self.assertEqual(self._stripImageData(self._load_file('svg_object_diag.html')),
self._stripImageData(self.md.convert(text)))
def test_arg_format_svg_inline(self):
"""
Test for the correct parsing of the format argument, generating a svg image
"""
text = self.text_builder.diagram("A --> B").format("svg_inline").build()
self.assertEqual(self._stripSvgData(self._load_file('svg_inline_diag.html')),
self._stripSvgData(self.md.convert(text)))
def test_arg_format_txt(self):
"""
Test for the correct parsing of the format argument, generating a txt image
"""
text = self.text_builder.diagram("A --> B").format("txt").build()
self.assertEqual(self._load_file('txt_diag.html'),
self.md.convert(text))
def test_arg_width(self):
"""
Test for the correct parsing of the width argument
"""
text = self.text_builder.diagram("A --> B").width("120px").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" style="max-width:120px" title="" width="100%%" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_with_percent(self):
"""
Test for the correct parsing of the width argument
"""
text = self.text_builder.diagram("A --> B").width("70%").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" style="max-width:70%%" title="" width="100%%" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_height(self):
    """
    Test for the correct parsing of the height argument
    """
    text = self.text_builder.diagram("A --> B").height("120px").build()
    self.assertEqual(
        self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" style="max-height:120px" title="" width="100%%" /></p>' % self.FAKE_IMAGE),
        self._stripImageData(self.md.convert(text)))
def test_arg_height_percent(self):
    """
    Test for the correct parsing of the height argument given as a percentage
    """
    text = self.text_builder.diagram("A --> B").height("50%").build()
    self.assertEqual(
        self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" style="max-height:50%%" title="" width="100%%" /></p>' % self.FAKE_IMAGE),
        self._stripImageData(self.md.convert(text)))
def test_arg_width_and_height(self):
"""
Test for the correct parsing of the width and height arguments
"""
text = self.text_builder.diagram("A --> B").width("120px").height("120px").build()
self.assertEqual(
self._stripImageData('<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" style="max-width:120px;max-height:120px" title="" width="100%%" /></p>' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_arg_format_width_svg_inline(self):
    """
    Test that the width argument is applied as an inline style on inline SVG output
    """
    text = self.text_builder.diagram("A --> B").format("svg_inline").width("120px").build()
    self.assertEqual(self._stripSvgData('<p><svg alt="uml diagram" title="" class="uml" style="max-width:120px">...svg-body...</svg></p>'),
                     self._stripSvgData(self.md.convert(text)))
def test_arg_format_height_svg_inline(self):
    """
    Test that the height argument is applied as an inline style on inline SVG output
    """
    text = self.text_builder.diagram("A --> B").format("svg_inline").height("120px").build()
    self.assertEqual(self._stripSvgData('<p><svg alt="uml diagram" title="" class="uml" style="max-height:120px">...svg-body...</svg></p>'),
                     self._stripSvgData(self.md.convert(text)))
def test_arg_format_width_and_height_svg_inline(self):
    """
    Test that combined width and height arguments are applied as inline styles on inline SVG output
    """
    text = self.text_builder.diagram("A --> B").format("svg_inline").width('120px').height("120px").build()
    self.assertEqual(self._stripSvgData('<p><svg alt="uml diagram" title="" class="uml" style="max-width:120px;max-height:120px">...svg-body...</svg></p>'),
                     self._stripSvgData(self.md.convert(text)))
def test_multidiagram(self):
    """
    Test for the definition of multiple diagrams on the same document
    """
    text = self.text_builder.text('Paragraph before.\n\n') \
                            .diagram('A --> B') \
                            .text('\nMiddle paragraph.\n\n') \
                            .diagram('A <- B') \
                            .text('\nParagraph after.\n\n') \
                            .build()
    self.assertEqual(self._stripImageData(self._load_file('multiple_diag.html')),
                     self._stripImageData(self.md.convert(text)))
def test_other_fenced_code(self):
"""
Test the coexistence of diagrams and other fenced code
"""
text = self.text_builder.text('```bash\nls -l\n```\n') \
.text('\nA paragraph\n\n') \
.diagram('A --> B') \
.text('\nAnother paragraph\n') \
.build()
self.assertEqual(self._stripImageData(self._load_file('code_and_diag.html')),
self._stripImageData(self.md.convert(text)))
def test_indented_fenced_code(self):
"""
Test handling of indented fenced code
"""
text = self.text_builder.text('* list item\n\n') \
.indent(4) \
.diagram('A --> B') \
.build()
self.assertEqual(self._stripImageData('''<ul>
<li>
<p>list item</p>
<p><img alt="uml diagram" class="uml" src="data:image/png;base64,%s" title="" /></p>
</li>
</ul>''' % self.FAKE_IMAGE),
self._stripImageData(self.md.convert(text)))
def test_unicode_chars(self):
    """
    Test that svg_inline handles correctly utf8 characters
    """
    # Example diagram from issue 21
    text = self.text_builder.diagram(u'Alicja -> Łukasz: "Zażółć gęślą jaźń"')\
                            .format("svg_inline")\
                            .build()
    svg = self.md.convert(text)
    self.assertTrue('Alicja' in svg)
    self.assertTrue('Łukasz' in svg)
    self.assertTrue('"Zażółć gęślą jaźń"' in svg)
```
|
{
"source": "jcrawford888/mbc",
"score": 4
}
|
#### File: mbc/mbc/mbc.py
```python
import requests
import html2text
import random
import os
import pickle
# Just a magic number to distinguish different formats
MAGIC = 0x1
# Encryption methods
# FAST - all characters lower cased
# FLEX - attempts to find a capital Letter in book, otherwise uses lower case. (default)
# STRICT - Match the message exactly, fails if can't find capitals
METHOD_FAST = 1
METHOD_FLEX = 2
METHOD_STRICT = 3
def encrypts(message=None, url=None, method=METHOD_FLEX):
    """
    Encrypt the message string specified in the parameters. Use the contents of the url as the 'book' for
    encrypting the contents. NOTE: html tags are removed from the book contents; the text is lower-cased
    only when METHOD_FAST is used.

    :param message: parameter containing the message to encrypt
    :param url: the url pointing to a page of text that will be used as the 'book'
    :param method: encryption method (METHOD_FAST, METHOD_FLEX or METHOD_STRICT)
    :raises ValueError: if message or url is missing, or a character cannot be encoded
    :return: array of integer offsets into the 'book'
    """
    if not message:
        raise ValueError("Missing message to encrypt")
    if not url:
        raise ValueError("Missing url")
    try:
        book = _prepare_book(url, method)
    except Exception:
        raise

    def _encode_char(ch, book, offsetmap):
        # Pick a random starting point and walk forward until ch is found.
        # Returns the chosen offset, or -1 when ch cannot be encoded (absent
        # from the book, or every candidate offset was already used).
        attempts = 0
        while True:
            # find a random offset in the book then search forward to find the character
            offset = random.randint(0, len(book) - 1)
            if ch != book[offset]:
                # hunt forward for a character (loop to beginning if not found)
                # abort if we looped and hit this index again
                offsetorig = offset
                looped = False
                while True:
                    offset = offset + 1
                    if offset >= len(book):
                        looped = True
                        offset = 0
                    if looped and offset >= offsetorig:
                        # looped once and couldn't find a character in the book to encode the message character, abort
                        return -1
                    if ch == book[offset]:
                        # found the character we need
                        break
            # make sure we don't use the same offset more than once in the encoded message
            if offset not in offsetmap:
                return offset
            attempts += 1
            if attempts >= 3:
                # prevent infinite loop if char only appears once in the book
                return -1

    offsetmap = {}
    offsets = []
    for ch in message:
        if method == METHOD_FAST:
            ch = ch.lower()
        bk_offset = _encode_char(ch, book, offsetmap)
        if bk_offset < 0 and method == METHOD_FLEX:
            # try again with lower cased char
            bk_offset = _encode_char(ch.lower(), book, offsetmap)
        if bk_offset < 0:
            raise ValueError(
                f"Could not find a character in the book to encode the message character '{ch}'."
            )
        # Remember the offset so it is never reused within this message.
        offsetmap[bk_offset] = 1
        offsets.append(bk_offset)
    return offsets
def encrypt(infilename=None, url=None, method=METHOD_FLEX):
    """
    Encrypt the contents of the file specified in the parameters. Use the contents of the url as the
    'book' for encrypting the contents. NOTE: html tags are removed from the book contents.

    :param infilename: file containing the message to encrypt
    :param url: the url pointing to a page of text that will be used as the 'book'
    :param method: encryption method
    :raises ValueError: if url or infilename is missing
    :return: array of integer offsets into the 'book'
    """
    if not url:
        raise ValueError("Missing url")
    if not infilename:
        raise ValueError("Missing input file")
    # Context manager guarantees the handle is closed even if reading
    # raises (the original left it open on error).
    with open(infilename, "r") as fh:
        lines = list(fh)
    # Join with spaces; each line keeps its trailing newline, matching
    # the original behaviour.
    message = " ".join(lines)
    return encrypts(message, url, method)
def decrypts(cipher_arr=None, url=None, method=METHOD_FLEX):
    """
    Decrypt an array of cipher offsets that has been encoded using mbc.

    :param cipher_arr: The cipher array encrypted using the encryption routine
    :param url: same url as used by the encryption routine
    :param method: encryption method (must match the one used to encrypt)
    :raises ValueError: for missing arguments, an empty book or an out-of-range offset
    :return: string containing the decrypted message
    """
    if not url:
        raise ValueError("Missing url")
    if not cipher_arr:
        raise ValueError("Missing cipher array")
    try:
        book = _prepare_book(url, method)
    except Exception:
        raise
    if len(book) == 0:
        raise ValueError("Book is empty")
    message_arr = []
    for offset in cipher_arr:
        # Each offset indexes a single character of the book text.
        if offset >= len(book) or offset < 0:
            raise ValueError(f"Bad offset ({offset}). Out of bounds for this book")
        message_arr.append(book[offset])
    message = "".join(message_arr)
    return message
def decrypt(infilename=None, url=None):
    """
    Decrypt a file that has been encoded using mbc.

    :param infilename: The cipher text file encrypted using the encryption routine
    :param url: same url as used by the encryption routine
    :raises ValueError: for missing/invalid arguments or a malformed cipher file
    :return: string containing the decrypted message
    """
    if not url:
        raise ValueError("Missing url")
    if not infilename:
        raise ValueError("Missing input file")
    if not os.path.isfile(infilename):
        raise ValueError(f"Unable to open file: {infilename}")
    # NOTE(review): pickle.load on an untrusted cipher file can execute
    # arbitrary code — only decrypt files from trusted sources.
    with open(infilename, "rb") as fh:
        header = pickle.load(fh)
        if not header or header != "mbc":
            raise ValueError("Invalid mbc cipher file")
        # We may use this later if we have different versions of the output format
        magicnum = pickle.load(fh)
        if magicnum != MAGIC:
            raise ValueError("Bad Magic Number")
        # Get the encryption method used to encrypt the file
        method = pickle.load(fh)
        # load the cipher text offsets
        cipher_arr = pickle.load(fh)
        return decrypts(cipher_arr, url, method)
def _prepare_book(url_link, method=METHOD_FLEX):
    """Fetch the page at *url_link* and return its plain-text 'book'.

    Images, links and tables are dropped during the HTML-to-text
    conversion; METHOD_FAST additionally lower-cases the whole text.
    """
    response = requests.get(url_link)
    converter = html2text.HTML2Text()
    converter.ignore_images = True
    converter.ignore_links = True
    converter.ignore_tables = True
    # extract the text from the url (e.g. 'book')
    text = converter.handle(response.text)
    return text.lower() if method == METHOD_FAST else text
def write(outfile, data, method=METHOD_FLEX):
    """
    Routine to write out the encrypted data to a file
    convert the ints to a binary format and put a small header in the front. e.g.) mbc

    :param outfile: the name of the file to write the encrypted contents into
    :param data: array of offsets to write (encrypted message)
    :param method: algorithm identifier (e.g fast, flex, strict)
    :raises ValueError: if the output file already exists
    :return: None
    """
    if os.path.isfile(outfile):
        raise ValueError(f"File {outfile} already exists. aborting.")
    # Context manager guarantees the handle is closed even if a dump fails
    # (the original used an explicit open/close pair).
    with open(outfile, "wb") as fh:
        # write the header
        pickle.dump("mbc", fh)
        # write the magic number
        pickle.dump(MAGIC, fh)
        # write the algorithm id used to encrypt the data
        pickle.dump(method, fh)
        # write the data
        pickle.dump(data, fh)
```
|
{
"source": "JC-R/cluster-semantic-vectors",
"score": 3
}
|
#### File: JC-R/cluster-semantic-vectors/cluster_vectors.py
```python
from __future__ import division
from sklearn.cluster import MiniBatchKMeans
from numbers import Number
from pandas import DataFrame
import sys, codecs, numpy, json
class autovivify_list(dict):
    '''Pickleable stand-in for collections.defaultdict(list).'''

    def __missing__(self, key):
        # First access to an unknown key stores and returns a fresh list.
        fresh = []
        self[key] = fresh
        return fresh

    def __add__(self, x):
        '''Override addition for numeric types when self is empty'''
        if self or not isinstance(x, Number):
            raise ValueError
        return x

    def __sub__(self, x):
        '''Also provide subtraction method'''
        if self or not isinstance(x, Number):
            raise ValueError
        return -1 * x
def build_word_vector_matrix(vector_file, n_words):
    '''Iterate over the GloVe array read from sys.argv[1] and return its vectors and labels as arrays.

    Each valid line is "<word> <300 floats>"; lines with a different field
    count are skipped.  Reading stops once the line counter reaches n_words.

    :param vector_file: path to the GloVe text file (latin1 encoded)
    :param n_words: line index at which to stop reading
    :return: tuple (numpy array of shape (rows, 300), list of word labels)
    '''
    numpy_arrays = []
    labels_array = []
    with codecs.open(vector_file, 'r', 'latin1') as f:
        for c, r in enumerate(f):
            sr = r.lower().split()
            if len(sr) != 301:
                continue
            try:
                vec = numpy.array([float(i) for i in sr[1:]])
            except ValueError:
                # Python 3 print (the original used Python 2 `print c, len(sr)`,
                # a syntax error under Python 3).
                print(c, len(sr))
            else:
                # Append label and vector together so the two lists can
                # never get out of sync (the original appended the label
                # before the float conversion could fail).
                labels_array.append(sr[0])
                numpy_arrays.append(vec)
            if c == n_words:
                return numpy.array(numpy_arrays), labels_array
    return numpy.array(numpy_arrays), labels_array
def find_word_clusters(labels_array, cluster_labels):
    '''Read in the labels array and clusters label and return the set of words in each cluster'''
    cluster_to_words = autovivify_list()
    # Words share an index with their cluster assignment; group by the
    # stringified cluster id (string keys survive JSON serialization).
    for idx, cluster_id in enumerate(cluster_labels):
        cluster_to_words[str(cluster_id)].append(labels_array[idx])
    return cluster_to_words
if __name__ == "__main__":
    # CLI: <vector_file> <n_words> <reduction_factor>
    input_vector_file = sys.argv[1]
    n_words = int(sys.argv[2])
    reduction_factor = float(sys.argv[3])
    # The number of clusters is a fixed fraction of the vocabulary size.
    clusters_to_make = int( n_words * reduction_factor )
    df, labels_array = build_word_vector_matrix(input_vector_file, n_words)
    kmeans_model = MiniBatchKMeans(init='k-means++', n_clusters=clusters_to_make)
    kmeans_model.fit(df)
    cluster_labels = kmeans_model.labels_
    cluster_inertia = kmeans_model.inertia_
    cluster_to_words = find_word_clusters(labels_array, cluster_labels)
    # Cluster id -> word list mapping, dumped as JSON for later inspection.
    with open("glove_clusters_" + str(sys.argv[2]) + "_words.json",'w') as json_out:
        json.dump(cluster_to_words, json_out)
    '''
    for c in cluster_to_words:
        print cluster_to_words[c]
        print "\n"
    '''
```
|
{
"source": "jcrd/devlog",
"score": 3
}
|
#### File: devlog/devlog/config.py
```python
from configparser import ConfigParser
from pathlib import Path
CONFIG_NAME = ".devlog.conf"
DEFAULT_CONFIG = """# [options]
# auto_push = true
# title =
"""
class Config(ConfigParser):
    """
    A configuration object.
    """

    @property
    def auto_push(self):
        """
        The state of the auto push option.

        :return: `True` if set (or absent), otherwise `False`
        """
        # getboolean() parses "true"/"false"/"1"/"0"/"yes"/"no" into a real
        # bool.  The original used get(), which returns the raw string —
        # and the non-empty string "false" is truthy.
        return self.getboolean("options", "auto_push", fallback=True)
def new_config(path, init=False):
    """
    Create a new config object.

    :param path: Path to the directory containing the config file
    :param init: Write a default config file if `True`, defaults to `False`
    :return: A `Config` object
    """
    config_path = Path(path, CONFIG_NAME).resolve()
    config = Config()
    if config_path.is_file():
        config.read(config_path)
    elif init:
        # No existing file: seed one with the commented-out defaults.
        config_path.write_text(DEFAULT_CONFIG)
    return config
```
#### File: devlog/devlog/editor.py
```python
import os
import subprocess
import shutil
DEFAULT_METADATA = """---
title: {title} | {date}
date: {date}
author: {author}
---
"""
class Editor:
    """
    An editor interface.

    :param config: Config object
    :param cmd: Editor command. Falls back to the environment variables \
    `DEVLOG_EDITOR` or `EDITOR`, then the command "vim"
    :raises FileNotFoundError: Raised when the chosen command is not on PATH
    """
    def __init__(self, config, cmd=None):
        self.config = config
        if cmd:
            self.cmd = cmd
        else:
            self.cmd = os.environ.get("DEVLOG_EDITOR")
            if not self.cmd:
                self.cmd = os.environ.get("EDITOR", "vim")
        # Fail fast if the editor binary cannot be found.
        if not shutil.which(self.cmd):
            raise FileNotFoundError(self.cmd)

    def _set_config_opt(self, kwargs, opt, value):
        # Config file wins over the caller-supplied kwarg; *value* is the
        # final fallback when neither is present.
        kwargs[opt] = self.config.get("options", opt, fallback=kwargs.get(opt, value))

    def edit(self, file, **kwargs):
        """
        Edit file with editor.

        New files are seeded with metadata front matter built from *kwargs*.

        :param file: Pathlib-based file path
        :param \**kwargs: Keyword arguments corresponding to file metadata
        :raises CalledProcessError: Raised when the editor exits non-zero
        """
        if not file.is_file():
            self._set_config_opt(kwargs, "title", "unknown")
            file.write_text(DEFAULT_METADATA.format(**kwargs))
        # cmd may contain arguments (e.g. "code -w"), hence the split().
        subprocess.run(self.cmd.split() + [str(file)], check=True)
```
#### File: devlog/devlog/__main__.py
```python
from subprocess import CalledProcessError
import argparse
import sys
from devlog.config import new_config
from devlog.editor import Editor
from devlog.repo import GitRepo
def get_repo(path):
    """Return a GitRepo at *path*, exiting with an error if uninitialized."""
    try:
        return GitRepo(path)
    except FileNotFoundError:
        # The .devlog directory does not exist yet.
        sys.stderr.write("Repo uninitialized; run `devlog init`\n")
        sys.exit(1)
def cmd_init(args):
    # `devlog init`: create the git repo and a default config file.
    print("Initializing git repo...")
    GitRepo(args.directory, init=True)
    print("Initializing config file...")
    new_config(args.directory, init=True)
def cmd_remote(args):
    # `devlog remote <URL>`: point the repo at a remote origin.
    repo = get_repo(args.directory)
    repo.set_remote(args.url)
def cmd_push(args):
    # `devlog push`: update the remote, mapping failures to exit code 1.
    repo = get_repo(args.directory)
    try:
        if repo.push() == GitRepo.PushStatus.NO_REMOTE:
            sys.stderr.write("No remote; run `devlog remote <URL>`\n")
            sys.exit(1)
    except GitRepo.PushError as err:
        sys.stderr.write(str(err))
        sys.exit(1)
def cmd_pull(args):
    # `devlog pull`: fetch entries from the remote, forwarding git's
    # stderr and exit code on failure.
    repo = get_repo(args.directory)
    try:
        if not repo.pull():
            sys.stderr.write("No remote; run `devlog remote <URL>`\n")
            sys.exit(1)
    except CalledProcessError as err:
        sys.stderr.write(err.stderr)
        sys.exit(err.returncode)
def main():
    """CLI entry point: dispatch a subcommand or open today's entry."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", default=".", help="Path to dev directory")
    parser.add_argument("-e", "--editor", help="Editor to use")
    subp = parser.add_subparsers(title="commands", dest="cmd")
    initp = subp.add_parser("init", help="Initialize repo")
    initp.set_defaults(cmd=cmd_init)
    remotep = subp.add_parser("remote", help="Set remote URL")
    remotep.add_argument("url", help="Remote URL")
    remotep.set_defaults(cmd=cmd_remote)
    pushp = subp.add_parser("push", help="Update remote")
    pushp.set_defaults(cmd=cmd_push)
    pullp = subp.add_parser("pull", help="Pull from remote")
    pullp.set_defaults(cmd=cmd_pull)
    args = parser.parse_args()
    # Subcommands handle everything themselves and exit.
    if args.cmd:
        args.cmd(args)
        sys.exit()
    # No subcommand: default workflow — auto-push if configured, then open
    # today's entry in the editor.
    config = new_config(args.directory)
    repo = get_repo(args.directory)
    if config.auto_push:
        try:
            repo.auto_push()
        except GitRepo.PushError as err:
            sys.stderr.write(str(err))
            sys.exit(1)
    try:
        editor = Editor(config, cmd=args.editor)
    except FileNotFoundError as error:
        # The chosen editor command is not on PATH.
        sys.stderr.write("Command not found: {}\n".format(error.args[0]))
        sys.exit(2)
    try:
        repo.edit_today(editor)
    except GitRepo.CommitError as err:
        sys.stderr.write(str(err))
        sys.exit(1)
if __name__ == "__main__":
main()
```
#### File: devlog/devlog/repo.py
```python
from datetime import date, datetime
from enum import Enum, auto
from pathlib import Path
import subprocess
GIT_REPO = ".devlog"
GIT_BRANCH = "devlog"
class GitRepo:
    """
    A git repo interface.

    The repository lives in a `.devlog` subdirectory of *path*; entries are
    committed to an orphan `devlog` branch, one commit per day, with the
    entry date as the commit message.

    :param path: Path to directory containing git repo
    :param init: Initialize git repo if `True`, defaults to `False`
    """
    # Commit messages double as entry dates, parsed with this format.
    DATE_FORMAT = "%Y-%m-%d"

    class CommitError(Exception):
        """
        An exception raised when committing fails.
        """

    class PushError(Exception):
        """
        An exception raised when updating remote fails.
        """

    class CommitStatus(Enum):
        """
        Status of commit made after editing an entry.
        """
        NEW = auto()
        AMEND = auto()

    class PushStatus(Enum):
        """
        Status of push command.
        """
        NO_REMOTE = auto()
        INACTION = auto()
        DRY_RUN = auto()
        SUCCESS = auto()

    def __init__(self, path, init=False):
        path = Path(path).resolve()
        self.name = path.name
        # strict=True makes resolve() raise FileNotFoundError when the
        # .devlog directory is missing and init was not requested.
        self.path = Path(path, GIT_REPO).resolve(strict=not init)
        if init:
            if not self.path.is_dir():
                self.path.mkdir()
            if not Path(self.path, ".git").is_dir():
                self._git("init", "-q")
                # The orphan branch keeps devlog history separate from any
                # pre-existing history.
                self._git("checkout", "--orphan", GIT_BRANCH)

    def _git(self, *args, check=True, **kwargs):
        # Run a git command inside the repo directory.
        return subprocess.run(
            ["git", "-C", str(self.path)] + list(args), check=check, **kwargs
        )

    def _git_dry_run(self, *args):
        # Dry-run variant: never raises, output captured as text.
        return self._git(
            *args, "--dry-run", check=False, capture_output=True, text=True
        )

    def _username(self):
        # Configured git user name, or "unknown" when unset.
        ret = self._git(
            "config", "user.name", check=False, capture_output=True, text=True
        )
        if ret.returncode != 0:
            return "unknown"
        return ret.stdout.rstrip()

    def last_commit_date(self):
        """
        Get date of last commit.

        :return: Date of last commit, or None when the branch has no commits
        """
        ret = self._git(
            "log",
            "-1",
            "--pretty=format:%B",
            check=False,
            capture_output=True,
            text=True,
        )
        # git log exits with 128 on a branch with no commits yet.
        if ret.returncode == 128:
            return None
        # The commit message body is the entry date itself.
        return datetime.strptime(ret.stdout.rstrip(), self.DATE_FORMAT).date()

    def _check_remote(self):
        # `git remote get-url` exits with 2 when the remote does not exist.
        return (
            self._git(
                "remote", "get-url", "origin", check=False, capture_output=True
            ).returncode
            != 2
        )

    def edit_today(self, editor, today=None):
        """
        Edit entry for current or specified date.

        Entries are stored as `<year>/<month>/<day>.md`; a same-day commit
        is amended rather than duplicated.

        :param editor: Editor instance
        :param today: Date of entry to edit, defaults to current date
        :raises CommitError: Raised when committing fails
        :return: Commit status, one of `GitRepo.CommitStatus`
        """
        if not today:
            today = date.today()
        path = Path(self.path, str(today.year), str(today.month))
        file = Path(path, str(today.day) + ".md")
        path.mkdir(parents=True, exist_ok=True)
        datefmt = today.strftime(self.DATE_FORMAT)
        editor.edit(file, title=self.name, date=datefmt, author=self._username())
        self._git("add", str(file))
        # A dry-run commit exiting 1 means there is nothing to commit
        # (the entry content did not change).
        ret = self._git_dry_run("commit")
        if ret.returncode == 1:
            raise self.CommitError(ret.stdout.rstrip())
        cmd = ["commit", "-m", datefmt]
        status = self.CommitStatus.NEW
        if self.last_commit_date() == today:
            # Editing the same day twice amends the existing commit.
            cmd.append("--amend")
            status = self.CommitStatus.AMEND
        self._git(*cmd)
        return status

    def set_remote(self, url):
        """
        Set remote URL.

        :param url: The remote URL
        """
        self._git("remote", "add", "origin", url)

    def push(self, dry_run=False):
        """
        Update remote.

        :param dry_run: If `True`, don't update remote, only return status
        :raises PushError: Raised when updating remote fails
        :return: Push status, one of `GitRepo.PushStatus`
        """
        if not self._check_remote():
            return self.PushStatus.NO_REMOTE
        if dry_run:
            # NOTE(review): returns before probing the remote, so DRY_RUN
            # does not guarantee a real push would succeed.
            return self.PushStatus.DRY_RUN
        # Probe with a dry-run push first to surface errors as PushError.
        ret = self._git_dry_run("push", "origin")
        if ret.returncode > 0:
            raise self.PushError(ret.stderr.rstrip())
        # NOTE(review): dry_run is always False here (handled above), so
        # this condition is redundant.
        if not dry_run:
            self._git("push", "origin")
        return self.PushStatus.SUCCESS

    def auto_push(self, today=None, dry_run=False):
        """
        Automatically update remote if the last commit date is before today.

        :param today: Date to use as condition, defaults to current date
        :param dry_run: If `True`, don't update remote, only return status
        :raises PushError: Raised when updating remote fails
        :return: Push status, one of `GitRepo.PushStatus`
        """
        if not today:
            today = date.today()
        last_date = self.last_commit_date()
        # Only push once the latest entry belongs to a past day.
        if last_date and last_date < today:
            return self.push(dry_run=dry_run)
        return self.PushStatus.INACTION

    def pull(self):
        """
        Pull from remote.

        :raises CalledProcessError: Raised when pull command fails
        :return: `True` if pulling succeeds, `False` otherwise
        """
        if not self._check_remote():
            return False
        self._git("pull", "origin", GIT_BRANCH, capture_output=True, text=True)
        return True
```
#### File: devlog/test/test_repo.py
```python
from datetime import date
import tempfile
import unittest
from devlog.repo import GitRepo
class RepoCtx:
    # Context manager yielding a throwaway GitRepo in a temp directory.
    def __init__(self):
        self.dir = tempfile.TemporaryDirectory()
        self.repo = GitRepo(self.dir.name, init=True)
        # Commits require an identity; configure one local to the test repo.
        self.repo._git("config", "user.email", "<EMAIL>")
        self.repo._git("config", "user.name", "<NAME>")

    def __enter__(self):
        return self.repo

    def __exit__(self, *_):
        self.dir.cleanup()
class Editor:
    """Minimal Editor stand-in: writes canned text instead of spawning an editor."""

    def __init__(self):
        self.text = ""

    def edit(self, file, **_):
        # Mimic an editing session by dumping the preset text into the file.
        file.write_text(self.text)
class TestGitRepo(unittest.TestCase):
    """End-to-end tests for GitRepo against real temporary git repositories."""

    def setUp(self):
        self.editor = Editor()
        # Fixed dates make the new/amend commit behaviour deterministic.
        self.yesterday = date(2021, 1, 1)
        self.today = date(2021, 1, 2)

    def test_commit_status(self):
        with RepoCtx() as repo:
            def assert_status(text, today, status):
                # Edit the entry with canned text and check the commit kind.
                self.editor.text = text
                ret = repo.edit_today(self.editor, today=today)
                self.assertEqual(ret, status)
            assert_status("1", self.yesterday, GitRepo.CommitStatus.NEW)
            assert_status("1", self.today, GitRepo.CommitStatus.NEW)
            # Re-editing without changing the text leaves nothing to commit.
            with self.assertRaises(GitRepo.CommitError):
                repo.edit_today(self.editor, today=self.today)
            # Same-day edits amend the existing commit.
            assert_status("1 2", self.today, GitRepo.CommitStatus.AMEND)
            assert_status("1 2 3", self.today, GitRepo.CommitStatus.AMEND)

    def test_push_status(self):
        with RepoCtx() as repo:
            ret = repo.push(dry_run=True)
            self.assertEqual(ret, GitRepo.PushStatus.NO_REMOTE)
            # A remote that cannot be pushed to surfaces as PushError.
            repo.set_remote("https://github.com/jones/test.git")
            with self.assertRaises(GitRepo.PushError):
                repo.push()

    def test_last_commit_date(self):
        with RepoCtx() as repo:
            self.assertIsNone(repo.last_commit_date())
            repo.edit_today(self.editor, today=self.today)
            self.assertEqual(repo.last_commit_date(), self.today)

    def test_auto_push(self):
        with RepoCtx() as repo:
            repo.set_remote("https://github.com/jones/test.git")
            repo.edit_today(self.editor, today=self.yesterday)
            # Last commit is older than "today" -> a push would happen.
            ret = repo.auto_push(today=self.today, dry_run=True)
            self.assertEqual(ret, GitRepo.PushStatus.DRY_RUN)
            repo.edit_today(self.editor, today=self.today)
            # Already up to date -> nothing to push.
            ret = repo.auto_push(today=self.today, dry_run=True)
            self.assertEqual(ret, GitRepo.PushStatus.INACTION)
```
|
{
"source": "jcrd/lifelight",
"score": 3
}
|
#### File: python/samples/rotating-block-generator.py
```python
from samplebase import SampleBase
import math
def scale_col(val, lo, hi):
    """Map *val* linearly from [lo, hi] onto the 0..255 color range.

    Values below lo clamp to 0; values above hi clamp to 255.
    """
    if val < lo:
        return 0
    if val > hi:
        return 255
    return (val - lo) * 255 / (hi - lo)
def rotate(x, y, sin, cos):
    """Rotate point (x, y) about the origin given the angle's sine and cosine."""
    new_x = x * cos - y * sin
    new_y = x * sin + y * cos
    return new_x, new_y
class RotatingBlockGenerator(SampleBase):
    """Draws a colored square spinning about the center of the LED matrix."""

    def __init__(self, *args, **kwargs):
        super(RotatingBlockGenerator, self).__init__(*args, **kwargs)

    def run(self):
        # Render loop: advance the rotation one degree per frame, forever.
        cent_x = self.matrix.width / 2
        cent_y = self.matrix.height / 2
        # 1.41 ~ sqrt(2): the rotation square must cover the displayed
        # square's diagonal at any angle.
        rotate_square = min(self.matrix.width, self.matrix.height) * 1.41
        min_rotate = cent_x - rotate_square / 2
        max_rotate = cent_x + rotate_square / 2
        # Side of the square actually displayed.
        display_square = min(self.matrix.width, self.matrix.height) * 0.7
        min_display = cent_x - display_square / 2
        max_display = cent_x + display_square / 2
        deg_to_rad = 2 * 3.14159265 / 360
        rotation = 0
        # Pre calculate colors
        col_table = []
        for x in range(int(min_rotate), int(max_rotate)):
            col_table.insert(x, scale_col(x, min_display, max_display))
        offset_canvas = self.matrix.CreateFrameCanvas()
        while True:
            rotation += 1
            rotation %= 360
            # calculate sin and cos once for each frame
            angle = rotation * deg_to_rad
            sin = math.sin(angle)
            cos = math.cos(angle)
            for x in range(int(min_rotate), int(max_rotate)):
                for y in range(int(min_rotate), int(max_rotate)):
                    # Our rotate center is always offset by cent_x
                    # NOTE(review): cent_x is also used for the y coordinate;
                    # this is only correct for a square matrix — confirm.
                    rot_x, rot_y = rotate(x - cent_x, y - cent_x, sin, cos)
                    if x >= min_display and x < max_display and y >= min_display and y < max_display:
                        x_col = col_table[x]
                        y_col = col_table[y]
                        offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, x_col, 255 - y_col, y_col)
                    else:
                        # Outside the displayed square: paint black.
                        offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, 0, 0, 0)
            offset_canvas = self.matrix.SwapOnVSync(offset_canvas)
# Main function
if __name__ == "__main__":
    rotating_block_generator = RotatingBlockGenerator()
    # process() parses CLI flags and runs the render loop; a falsy return
    # means the arguments were invalid.
    if (not rotating_block_generator.process()):
        rotating_block_generator.print_help()
```
|
{
"source": "jcrd/python-pkgbuilder",
"score": 2
}
|
#### File: python-pkgbuilder/pkgbuilder/aur.py
```python
import json
import logging
import tarfile
import urllib.parse
import urllib.request

from pathlib import Path
from subprocess import run
log = logging.getLogger('pkgbuilder.aur')
class GitRepo:
    """
    A local git repository.

    :param path: Path to local repository
    """
    def __init__(self, path):
        self.path = Path(path)

    def _git(self, cmd):
        # Build (but do not run) the argv list for a git command scoped to
        # this repository.
        return 'git -C {} {}'.format(str(self.path), cmd).split()

    def is_repo(self):
        """Return True if self.path is a working git repository."""
        # capture_output silences git's status output on the terminal.
        return run(self._git('status'), capture_output=True).returncode == 0

    def up_to_date(self):
        """
        Check if the repository is up-to-date.

        :return: `True` if up-to-date, `False` otherwise
        :raises CalledProcessError: Raised if the git command fails
        """
        run(self._git('fetch origin master'), check=True)
        # The original read r.stdout from a run() without capture_output,
        # which is always None — the comparison could never be True.
        r = run(self._git('rev-list HEAD..origin/master --count'),
                check=True, capture_output=True, text=True)
        return r.stdout.strip() == '0'

    def pull(self):
        """
        Run git pull in this repository.

        :raises CalledProcessError: Raised if the git command fails
        """
        run(self._git('pull'), check=True)

    def clone(self, url):
        """
        Clone a git repository to self.path.

        :param url: Git repository URL
        :raises FileExistsError: Raised if self.path already exists
        :raises CalledProcessError: Raised if the git command fails
        """
        if self.path.exists():
            raise FileExistsError
        run(['git', 'clone', '--depth=1', url, str(self.path)], check=True)
class AurPackage:
    """
    A package found on the AUR that can be downloaded or cloned via git.

    :param info: Package info
    :param url: URL to the AUR interface
    """
    def __init__(self, info, url):
        self.info = info
        self.url = url
        self.name = info['Name']
        # Snapshot tarball URL; URLPath is relative to the AUR base URL.
        self.urlpath = url + info['URLPath']
        self.giturl = url + '/{}.git'.format(self.name)

    def download(self, dest):
        """
        Download and extract package snapshot to given destination.

        The snapshot's top-level `<name>/` directory is stripped so its
        contents end up directly inside *dest*.

        :param dest: Extraction destination
        """
        with urllib.request.urlopen(self.urlpath) as r:
            with tarfile.open(fileobj=r, mode='r:gz') as t:
                log.info('%s: Downloading AUR snapshot to %s...',
                         self.name, dest)
                prefix = self.name + '/'
                members = []
                for m in t.getmembers():
                    if m.name.startswith(prefix):
                        # Strip the leading "<name>/" path component.
                        m.name = m.name[len(prefix):]
                        members.append(m)
                # NOTE(review): extractall() trusts member paths from the
                # downloaded tarball; consider the `filter="data"` argument
                # (Python 3.12+) to guard against path traversal.
                t.extractall(dest, members)

    def git_clone(self, dest):
        """
        Clone the AUR package's git repository to given destination.

        :param dest: Local repository destination
        :raises FileExistsError: Raised if the destination already exists
        :raises CalledProcessError: Raised if the git command fails
        """
        GitRepo(dest).clone(self.giturl)
class Aur:
    """
    Wrapper around the AUR RPC interface with caching for package info.

    See https://aur.archlinux.org/rpc.php.

    :param url: URL providing the RPC interface, defaults to \
    https://aur.archlinux.org
    """
    def __init__(self, url='https://aur.archlinux.org'):
        self.url = url
        self.rpc = url + '/rpc/?v=5&type=info'
        # Maps package name -> info dict for names already queried.
        self.cache = {}

    def infos(self, *names):
        """
        Get info about AUR packages.

        Cached names are answered locally; only unknown names are sent to
        the RPC endpoint.

        :param names: Positional arguments specifying package names
        :return: A dictionary mapping names to info
        """
        res = {}
        args = ''
        for name in names:
            if name in self.cache:
                res[name] = self.cache[name]
            else:
                # Percent-encode so names containing query-significant
                # characters (e.g. '+') survive the URL intact.
                args += '&arg[]={}'.format(urllib.parse.quote(name))
        if not args:
            return res
        with urllib.request.urlopen(self.rpc + args) as r:
            s = r.read().decode('utf-8')
        for pkg in json.loads(s)['results']:
            name = pkg['Name']
            self.cache[name] = pkg
            res[name] = pkg
        return res

    def info(self, name):
        """
        Get info about an AUR package.

        :param name: Name of the AUR package
        :return: Package info or None if name is not found
        """
        try:
            return self.infos(name)[name]
        except KeyError:
            return None

    def get_package(self, name):
        """
        Get an AurPackage that can be downloaded.

        :param name: Name of the AUR package
        :return: an AurPackage or None if name is not found
        """
        i = self.info(name)
        if i:
            return AurPackage(i, self.url)
        return None
```
#### File: python-pkgbuilder/test/common.py
```python
from pathlib import Path
test1_pkg = 'test1-1-1-any.pkg.tar.xz'
test1_dep1_pkg = 'test1-dep1-1-1-any.pkg.tar.xz'
test1_makedep1_pkg = 'test1-makedep1-1-1-any.pkg.tar.xz'
localdir = str(Path(__file__).parent) + '/pkgbuilds'
chrootdir = '/var/lib/pkgbuilder'
def pkgnames(pkgs):
    """Return just the file names of the given package file paths."""
    return [Path(p).name for p in pkgs]
```
#### File: python-pkgbuilder/test/test_utils.py
```python
from pathlib import Path
from shutil import copytree
from tempfile import TemporaryDirectory
import os
import unittest
from pkgbuilder.utils import synctree
class TestSynctree(unittest.TestCase):
    """Tests for pkgbuilder.utils.synctree using two temp directory trees."""

    def setUp(self):
        # "seed" is the source tree; "dest" is a pre-sync copy that seed
        # then diverges from (changed files + new files).
        self.seed = TemporaryDirectory()
        self.tmp = TemporaryDirectory()
        self.dest = Path(self.tmp.name, 'synctree')
        self.dir = Path(self.seed.name, 'dir')
        os.mkdir(self.dir)

        def echo(contents, *args):
            # Write *contents* to a path under the seed tree.
            # NOTE(review): when an element of *args* is an absolute Path
            # (self.dir), pathlib discards the preceding components, so the
            # file lands at that absolute path regardless of the prefix.
            with open(Path(self.seed.name, *args), 'w') as f:
                f.write(contents)

        echo('before', 'file1')
        echo('before', 'file2')
        echo('before', 'file3')
        echo('before', self.dir, 'file1')
        echo('before', self.dir, 'file2')
        echo('before', self.dir, 'file3')
        # Snapshot the "before" state into dest, then mutate the seed.
        copytree(self.seed.name, self.dest)
        echo('after', 'file1')
        echo('after', self.dir, 'file1')
        echo('new', 'file4')
        echo('new', self.dir, 'file4')

    def test_synctree(self):
        synctree(self.seed.name, self.dest)
        with open(Path(self.dest, 'file1')) as f:
            content = list(f)
        self.assertEqual(content[0], 'after')
        # NOTE(review): self.dir is absolute, so Path(self.dest, self.dir,
        # 'file1') resolves to the seed's copy — these nested-dir checks do
        # not actually inspect the synced destination; confirm intent.
        with open(Path(self.dest, self.dir, 'file1')) as f:
            content = list(f)
        self.assertEqual(content[0], 'after')
        self.assertTrue(Path(self.dest, 'file4').exists())
        self.assertTrue(Path(self.dest, self.dir, 'file4').exists())

    def tearDown(self):
        self.seed.cleanup()
        self.tmp.cleanup()
```
|
{
"source": "jcredi/article-titles-generator",
"score": 3
}
|
#### File: jcredi/article-titles-generator/lstmtrainer.py
```python
from __future__ import print_function
import numpy as np
import random
import os
import sys
import argparse
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation
from keras.optimizers import RMSprop
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.4.1-rc"
__email__ = "<EMAIL>"
# Command-line configuration: corpus location, sequence slicing and training hyper-parameters.
parser = argparse.ArgumentParser(description='Trains an LSTM for scientific paper titles generation.')
# NOTE(review): the help text mentions data_dump/arxiv_dump.csv but the actual
# default is data/dump.csv — confirm which path is intended.
parser.add_argument('--dataDumpPath', dest='dump_file_path', default=os.getcwd()+'/data/dump.csv', help='Path to text corpus file. Default: "<current_working_directory>/data_dump/arxiv_dump.csv"')
parser.add_argument('--sequenceLength', dest='seq_length', type=int, default=40, help='Length of sequences to be extracted from the corpus. Default: 40')
parser.add_argument('--step', dest='step', type=int, default=3, help='Steps by which the corpus will be cut into sequences. Default: 3')
parser.add_argument('--lstmUnits', dest='lstm_units', type=int, default=128, help='Number of LSTM units. Default: 128')
parser.add_argument('--epochs', dest='epochs', type=int, default=60, help='Number of training epochs. Default: 60')
parser.add_argument('--learningRate', dest='learning_rate', type=float, default=0.01, help='Learning rate used by the RMSprop optimizer. Default: 0.01')
parser.add_argument('--batchSize', dest='batch_size', type=int, default=128, help='Mini-batch size. Default: 128')
parser.add_argument('--temperature', dest='temperature', type=float, default=0.5, help='Controls the randomness level in the generation of new sequences. Default: 0.5')
parser.add_argument('--outputSequenceLength', dest='output_seq_length', type=int, default=400, help='Length of the generated text to be displayed after each epoch. Default: 400')
args = parser.parse_args()
# retrieve dumped data: one title per line of the dump file
titles = []
with open(args.dump_file_path, 'r') as data_dump_file:
    for line in data_dump_file:
        titles.append(line.rstrip())
# Join all titles into one character-level training corpus.
corpus = '. '.join(titles)
print('Corpus length:', len(corpus))
chars = sorted(list(set(corpus)))
print('Total chars:', len(chars))
# Bidirectional char <-> index maps for one-hot encoding/decoding.
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = args.seq_length
step = args.step
sequences = []
next_chars = []
for i in range(0, len(corpus) - maxlen, step):
    sequences.append(corpus[i: i + maxlen])
    next_chars.append(corpus[i + maxlen])
print('Number of sequences:', len(sequences))
print('One-hot vectorization...')
# X: one-hot encoded input windows; y: one-hot encoded next character.
# NOTE(review): np.bool is removed in NumPy >= 1.24 — switch to bool when upgrading.
X = np.zeros((len(sequences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sequences), len(chars)), dtype=np.bool)
for i, sequence in enumerate(sequences):
    for t, char in enumerate(sequence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(args.lstm_units, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=args.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
    """Sample a class index from a probability array with temperature rescaling.

    Args:
        preds: 1-D array of non-negative class probabilities.
        temperature: softmax temperature; lower values sample greedier,
            higher values sample more uniformly.

    Returns:
        int: the sampled index.
    """
    # helper function to sample an index from a probability array
    preds = np.asarray(preds).astype('float64')
    # Clip zeros before the log so zero-probability entries do not produce
    # -inf / NaN (RuntimeWarning) in the temperature rescaling.
    preds = np.log(np.maximum(preds, 1e-12)) / temperature
    # Subtract the max before exponentiating for numerical stability
    # (softmax is invariant to this shift).
    exp_preds = np.exp(preds - np.max(preds))
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
# train the model, output generated text after each epoch
# Bug fix: range(1, args.epochs) ran only args.epochs - 1 epochs; the
# inclusive upper bound below runs exactly args.epochs of them.
for epoch in range(1, args.epochs + 1):
    print()
    print('-' * 50)
    print('Epoch ', epoch)
    # One Keras epoch per outer iteration so we can sample text in between.
    model.fit(X, y, batch_size=args.batch_size, nb_epoch=1)
    # Seed the generator with a random window of the corpus.
    start_index = random.randint(0, len(corpus) - maxlen - 1)
    generated = ''
    sequence = corpus[start_index: start_index + maxlen]
    generated += sequence
    print('----- Generating with seed: "' + sequence + '"')
    sys.stdout.write(generated)
    for i in range(args.output_seq_length):
        # One-hot encode the current window and predict the next character.
        x = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sequence):
            x[0, t, char_indices[char]] = 1.
        preds = model.predict(x, verbose=0)[0]
        next_index = sample(preds, args.temperature)
        next_char = indices_char[next_index]
        generated += next_char
        # Slide the window forward by one character.
        sequence = sequence[1:] + next_char
        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
```
|
{
"source": "jcreigh/pysurfshark",
"score": 2
}
|
#### File: pysurfshark/surfshark/common.py
```python
import json
from collections import UserDict
class JsonObject(UserDict):
    """Dict wrapper that also exposes its keys as attributes.

    ``obj.foo`` returns ``obj['foo']`` when the key exists; otherwise the
    normal attribute machinery applies (raising AttributeError).
    """

    def __init__(self, data):
        super().__init__(data)

    def __getattr__(self, attr):
        # Only reached when regular attribute lookup has already failed.
        try:
            return self.data[attr]
        except KeyError:
            # Defer to the default machinery so unknown names raise the
            # usual AttributeError.
            return self.__getattribute__(attr)
def getMainPublicKey():
    """Look up Surfshark's main WireGuard public key from the DNS TXT record
    of ``wgs.prod.surfshark.com``.

    Tries ``dig`` first and falls back to ``nslookup``. Bug fix: the
    nslookup branch previously returned raw ``bytes`` while the dig branch
    returned ``str``; both now consistently return ``str``.

    Returns:
        str: the extracted public key.

    Raises:
        RuntimeError: if neither dig nor nslookup is installed.
    """
    import subprocess
    try:
        # dig +short prints: "public_key=<key>" — strip the quoted prefix/suffix.
        main_pubkey = subprocess.check_output(["dig", "+short", "TXT", "wgs.prod.surfshark.com"]).strip()[13:-2].decode()
    except FileNotFoundError:
        try:
            output = subprocess.check_output(["nslookup", "-q=txt", "wgs.prod.surfshark.com"])
            # Extract the key bytes and decode so this path also returns str.
            main_pubkey = output.split(b'public_key', 1)[1].split(b']', 1)[0][1:].decode()
        except FileNotFoundError:
            raise RuntimeError('Could not lookup Surfshark\'s main public key. Install dig or nslookup')
    return main_pubkey
```
#### File: pysurfshark/surfshark/ConnectionInfo.py
```python
from .common import JsonObject
class ConnectionInfo(JsonObject):
    """JSON-backed record describing the current VPN connection.

    Bug fix: this was previously declared with ``def`` instead of ``class``,
    so ``ConnectionInfo(...)`` was a function returning ``None`` and the
    methods below were never attached to any type.
    """
    def __init__(self, data):
        super().__init__(data)
    def __str__(self):
        out = f"ConnectionInfo(ip={self.ip}, isp={self.isp}, countryCode={self.countryCode}, country={self.country}, "
        out += f"city={self.city}, secured={self.secured}, restricted={self.restricted})"
        return out
|
{
"source": "jcreighton669/ProjectEuler100-Challenge",
"score": 4
}
|
#### File: ProjectEuler100-Challenge/003: Largest Prime/LargestPrimeFactor.py
```python
def main():
    """Prompt for an integer and print its largest prime factor."""
    number = int(input("Enter the number to get its largest prime factor: "))
    result = largest_factor(number)
    print(result)
def largest_factor(number):
    """Return the largest prime factor of *number* using trial division.

    Fixes vs. the original: a prime input (e.g. 13) previously crashed with
    ``max([])`` because ``number`` itself was never considered, and the
    factor/primality scan was O(n^2); dividing out factors is O(sqrt(n)).

    Args:
        number: integer >= 2.

    Raises:
        ValueError: if ``number`` < 2 (no prime factors exist).
    """
    if number < 2:
        raise ValueError("number must be >= 2")
    largest_prime = 1
    # Divide out all factors of 2 so the main loop can step over odds only.
    while number % 2 == 0:
        largest_prime = 2
        number //= 2
    factor = 3
    while factor * factor <= number:
        while number % factor == 0:
            largest_prime = factor
            number //= factor
        factor += 2
    # Whatever remains above 1 is itself prime and is the largest factor.
    if number > 1:
        largest_prime = number
    return largest_prime
def is_prime(factor):
    """Return True if *factor* is prime.

    Improvements: trial division stops at sqrt(factor) instead of
    factor - 1, values below 2 are correctly rejected (the original
    reported 1 as prime), and the dead ``i += 1`` inside the for loop
    is removed.
    """
    if factor < 2:
        return False
    if factor % 2 == 0:
        return factor == 2
    divisor = 3
    while divisor * divisor <= factor:
        if factor % divisor == 0:
            return False
        divisor += 2
    return True
# Only run when executed as a script, not when imported.
if __name__ == "__main__":
    main()
```
#### File: ProjectEuler100-Challenge/004: Palindromic Product/LargestPalindromicProduct.py
```python
import math
def main():
    """Search all products of two n-digit numbers for palindromes.

    Prints the factors and value of the last palindrome found, plus how
    many palindromic products were seen in total.
    """
    digits = int(input("Enter number of digits for multiplier: "))
    count = 0
    first_value = 0
    second_value = 0
    product = 0
    palindromic_product = 0
    # n-digit numbers span [10^(n-1), 10^n).
    lower_limit = int(math.pow(10, digits - 1))
    upper_limit = int(math.pow(10, digits))
    for i in range(lower_limit, upper_limit):
        for j in range(lower_limit, upper_limit):
            product = i * j
            if isPalindrome(product):
                count += 1
                first_value = i
                second_value = j
                palindromic_product = product
    # NOTE(review): this keeps the LAST palindrome in iteration order, which
    # is not necessarily the LARGEST palindromic product — confirm intent.
    print(first_value, "*", second_value, "=", palindromic_product)
    print(count)
def isPalindrome(product):
    """Return True if the non-negative integer *product* reads the same
    forwards and backwards.

    Uses string reversal instead of the original digit-arithmetic loop;
    behaviour is identical for the non-negative products this program
    generates.
    """
    digits = str(product)
    return digits == digits[::-1]
# Only run when executed as a script, not when imported.
if __name__ == "__main__":
    main()
```
#### File: ProjectEuler100-Challenge/007: 10001st Prime/NthPrime.py
```python
import math
import sys
def main():
    """Prompt for n and print the n-th prime number."""
    count = 0
    nth = int(input("Enter which prime you want: "))
    nth_prime = 0
    # Scan candidates upward until the requested prime count is reached.
    for candidate in range(2, sys.maxsize):
        if is_prime(candidate):
            count += 1
            nth_prime = candidate
        if nth == count:
            break
    print(nth_prime, "is the", nth, "prime number")
def is_prime(factor):
    """Return True if *factor* is prime.

    Improvements: trial division stops at sqrt(factor) instead of
    factor - 1, values below 2 are correctly rejected (the original
    reported 1 as prime), and the dead ``i += 1`` inside the for loop
    is removed.
    """
    if factor < 2:
        return False
    if factor % 2 == 0:
        return factor == 2
    divisor = 3
    while divisor * divisor <= factor:
        if factor % divisor == 0:
            return False
        divisor += 2
    return True
# Only run when executed as a script, not when imported.
if __name__ == "__main__":
    main()
```
#### File: ProjectEuler100-Challenge/010: Summation of Primes/SummationOfPrimes.py
```python
def primes(number):
    """Return the sum of all primes strictly below *number* (sieve of
    Eratosthenes).

    Bug fix: the original inner loop was ``range(p * 2, number)`` WITHOUT a
    step, which crossed off every integer >= 2p (not just multiples of p),
    so the function returned 2 + 3 = 5 for any number > 4.
    """
    if number < 3:
        return 0
    is_prime = [True] * number
    is_prime[0] = is_prime[1] = False
    total = 0
    for p in range(2, number):
        if is_prime[p]:
            total += p
            # Cross off only the multiples of p.
            for multiple in range(p * 2, number, p):
                is_prime[multiple] = False
    return total
def main():
    """Prompt for an upper limit and print the sum of primes below it."""
    limit = int(input("Enter an upper limit: "))
    print(primes(limit))
# Only run when executed as a script, not when imported.
if __name__ == "__main__":
    main()
```
|
{
"source": "jcreinhold/counterfactualms",
"score": 2
}
|
#### File: counterfactualms/arch/hierarchical.py
```python
from functools import partial
import numpy as np
import torch
from torch import nn
from counterfactualms.arch.layers import Conv2d, ConvTranspose2d
class HierarchicalEncoder(nn.Module):
    """Convolutional encoder that emits feature maps at several resolutions.

    Each entry in ``filters`` defines one resolution level made of
    ``num_convolutions - 1`` stride-1 conv blocks followed by a stride-2
    downsampling block. Levels listed in ``hierarchical_layers`` also emit a
    channel-reduced (by ``div_factor``, via a 1x1 conv) feature map; if
    ``len(filters)`` itself is listed, the deepest level is flattened through
    a fully connected layer into a ``latent_dim`` vector.
    """
    def __init__(self, num_convolutions=3, filters=(16,32,64,128,256), latent_dim=100,
                 input_size=(1,128,128), use_weight_norm=False, use_spectral_norm=False,
                 hierarchical_layers=(1,3,5), div_factor=8):
        super().__init__()
        self.num_convolutions = num_convolutions
        self.filters = filters
        # The two reparameterizations are mutually exclusive.
        if use_weight_norm and use_spectral_norm:
            raise ValueError('Cannot use both weight norm and spectral norm.')
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        self.hierarchical_layers = hierarchical_layers
        self.div_factor = div_factor
        self.down_layers = nn.ModuleList([])
        self.resolution_layers = nn.ModuleList([])
        # (C, H, W) of each hierarchical output; level i halves H and W i times.
        self.intermediate_shapes = []
        self.out_layers = nn.ModuleList([])
        cur_channels = input_size[0]
        for i, c in enumerate(filters):
            resolution_layer = []
            for _ in range(0, num_convolutions - 1):
                resolution_layer += self._conv_layer(cur_channels, c)
                cur_channels = c
            self.resolution_layers.append(nn.Sequential(*resolution_layer))
            if i in self.hierarchical_layers:
                # 1x1 conv shrinks channels by div_factor for the skip output.
                out_channels = max(cur_channels // div_factor, 1)
                self.out_layers.append(self._conv(cur_channels, out_channels, 1, bias=True))
                self.intermediate_shapes.append(np.array(input_size) // (2 ** i))
                self.intermediate_shapes[-1][0] = out_channels
            self.down_layers.append(nn.Sequential(*self._down_conv_layer(cur_channels, c)))
            cur_channels = c
        if len(filters) in self.hierarchical_layers:
            self.intermediate_shapes.append(np.array(input_size) // (2 ** len(filters)))
            self.intermediate_shapes[-1][0] = cur_channels
        self.fc = nn.Sequential(
            nn.Linear(np.prod(self.intermediate_shapes[-1]), latent_dim, bias=False),
            nn.BatchNorm1d(latent_dim),
            nn.LeakyReLU(.1, inplace=True)
        )
    @property
    def _conv(self):
        # Conv2d factory with the configured normalization baked in.
        return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    def _conv_layer(self, ci, co):
        # 3x3 stride-1 conv + BN + LeakyReLU, spatial size preserved.
        return [self._conv(ci, co, 3, 1, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def _down_conv_layer(self, ci, co):
        # 4x4 stride-2 conv + BN + LeakyReLU, halves spatial size.
        return [self._conv(ci, co, 4, 2, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def forward(self, x):
        """Return the list of hierarchical outputs, shallowest first."""
        out = []
        c = 0  # index into out_layers (only hierarchical levels have one)
        for i, (conv, down) in enumerate(zip(self.resolution_layers, self.down_layers)):
            x = conv(x)
            if i in self.hierarchical_layers:
                out.append(self.out_layers[c](x))
                c += 1
            x = down(x)
        if len(self.filters) in self.hierarchical_layers:
            # Flatten the deepest feature map into the latent vector.
            x = x.view(-1, np.prod(self.intermediate_shapes[-1]))
            out.append(self.fc(x))
        return out
class HierarchicalDecoder(nn.Module):
    """Decoder mirroring HierarchicalEncoder: consumes the list of
    hierarchical feature maps (plus a context vector) and reconstructs an
    image of ``output_size``.

    At each resolution level a small MLP over ``ctx`` produces a per-channel
    sigmoid gate that multiplies the feature maps; levels present in
    ``hierarchical_layers`` concatenate the matching encoder skip input on
    the channel axis before convolving.
    """
    def __init__(self, num_convolutions=3, filters=(256,128,64,32,16), latent_dim=100, output_size=(1,128,128),
                 upconv=False, use_weight_norm=False, use_spectral_norm=False, hierarchical_layers=(1,3,5),
                 context_dim=4, div_factor=8):
        super().__init__()
        self.num_convolutions = num_convolutions
        self.filters = filters
        self.upconv = upconv
        # The two reparameterizations are mutually exclusive.
        if use_weight_norm and use_spectral_norm:
            raise ValueError('Cannot use both weight norm and spectral norm.')
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        self.hierarchical_layers = hierarchical_layers
        # The deepest level (== len(filters)) is fed through fc, not concatenated.
        hierarchical_layers_ = [h for h in hierarchical_layers if h != len(filters)]
        self.context_dim = context_dim
        self.div_factor = div_factor
        self.resolution_layers = nn.ModuleList([])
        self.up_layers = nn.ModuleList([])
        # (C, H, W) expected for each hierarchical input, deepest first.
        self.intermediate_shapes = []
        self.context_attention = nn.ModuleList([])
        cur_channels = filters[0]
        self.start_context_attention = self._attn(cur_channels)
        self.start_up_layer = nn.Sequential(*self._upsample_layer(cur_channels, cur_channels))
        if len(filters) in hierarchical_layers:
            self.intermediate_shapes.append(np.array(output_size) // (2 ** (len(filters))))
            self.intermediate_shapes[-1][0] = cur_channels
        for i, c in enumerate(filters[1:], 1):
            resolution_layer = []
            # Re-index from the encoder's point of view (deep -> shallow).
            i = (len(filters) - i)
            input_layer = i in hierarchical_layers_
            # Skip inputs arrive with channels reduced by div_factor.
            in_channels = max(cur_channels // div_factor, 1)
            for j in range(0, num_convolutions - 1):
                # First conv of an input level absorbs the concatenated skip channels.
                ci = (in_channels+cur_channels) if j == 0 and input_layer else cur_channels
                resolution_layer += self._conv_layer(ci, cur_channels)
            self.resolution_layers.append(nn.Sequential(*resolution_layer))
            self.context_attention.append(self._attn(cur_channels))
            self.up_layers.append(nn.Sequential(*self._upsample_layer(cur_channels, c)))
            if input_layer:
                self.intermediate_shapes.append(np.array(output_size) // (2 ** i))
                self.intermediate_shapes[-1][0] = in_channels
            cur_channels = c
        final_layer = self._conv_layer(cur_channels, cur_channels)
        final_layer.append(self._conv(cur_channels, output_size[0], 1, 1, bias=True))
        self.final_layer = nn.Sequential(*final_layer)
        # Projects the latent vector back to the deepest feature-map shape.
        self.fc = nn.Sequential(
            nn.Linear(latent_dim, np.prod(self.intermediate_shapes[0]), bias=False),
            nn.BatchNorm1d(np.prod(self.intermediate_shapes[0])),
            nn.LeakyReLU(.1, inplace=True)
        )
    @property
    def _conv(self):
        # Conv2d factory with the configured normalization baked in.
        return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    @property
    def _conv_transpose(self):
        return partial(ConvTranspose2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    def _conv_layer(self, ci, co):
        # 3x3 stride-1 conv + BN + LeakyReLU, spatial size preserved.
        return [self._conv(ci, co, 3, 1, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def _upsample_layer(self, ci, co):
        # Either nearest-upsample + conv (upconv=True) or a transposed conv;
        # both double the spatial resolution.
        if self.upconv:
            layer = [nn.Upsample(scale_factor=2, mode='nearest'),
                     self._conv(ci, co, kernel_size=5, stride=1, padding=2, bias=False)]
        else:
            layer = [self._conv_transpose(ci, co, kernel_size=4, stride=2, padding=1, bias=False)]
        layer += [nn.BatchNorm2d(co, momentum=0.05),
                  nn.LeakyReLU(.1, inplace=True)]
        return layer
    def _attn(self, co):
        # Context MLP -> per-channel sigmoid gate of width co.
        hidden_dim = max(co // 4, self.context_dim)
        return nn.Sequential(nn.Linear(self.context_dim, hidden_dim),
                             nn.LeakyReLU(0.1, inplace=True),
                             nn.Linear(hidden_dim, co),
                             nn.Sigmoid())
    def forward(self, x, ctx):
        """Decode hierarchical inputs *x* (list, shallowest first, consumed
        deepest-first via ``pop``) under context vector *ctx*."""
        assert x[0].size(0) == ctx.size(0)
        batch_size = ctx.size(0)
        layers = zip(self.resolution_layers, self.up_layers, self.context_attention)
        ctx_attn = self.start_context_attention(ctx).view(batch_size, -1, 1, 1)
        # The last hierarchical input is the latent vector for fc.
        y = self.fc(x.pop()).view(-1, *self.intermediate_shapes[0])
        y = self.start_up_layer(y) * ctx_attn
        for i, (conv, up, attn) in enumerate(layers, 1):
            i = len(self.filters) - i
            output_layer = i in self.hierarchical_layers
            ctx_attn = attn(ctx).view(batch_size, -1, 1, 1)
            if output_layer:
                # Concatenate the matching encoder skip on the channel axis.
                y = torch.cat([y, x.pop()], 1)
            y = conv(y) * ctx_attn
            y = up(y)
        y = self.final_layer(y)
        return y
if __name__ == "__main__":
    # Smoke test: build encoder/decoder with every level hierarchical and
    # round-trip a 2-sample random batch, checking shapes match.
    hl = (1, 2, 3, 4, 5)
    filters = [20, 40, 80, 160, 320]
    div_factor = 80
    img_shape = (3,128,128)
    enc = HierarchicalEncoder(
        hierarchical_layers=hl, filters=filters,
        div_factor=div_factor, input_size=img_shape
    )
    dec = HierarchicalDecoder(
        hierarchical_layers=hl, filters=filters[::-1],
        div_factor=div_factor, output_size=img_shape
    )
    print(enc.intermediate_shapes)
    print(dec.intermediate_shapes)
    ctx = torch.randn(2, 4)  # width 4 matches the decoder's default context_dim
    x = torch.randn(2, *img_shape)
    y = enc(x)
    z = dec(y, ctx)
    assert z.shape == x.shape
    print(enc)
    print(dec)
```
#### File: counterfactualms/arch/layers.py
```python
import torch.nn as nn
from torch.nn.utils import spectral_norm, weight_norm
class _NormedConv(nn.Module):
    """Shared wrapper applying optional weight/spectral normalization to a
    convolution module and echoing the active norms in repr/str.

    Extracted to remove the full duplication between Conv2d and
    ConvTranspose2d below; their public interfaces are unchanged.
    """

    def __init__(self, conv, use_weight_norm, use_spectral_norm):
        super().__init__()
        self.conv = conv
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        # Reparameterizations are applied in the same order as before:
        # weight norm first, then (optionally) spectral norm on top.
        if use_weight_norm:
            self.conv = weight_norm(self.conv)
        if use_spectral_norm:
            self.conv = spectral_norm(self.conv)

    def forward(self, x):
        return self.conv(x)

    def _norm_str(self):
        # Suffix describing which reparameterizations are active.
        norm = ''
        if self.use_weight_norm:
            norm += f', weight_norm={self.use_weight_norm}'
        if self.use_spectral_norm:
            norm += f', spectral_norm={self.use_spectral_norm}'
        norm += ')'
        return norm

    def __repr__(self):
        return self.conv.__repr__()[:-1] + self._norm_str()

    def __str__(self):
        return self.conv.__str__()[:-1] + self._norm_str()


class Conv2d(_NormedConv):
    """nn.Conv2d with optional weight and/or spectral normalization."""

    def __init__(self, *args, use_weight_norm=True, use_spectral_norm=False, **kwargs):
        super().__init__(nn.Conv2d(*args, **kwargs), use_weight_norm, use_spectral_norm)


class ConvTranspose2d(_NormedConv):
    """nn.ConvTranspose2d with optional weight and/or spectral normalization."""

    def __init__(self, *args, use_weight_norm=True, use_spectral_norm=False, **kwargs):
        super().__init__(nn.ConvTranspose2d(*args, **kwargs), use_weight_norm, use_spectral_norm)
```
#### File: counterfactualms/arch/nvae.py
```python
import numpy as np
from torch import nn
from counterfactualms.arch.layers import Conv2d
from counterfactualms.arch.thirdparty.neural_operations import ConvBNSwish
from counterfactualms.arch.thirdparty.utils import get_arch_cells
from counterfactualms.arch.thirdparty.cells import Cell
from counterfactualms.arch.thirdparty.batchnormswish import BatchNormSwish
class Encoder(nn.Module):
    """NVAE-style encoder: a stem conv plus a stack of residual cells that
    downsamples once per entry in ``filters``, then projects the flattened
    deepest feature map to ``latent_dim``.
    """
    def __init__(self, num_convolutions=3, filters=(16,32,64,128,256), latent_dim:int=100,
                 input_size=(1,128,128), arch='res_mbconv'):
        super().__init__()
        self.filters = filters
        self.latent_dim = latent_dim
        self.arch_instance = get_arch_cells(arch)
        self.arch = arch
        layers = []
        n_resolutions = len(filters)
        # Prepend filters[0] so the first resolution keeps its channel count.
        filters = (filters[0],) + tuple(filters)
        layers += [Conv2d(input_size[0], filters[0], 3, padding=1, use_weight_norm=False)]
        cur_channels = filters[0]
        for ci, co in zip(filters, filters[1:]):
            # num_convolutions - 1 stride-1 cells, then one downsampling cell.
            cell_type = 'normal_pre'
            arch = self.arch_instance[cell_type]
            for _ in range(0, num_convolutions - 1):
                layers += [Cell(cur_channels, ci, cell_type=cell_type, arch=arch, use_se=True)]
                cur_channels = ci
            cell_type = 'down_pre'
            arch = self.arch_instance[cell_type]
            layers += [Cell(ci, co, cell_type=cell_type, arch=arch, use_se=True)]
            cur_channels = co
        self.cnn = nn.Sequential(*layers)
        # Feature-map shape after n_resolutions spatial halvings.
        self.intermediate_shape = np.array(input_size) // (2 ** n_resolutions)
        self.intermediate_shape[0] = cur_channels
        self.fc = nn.Sequential(
            nn.Linear(np.prod(self.intermediate_shape), latent_dim),
            BatchNormSwish(latent_dim, momentum=0.05)
        )
    def forward(self, x):
        """Encode *x* to a (batch, latent_dim) tensor."""
        x = self.cnn(x).view(-1, np.prod(self.intermediate_shape))
        return self.fc(x)
class Decoder(nn.Module):
    """NVAE-style decoder: projects the latent vector to the deepest
    feature-map shape, then upsamples once per entry in ``filters`` with
    residual cells and finishes with a 1x1 conv to ``output_size[0]`` channels.
    """
    def __init__(self, num_convolutions=3, filters=(256,128,64,32,16), latent_dim:int=100,
                 output_size=(1,128,128), arch='res_mbconv'):
        super().__init__()
        self.filters = filters
        self.latent_dim = latent_dim
        self.arch_instance = get_arch_cells(arch)
        self.arch = arch
        # Shape the fc output is reshaped to before the conv stack.
        self.intermediate_shape = np.array(output_size) // (2 ** len(filters))
        self.intermediate_shape[0] = filters[0]
        self.fc = nn.Linear(latent_dim, np.prod(self.intermediate_shape), bias=False)
        layers = []
        cur_channels = filters[0]
        cell_type = 'up_post'
        arch = self.arch_instance[cell_type]
        layers += [Cell(cur_channels, cur_channels, cell_type=cell_type, arch=arch, use_se=True)]
        for c in filters[1:]:
            # num_convolutions - 1 stride-1 cells, then one upsampling cell.
            cell_type = 'normal_post'
            arch = self.arch_instance[cell_type]
            for _ in range(0, num_convolutions - 1):
                layers += [Cell(cur_channels, cur_channels, cell_type=cell_type, arch=arch, use_se=True)]
            cell_type = 'up_post'
            arch = self.arch_instance[cell_type]
            layers += [Cell(cur_channels, c, cell_type=cell_type, arch=arch, use_se=True)]
            cur_channels = c
        layers += [ConvBNSwish(cur_channels, cur_channels), Conv2d(cur_channels, output_size[0], 1, 1)]
        self.cnn = nn.Sequential(*layers)
    def forward(self, x):
        """Decode a (batch, latent_dim) tensor into an image batch."""
        x = self.fc(x).view(-1, *self.intermediate_shape)
        return self.cnn(x)
```
#### File: arch/thirdparty/batchnormswish.py
```python
from __future__ import division
from torch.nn.modules.batchnorm import _BatchNorm
import torch.nn.functional as F
from counterfactualms.arch.thirdparty.swish import Swish as swish
class BatchNormSwish(_BatchNorm):
    """Batch normalization whose output is passed through the Swish
    activation.

    The forward logic mirrors torch's stock BatchNorm forward (momentum
    handling, cumulative-average mode, train/eval buffer selection) with
    ``swish`` applied to the normalized result.
    """
    def _check_input_dim(self, input):
        # Accept any input with at least a (batch, features) layout.
        if input.dim() < 2:
            raise ValueError('expected at least 2D input (got {}D input)'
                             .format(input.dim()))
    def forward(self, input):
        self._check_input_dim(input)
        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked = self.num_batches_tracked + 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        r"""
        Decide whether the mini-batch stats should be used for normalization rather than the buffers.
        Mini-batch stats are used in training mode, and in eval mode when buffers are None.
        """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)
        r"""
        Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
        passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
        used for normalization (i.e. in eval mode when buffers are not None).
        """
        out = F.batch_norm(
            input,
            # If buffers are not to be tracked, ensure that they won't be updated
            self.running_mean if not self.training or self.track_running_stats else None,
            self.running_var if not self.training or self.track_running_stats else None,
            self.weight, self.bias, bn_training, exponential_average_factor, self.eps)
        # Apply Swish to the normalized activations.
        return swish.apply(out)
```
#### File: arch/thirdparty/cells.py
```python
import torch
import torch.nn as nn
from counterfactualms.arch.thirdparty.neural_operations import OPS, get_skip_connection, SE
from counterfactualms.arch.thirdparty.neural_ar_operations import ARInvertedResidual, MixLogCDFParam, mix_log_cdf_flow
from counterfactualms.arch.thirdparty.neural_ar_operations import ELUConv as ARELUConv
from counterfactualms.arch.thirdparty.utils import get_stride_for_cell_type
class Cell(nn.Module):
    """Residual cell: a chain of primitive ops (from ``arch``) plus a skip
    branch.

    Only the first op carries the cell's stride (the stride is derived from
    ``cell_type``); the residual branch is optionally passed through
    squeeze-and-excitation and scaled by 0.1 before being added to the skip.
    """
    def __init__(self, Cin, Cout, cell_type, arch, use_se=True):
        super(Cell, self).__init__()
        self.cell_type = cell_type
        stride = get_stride_for_cell_type(self.cell_type)
        # Skip branch matches channel count / resolution of the op chain.
        self.skip = get_skip_connection(Cin, Cout, stride)
        self.use_se = use_se
        self._num_nodes = len(arch)
        self._ops = nn.ModuleList()
        for i in range(self._num_nodes):
            # Only the first op changes resolution; the rest are stride 1.
            stride = get_stride_for_cell_type(self.cell_type) if i == 0 else 1
            C = Cin if i == 0 else Cout
            primitive = arch[i]
            op = OPS[primitive](C, Cout, stride)
            self._ops.append(op)
        if self.use_se:
            self.se = SE(Cout, Cout)
    def forward(self, s):
        # skip branch
        skip = self.skip(s)
        for i in range(self._num_nodes):
            s = self._ops[i](s)
        s = self.se(s) if self.use_se else s
        # 0.1 damping on the residual branch stabilizes deep stacks.
        return skip + 0.1 * s
class CellAR(nn.Module):
    """Autoregressive flow cell over latent *z*, conditioned on features.

    With ``use_mix_log_cdf`` hard-coded to False (the mix-log-CDF branch is
    dead code here), the flow is a pure mean shift ``z - mu(z, ftr)`` with a
    zero log-determinant.
    """
    def __init__(self, num_z, num_ftr, num_c, arch, mirror):
        super(CellAR, self).__init__()
        assert num_c % num_z == 0
        self.cell_type = 'ar_nn'
        # s0 will the random samples
        ex = 6  # expansion factor of the inverted-residual conditioner
        self.conv = ARInvertedResidual(num_z, num_ftr, ex=ex, mirror=mirror)
        self.use_mix_log_cdf = False
        if self.use_mix_log_cdf:
            self.param = MixLogCDFParam(num_z, num_mix=3, num_ftr=self.conv.hidden_dim, mirror=mirror)
        else:
            # 0.1 helps bring mu closer to 0 initially
            self.mu = ARELUConv(self.conv.hidden_dim, num_z, kernel_size=1, padding=0, masked=True, zero_diag=False,
                                weight_init_coeff=0.1, mirror=mirror)
    def forward(self, z, ftr):
        s = self.conv(z, ftr)
        if self.use_mix_log_cdf:
            logit_pi, mu, log_s, log_a, b = self.param(s)
            new_z, log_det = mix_log_cdf_flow(z, logit_pi, mu, log_s, log_a, b)
        else:
            # Volume-preserving shift: log-determinant is identically zero.
            mu = self.mu(s)
            new_z = (z - mu)
            log_det = torch.zeros_like(new_z)
        return new_z, log_det
class PairedCellAR(nn.Module):
    """Two CellARs with mirrored masks applied back-to-back; their
    log-determinants are summed."""
    def __init__(self, num_z, num_ftr, num_c, arch=None):
        super(PairedCellAR, self).__init__()
        self.cell1 = CellAR(num_z, num_ftr, num_c, arch, mirror=False)
        self.cell2 = CellAR(num_z, num_ftr, num_c, arch, mirror=True)
    def forward(self, z, ftr):
        new_z, log_det1 = self.cell1(z, ftr)
        new_z, log_det2 = self.cell2(new_z, ftr)
        log_det1 += log_det2
        return new_z, log_det1
```
#### File: arch/thirdparty/neural_operations.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from counterfactualms.arch.thirdparty.swish import Swish as SwishFN
from counterfactualms.arch.thirdparty.batchnormswish import BatchNormSwish
from counterfactualms.arch.layers import Conv2d
from collections import OrderedDict
BN_EPS = 1e-5  # epsilon shared by every batch-norm layer in this module
SYNC_BN = False  # toggle SyncBatchNorm vs. plain BatchNorm2d (see get_batchnorm)
# Registry mapping architecture-cell names to factories (Cin, Cout, stride) -> module.
OPS = OrderedDict([
    ('res_elu', lambda Cin, Cout, stride: ELUConv(Cin, Cout, 3, stride, 1)),
    ('res_bnelu', lambda Cin, Cout, stride: BNELUConv(Cin, Cout, 3, stride, 1)),
    ('res_bnswish', lambda Cin, Cout, stride: BNSwishConv(Cin, Cout, 3, stride, 1)),
    ('res_bnswish5', lambda Cin, Cout, stride: BNSwishConv(Cin, Cout, 3, stride, 2, 2)),
    ('mconv_e6k5g0', lambda Cin, Cout, stride: InvertedResidual(Cin, Cout, stride, ex=6, dil=1, k=5, g=0)),
    ('mconv_e3k5g0', lambda Cin, Cout, stride: InvertedResidual(Cin, Cout, stride, ex=3, dil=1, k=5, g=0)),
    ('mconv_e3k5g8', lambda Cin, Cout, stride: InvertedResidual(Cin, Cout, stride, ex=3, dil=1, k=5, g=8)),
    ('mconv_e6k11g0', lambda Cin, Cout, stride: InvertedResidual(Cin, Cout, stride, ex=6, dil=1, k=11, g=0)),
])
def average_tensor(t, is_distributed):
    """All-reduce *t* in place and divide by the world size (no-op when not
    running distributed)."""
    if not is_distributed:
        return
    world_size = float(dist.get_world_size())
    dist.all_reduce(t.data, op=dist.ReduceOp.SUM)
    t.data /= world_size
def get_skip_connection(Cin, Cout, stride):
    """Return the skip-branch module for a cell of the given stride:
    identity (1), factorized reduction (2), or upsample + 1x1 conv (-1)."""
    if stride == 1:
        return Identity()
    if stride == 2:
        return FactorizedReduce(Cin, Cout)
    if stride == -1:
        return nn.Sequential(UpSample(), Conv2d(Cin, Cout, kernel_size=1))
def norm(t, dim):
    """Return the L2 norm of *t* reduced over dimension(s) *dim*."""
    squared = t * t
    return torch.sqrt(squared.sum(dim))
def logit(t):
    """Inverse sigmoid: log(t / (1 - t)), computed as log(t) - log(1 - t)."""
    return torch.log(t) - torch.log(1 - t)
def act(t):
    """Apply the Swish activation via the custom autograd function."""
    # The following implementation has lower memory.
    return SwishFN.apply(t)
class Swish(nn.Module):
    """Module wrapper around the functional Swish activation ``act``."""
    def __init__(self):
        super(Swish, self).__init__()
    def forward(self, x):
        return act(x)
@torch.jit.script
def normalize_weight_jit(log_weight_norm, weight):
    """Rescale *weight* so each output filter has norm exp(log_weight_norm)."""
    n = torch.exp(log_weight_norm)
    wn = torch.sqrt(torch.sum(weight * weight, dim=[1, 2, 3]))  # norm(w)
    # 1e-5 guards the division against zero-norm filters.
    weight = n * weight / (wn.view(-1, 1, 1, 1) + 1e-5)
    return weight
class Identity(nn.Module):
    """Pass-through module: forward returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class SyncBatchNorm(nn.Module):
    """Wrapper around nn.SyncBatchNorm that forces ``ddp_gpu_size = 1``."""
    def __init__(self, *args, **kwargs):
        super(SyncBatchNorm, self).__init__()
        self.bn = nn.SyncBatchNorm(*args, **kwargs)
    def forward(self, x):
        # Sync BN only works with distributed data parallel with 1 GPU per process. I don't use DDP, so I need to let
        # Sync BN to know that I have 1 gpu per process.
        self.bn.ddp_gpu_size = 1
        return self.bn(x)
# quick switch between multi-gpu, single-gpu batch norm
def get_batchnorm(*args, **kwargs):
    """Batch-norm factory: SyncBatchNorm when SYNC_BN is set, else BatchNorm2d."""
    if SYNC_BN:
        return SyncBatchNorm(*args, **kwargs)
    return nn.BatchNorm2d(*args, **kwargs)
class ELUConv(nn.Module):
    """ELU activation followed by a conv; stride=-1 requests 2x nearest upsampling."""
    def __init__(self, C_in, C_out, kernel_size, stride=1, padding=0, dilation=1):
        super(ELUConv, self).__init__()
        self.upsample = stride == -1
        stride = abs(stride)  # the conv itself always uses a positive stride
        self.conv_0 = Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=True, dilation=dilation)
    def forward(self, x):
        out = F.elu(x)
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv_0(out)
        return out
class BNELUConv(nn.Module):
    """BatchNorm -> ELU -> conv; stride=-1 requests 2x nearest upsampling."""
    def __init__(self, C_in, C_out, kernel_size, stride=1, padding=0, dilation=1):
        super(BNELUConv, self).__init__()
        self.upsample = stride == -1
        stride = abs(stride)  # the conv itself always uses a positive stride
        self.bn = get_batchnorm(C_in, eps=BN_EPS, momentum=0.05)
        self.conv_0 = Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=True, dilation=dilation)
    def forward(self, x):
        x = self.bn(x)
        out = F.elu(x)
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv_0(out)
        return out
class BNSwishConv(nn.Module):
    """BatchNorm + Swish, then Conv2d; stride=-1 requests 2x nearest upsampling.

    (The previous docstring said "ReLU + Conv2d + BN", which did not match
    the implementation.)
    """
    def __init__(self, C_in, C_out, kernel_size, stride=1, padding=0, dilation=1):
        super(BNSwishConv, self).__init__()
        self.upsample = stride == -1
        stride = abs(stride)  # the conv itself always uses a positive stride
        self.bn_act = BatchNormSwish(C_in, eps=BN_EPS, momentum=0.05)
        self.conv_0 = Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=True, dilation=dilation)
    def forward(self, x):
        """
        Args:
            x (torch.Tensor): of size (B, C_in, H, W)
        """
        out = self.bn_act(x)
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv_0(out)
        return out
class FactorizedReduce(nn.Module):
    """Stride-2 downsampling: four 1x1 stride-2 convs applied at the four
    pixel offsets and concatenated on channels, so every input pixel
    contributes to the output."""
    def __init__(self, C_in, C_out):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.conv_1 = Conv2d(C_in, C_out // 4, 1, stride=2, padding=0, bias=True)
        self.conv_2 = Conv2d(C_in, C_out // 4, 1, stride=2, padding=0, bias=True)
        self.conv_3 = Conv2d(C_in, C_out // 4, 1, stride=2, padding=0, bias=True)
        # Last conv absorbs the channel remainder when C_out % 4 != 0.
        self.conv_4 = Conv2d(C_in, C_out - 3 * (C_out // 4), 1, stride=2, padding=0, bias=True)
    def forward(self, x):
        out = act(x)
        # Each conv samples a different (row, col) offset of the 2x2 grid.
        conv1 = self.conv_1(out)
        conv2 = self.conv_2(out[:, :, 1:, 1:])
        conv3 = self.conv_3(out[:, :, :, 1:])
        conv4 = self.conv_4(out[:, :, 1:, :])
        out = torch.cat([conv1, conv2, conv3, conv4], dim=1)
        return out
class UpSample(nn.Module):
    """Doubles spatial resolution with bilinear interpolation (align_corners=True)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
class EncCombinerCell(nn.Module):
    """Combines two feature maps by 1x1-projecting x2 and adding it to x1."""
    def __init__(self, Cin1, Cin2, Cout, cell_type):
        super(EncCombinerCell, self).__init__()
        self.cell_type = cell_type
        # Cin = Cin1 + Cin2
        self.conv = Conv2d(Cin2, Cout, kernel_size=1, stride=1, padding=0, bias=True)
    def forward(self, x1, x2):
        x2 = self.conv(x2)
        out = x1 + x2
        return out
# original combiner
class DecCombinerCell(nn.Module):
    """Combines two feature maps by channel concatenation followed by a 1x1 conv."""
    def __init__(self, Cin1, Cin2, Cout, cell_type):
        super(DecCombinerCell, self).__init__()
        self.cell_type = cell_type
        self.conv = Conv2d(Cin1 + Cin2, Cout, kernel_size=1, stride=1, padding=0, bias=True)
    def forward(self, x1, x2):
        out = torch.cat([x1, x2], dim=1)
        out = self.conv(out)
        return out
class ConvBNSwish(nn.Module):
    """Conv2d (no bias) followed by fused BatchNorm + Swish; padding keeps
    spatial size for the given kernel/dilation."""
    def __init__(self, Cin, Cout, k=3, stride=1, groups=1, dilation=1):
        padding = dilation * (k - 1) // 2
        super(ConvBNSwish, self).__init__()
        self.conv = nn.Sequential(
            Conv2d(Cin, Cout, k, stride, padding, groups=groups, bias=False, dilation=dilation, use_weight_norm=False),
            BatchNormSwish(Cout, eps=BN_EPS, momentum=0.05)  # drop in replacement for BN + Swish
        )
    def forward(self, x):
        return self.conv(x)
class SE(nn.Module):
    """Squeeze-and-excitation: reweights channels of x by a sigmoid gate
    computed from the globally average-pooled features."""

    def __init__(self, Cin, Cout):
        super().__init__()
        hidden = max(Cout // 16, 4)
        self.se = nn.Sequential(nn.Linear(Cin, hidden), nn.ReLU(inplace=True),
                                nn.Linear(hidden, Cout), nn.Sigmoid())

    def forward(self, x):
        # Global average pool over H, W -> per-channel gate in (0, 1).
        pooled = x.mean(dim=[2, 3]).view(x.size(0), -1)
        gate = self.se(pooled).view(x.size(0), -1, 1, 1)
        return x * gate
class InvertedResidual(nn.Module):
    """MobileNet-style inverted residual: 1x1 expand (factor ``ex``) ->
    grouped kxk conv (depthwise when g == 0) -> 1x1 project.

    stride=-1 prepends a 2x nearest upsample; stride 2 downsamples.
    NOTE(review): ``use_res_connect`` is computed but ``forward`` never adds
    the residual — confirm whether the skip connection was intended.
    """
    def __init__(self, Cin, Cout, stride, ex, dil, k, g):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2, -1]
        hidden_dim = int(round(Cin * ex))
        self.use_res_connect = self.stride == 1 and Cin == Cout
        self.upsample = self.stride == -1
        self.stride = abs(self.stride)
        # g == 0 means depthwise (one group per hidden channel).
        groups = hidden_dim if g == 0 else g
        layers0 = [nn.UpsamplingNearest2d(scale_factor=2)] if self.upsample else []
        layers = [get_batchnorm(Cin, eps=BN_EPS, momentum=0.05),
                  ConvBNSwish(Cin, hidden_dim, k=1),
                  ConvBNSwish(hidden_dim, hidden_dim, stride=self.stride, groups=groups, k=k, dilation=dil),
                  Conv2d(hidden_dim, Cout, 1, 1, 0, bias=False, use_weight_norm=False),
                  get_batchnorm(Cout, momentum=0.05)]
        layers0.extend(layers)
        self.conv = nn.Sequential(*layers0)
    def forward(self, x):
        return self.conv(x)
```
#### File: arch/thirdparty/utils.py
```python
import logging
import os
import shutil
import time
from datetime import timedelta
import sys
import torch
import torch.nn as nn
import numpy as np
import torch.distributed as dist
import torch.nn.functional as F
#from tensorboardX import SummaryWriter
class AvgrageMeter(object):
    """Running (optionally weighted) average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Fold in ``val`` observed ``n`` times and refresh the mean."""
        self.sum = self.sum + val * n
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
class ExpMovingAvgrageMeter(object):
    """Exponential moving average of a scalar, starting from zero.

    Note: ``momentum`` here weights the *new* observation, i.e.
    ``avg <- (1 - momentum) * avg + momentum * val``.
    """

    def __init__(self, momentum=0.9):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Restart the average at zero."""
        self.avg = 0

    def update(self, val):
        m = self.momentum
        self.avg = (1. - m) * self.avg + m * val
class DummyDDP(nn.Module):
    """Single-process stand-in for DistributedDataParallel.

    Exposes the wrapped network as ``.module`` so code written against the
    DDP interface runs unchanged without distributed training.
    """

    def __init__(self, model):
        super(DummyDDP, self).__init__()
        self.module = model

    def forward(self, *input, **kwargs):
        return self.module(*input, **kwargs)
def count_parameters_in_M(model):
    """Total number of parameters of ``model``, in millions.

    Parameters whose name contains "auxiliary" (auxiliary heads) are excluded.
    """
    # Use the builtin sum: calling np.sum on a generator is deprecated in NumPy.
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if "auxiliary" not in name) / 1e6
def save_checkpoint(state, is_best, save):
    """Write ``state`` to <save>/checkpoint.pth.tar; also copy it to
    model_best.pth.tar when ``is_best`` is true."""
    ckpt = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt)
    if is_best:
        shutil.copyfile(ckpt, os.path.join(save, 'model_best.pth.tar'))
def save(model, model_path):
    """Serialize ``model``'s state_dict to ``model_path``."""
    torch.save(model.state_dict(), model_path)
def load(model, model_path):
    """Restore ``model``'s parameters from the state_dict saved at ``model_path``."""
    model.load_state_dict(torch.load(model_path))
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory and optionally snapshot source scripts.

    Each file in ``scripts_to_save`` is copied into <path>/scripts so the
    exact code used for a run is archived alongside its results.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        if not os.path.exists(scripts_dir):
            os.mkdir(scripts_dir)
        for script in scripts_to_save:
            shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
class Logger(object):
    """Rank-aware logger: only rank 0 configures logging and emits messages.

    Messages are prefixed with the wall-clock time elapsed since construction,
    and are mirrored to <save>/log.txt in addition to stdout.
    """

    def __init__(self, rank, save):
        # rank: distributed process rank; non-zero ranks stay silent
        self.rank = rank
        if self.rank == 0:
            log_format = '%(asctime)s %(message)s'
            # NOTE: mutates the global root logger configuration
            logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                                format=log_format, datefmt='%m/%d %I:%M:%S %p')
            fh = logging.FileHandler(os.path.join(save, 'log.txt'))
            fh.setFormatter(logging.Formatter(log_format))
            logging.getLogger().addHandler(fh)
        self.start_time = time.time()

    def info(self, string, *args):
        """Log ``string`` (with lazy %-style ``args``) on rank 0 only."""
        if self.rank == 0:
            elapsed_time = time.time() - self.start_time
            elapsed_time = time.strftime(
                '(Elapsed: %H:%M:%S) ', time.gmtime(elapsed_time))
            if isinstance(string, str):
                string = elapsed_time + string
            else:
                # non-string payloads: emit the elapsed time on its own line first
                logging.info(elapsed_time)
            logging.info(string, *args)
class Writer(object):
    """Rank-aware TensorBoard writer facade.

    The underlying SummaryWriter is currently disabled (its import is
    commented out), leaving ``self.writer`` as None; every logging call is
    therefore guarded so it degrades to a no-op instead of raising
    AttributeError on rank 0 (the original crashed on any ``add_*``/``close``).
    """

    def __init__(self, rank, save):
        self.rank = rank
        if self.rank == 0:
            self.writer = None  # SummaryWriter(log_dir=save, flush_secs=20)

    def add_scalar(self, *args, **kwargs):
        if self.rank == 0 and self.writer is not None:
            self.writer.add_scalar(*args, **kwargs)

    def add_figure(self, *args, **kwargs):
        if self.rank == 0 and self.writer is not None:
            self.writer.add_figure(*args, **kwargs)

    def add_image(self, *args, **kwargs):
        if self.rank == 0 and self.writer is not None:
            self.writer.add_image(*args, **kwargs)

    def add_histogram(self, *args, **kwargs):
        if self.rank == 0 and self.writer is not None:
            self.writer.add_histogram(*args, **kwargs)

    def add_histogram_if(self, write, *args, **kwargs):
        if write and False:  # Used for debugging.
            self.add_histogram(*args, **kwargs)

    def close(self, *args, **kwargs):
        if self.rank == 0 and self.writer is not None:
            self.writer.close()
def reduce_tensor(tensor, world_size):
    """All-reduce ``tensor`` across workers and return its mean as a new tensor."""
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    reduced /= world_size
    return reduced
def get_stride_for_cell_type(cell_type):
    """Map a cell-type name prefix to its conv stride (-1 means upsample by 2)."""
    if cell_type.startswith(('normal', 'combiner')):
        return 1
    if cell_type.startswith('down'):
        return 2
    if cell_type.startswith('up'):
        return -1
    raise NotImplementedError(cell_type)
def get_cout(cin, stride):
    """Channel count after applying a stride: halve on upsample (-1), double
    on downsample (2), unchanged for stride 1.

    Raises:
        NotImplementedError: for any other stride (the original silently fell
        through and crashed with UnboundLocalError on the return).
    """
    if stride == 1:
        return cin
    if stride == -1:
        return cin // 2
    if stride == 2:
        return 2 * cin
    raise NotImplementedError(stride)
def kl_balancer_coeff(num_scales, groups_per_scale, fun):
    """Per-group KL weighting coefficients, normalized so the minimum is 1.

    Groups are concatenated from the coarsest scale to the finest; ``fun``
    selects how strongly later scales are up-weighted. The result is placed
    on the GPU (``.cuda()``), matching the training loop's expectations.
    """
    ns = num_scales
    if fun == 'equal':
        parts = [torch.ones(groups_per_scale[ns - i - 1]) for i in range(ns)]
    elif fun == 'linear':
        parts = [(2 ** i) * torch.ones(groups_per_scale[ns - i - 1]) for i in range(ns)]
    elif fun == 'sqrt':
        parts = [np.sqrt(2 ** i) * torch.ones(groups_per_scale[ns - i - 1]) for i in range(ns)]
    elif fun == 'square':
        parts = [np.square(2 ** i) / groups_per_scale[ns - i - 1] * torch.ones(groups_per_scale[ns - i - 1])
                 for i in range(ns)]
    else:
        raise NotImplementedError
    coeff = torch.cat(parts, dim=0).cuda()
    # convert min to 1.
    coeff /= torch.min(coeff)
    return coeff
def kl_per_group(kl_all):
    """Batch-mean KL per group plus smoothed per-group magnitudes.

    Returns:
        (coeff, vals): ``coeff`` is the batch mean of |KL| per group plus a
        0.01 floor (keepdim, used for balancing); ``vals`` is the plain batch
        mean per group.
    """
    vals = kl_all.mean(dim=0)
    coeff = kl_all.abs().mean(dim=0, keepdim=True) + 0.01
    return coeff, vals
def kl_balancer(kl_all, kl_coeff=1.0, kl_balance=False, alpha_i=None):
    """Combine per-group KL terms into one loss, optionally re-balanced.

    During warm-up (``kl_coeff < 1``) with ``kl_balance`` enabled, each
    group's KL is reweighted by its relative magnitude scaled by ``alpha_i``;
    otherwise the groups are simply summed.

    Returns:
        (kl, kl_coeffs, kl_vals): scaled per-sample KL, the per-group weights
        used (for reporting), and the batch-mean KL per group.
    """
    stacked = torch.stack(kl_all, dim=1)
    if kl_balance and kl_coeff < 1.0:
        coeff_i, kl_vals = kl_per_group(stacked)
        total_kl = torch.sum(coeff_i)
        coeff_i = coeff_i / alpha_i.unsqueeze(0) * total_kl
        coeff_i = coeff_i / torch.mean(coeff_i, dim=1, keepdim=True)
        # detach: the balancing weights themselves carry no gradient
        kl = torch.sum(stacked * coeff_i.detach(), dim=1)
        kl_coeffs = coeff_i.squeeze(0)
    else:
        kl_vals = torch.mean(stacked, dim=0)
        kl = torch.sum(stacked, dim=1)
        kl_coeffs = torch.ones(size=(len(kl_vals),))
    return kl_coeff * kl, kl_coeffs, kl_vals
def kl_coeff(step, total_step, constant_step, min_kl_coeff):
    """Linear KL warm-up: ramp from ``min_kl_coeff`` to 1.0, starting after
    ``constant_step`` and finishing ``total_step`` steps later."""
    ramp = (step - constant_step) / total_step
    return max(min(ramp, 1.0), min_kl_coeff)
def log_iw(decoder, x, log_q, log_p, crop=False):
    """Per-sample importance weight in log space:
    log p(x|z) + log p(z) - log q(z|x)."""
    log_px_given_z = -reconstruction_loss(decoder, x, crop)
    return log_px_given_z - log_q + log_p
def reconstruction_loss(decoder, x, crop=False):
    """Negative log-likelihood of ``x`` under the decoder distribution.

    For DiscMixLogistic the channel dimension is already folded into the
    log-prob, so only the spatial dims are summed; otherwise C, H and W are
    all reduced.
    """
    from counterfactualms.arch.thirdparty.distributions import Normal, DiscMixLogistic
    recon = decoder.log_prob(x)
    if crop:
        # evaluate only the 28x28 center region
        recon = recon[:, :, 2:30, 2:30]
    dims = [1, 2] if isinstance(decoder, DiscMixLogistic) else [1, 2, 3]
    return -torch.sum(recon, dim=dims)
def tile_image(batch_image, n):
    """Arrange a batch of n*n images into a single n-by-n image grid."""
    assert n * n == batch_image.size(0)
    c, h, w = batch_image.size(1), batch_image.size(2), batch_image.size(3)
    grid = batch_image.view(n, n, c, h, w)
    # -> (c, grid_row, h, grid_col, w) so rows/cols interleave correctly
    grid = grid.permute(2, 0, 3, 1, 4)
    return grid.contiguous().view(c, n * h, n * w)
def average_gradients(params, is_distributed):
    """ Gradient averaging. """
    # In-place mean of each trainable parameter's gradient across workers;
    # a no-op when running without distribution.
    if is_distributed:
        world_size = float(dist.get_world_size())
        for param in params:
            if param.requires_grad:
                dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
                param.grad.data /= world_size
def average_params(params, is_distributed):
    """ parameter averaging. """
    # In-place mean of each parameter's data across workers; no-op otherwise.
    if is_distributed:
        world_size = float(dist.get_world_size())
        for param in params:
            dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
            param.data /= world_size
def average_tensor(t, is_distributed):
    """In-place mean of ``t`` across workers; no-op when not distributed."""
    if is_distributed:
        world_size = float(dist.get_world_size())
        dist.all_reduce(t.data, op=dist.ReduceOp.SUM)
        t.data /= world_size
def one_hot(indices, depth, dim):
    """One-hot encode ``indices`` along a new axis ``dim`` of size ``depth``.

    Returns a CUDA tensor (matching the training loop's device expectations).
    """
    indices = indices.unsqueeze(dim)
    shape = list(indices.size())
    shape[dim] = depth
    # torch.zeros is already zero-filled; the redundant .zero_() call was removed
    y_onehot = torch.zeros(shape).cuda()
    y_onehot.scatter_(dim, indices, 1)
    return y_onehot
def num_output(dataset):
    """Flattened output dimensionality (C*H*W) for a dataset name."""
    if dataset == 'mnist':
        return 28 * 28
    if dataset == 'cifar10':
        return 3 * 32 * 32
    if dataset.startswith(('celeba', 'imagenet', 'lsun')):
        # the resolution is encoded in the name, e.g. 'celeba_64'
        size = int(dataset.split('_')[-1])
        return 3 * size * size
    if dataset == 'ffhq':
        return 3 * 256 * 256
    raise NotImplementedError
def get_input_size(dataset):
    """Spatial side length of the network input for a dataset name."""
    if dataset in ('mnist', 'cifar10'):
        # mnist is padded/resized up to 32 to match cifar10
        return 32
    if dataset.startswith(('celeba', 'imagenet', 'lsun')):
        return int(dataset.split('_')[-1])
    if dataset == 'ffhq':
        return 256
    raise NotImplementedError
def pre_process(x, num_bits):
    """Quantize images to ``num_bits`` levels and rescale in place to [0, 1]."""
    if num_bits != 8:
        # collapse 8-bit intensities down to 2**num_bits levels
        x = (x * 255 / 2 ** (8 - num_bits)).floor()
    x.div_(2 ** num_bits - 1)
    return x
def get_arch_cells(arch_type):
    """Look up the operation list for every network section of an architecture.

    Each architecture is defined by four op sequences: encoder cells
    (``normal_enc``/``down_enc``), decoder cells (``normal_dec``/``up_dec``),
    preprocessing cells (``normal_pre``/``down_pre``) and postprocessing cells
    (``normal_post``/``up_post``). ``ar_nn`` is always a single empty op.

    Raises:
        NotImplementedError: for an unknown ``arch_type``.
    """
    # arch_type -> (enc, dec, pre, post); ops are space-separated strings so
    # that fresh list objects are produced on every call via str.split().
    specs = {
        'res_elu': ('res_elu res_elu', 'res_elu res_elu',
                    'res_elu res_elu', 'res_elu res_elu'),
        'res_bnelu': ('res_bnelu res_bnelu', 'res_bnelu res_bnelu',
                      'res_bnelu res_bnelu', 'res_bnelu res_bnelu'),
        'res_bnswish': ('res_bnswish res_bnswish', 'res_bnswish res_bnswish',
                        'res_bnswish res_bnswish', 'res_bnswish res_bnswish'),
        'mbconv_sep': ('mconv_e6k5g0', 'mconv_e6k5g0', 'mconv_e3k5g0', 'mconv_e3k5g0'),
        'mbconv_sep11': ('mconv_e6k11g0', 'mconv_e6k11g0', 'mconv_e3k5g0', 'mconv_e3k5g0'),
        'res_mbconv': ('res_bnswish res_bnswish', 'mconv_e6k5g0',
                       'res_bnswish res_bnswish', 'mconv_e3k5g0'),
        'res53_mbconv': ('res_bnswish5 res_bnswish', 'mconv_e6k5g0',
                         'res_bnswish5 res_bnswish', 'mconv_e3k5g0'),
        'res35_mbconv': ('res_bnswish res_bnswish5', 'mconv_e6k5g0',
                         'res_bnswish res_bnswish5', 'mconv_e3k5g0'),
        'res55_mbconv': ('res_bnswish5 res_bnswish5', 'mconv_e6k5g0',
                         'res_bnswish5 res_bnswish5', 'mconv_e3k5g0'),
        'res_mbconv9': ('res_bnswish res_bnswish', 'mconv_e6k9g0',
                        'res_bnswish res_bnswish', 'mconv_e3k9g0'),
        'mbconv_res': ('mconv_e6k5g0', 'res_bnswish res_bnswish',
                       'mconv_e3k5g0', 'res_bnswish res_bnswish'),
        'mbconv_den': ('mconv_e6k5g0', 'mconv_e6k5g0', 'mconv_e3k5g8', 'mconv_e3k5g8'),
    }
    if arch_type not in specs:
        raise NotImplementedError
    enc, dec, pre, post = specs[arch_type]
    return {
        'normal_enc': enc.split(), 'down_enc': enc.split(),
        'normal_dec': dec.split(), 'up_dec': dec.split(),
        'normal_pre': pre.split(), 'down_pre': pre.split(),
        'normal_post': post.split(), 'up_post': post.split(),
        'ar_nn': [''],
    }
def groups_per_scale(num_scales, num_groups_per_scale, is_adaptive, divider=2, minimum_groups=1):
    """Number of latent groups at each scale, optionally shrinking per scale.

    With ``is_adaptive`` the count is divided by ``divider`` after each scale
    (floored at ``minimum_groups``); otherwise every scale uses the same count.
    """
    counts = []
    remaining = num_groups_per_scale
    for _ in range(num_scales):
        assert remaining >= 1
        counts.append(remaining)
        if is_adaptive:
            remaining = max(minimum_groups, remaining // divider)
    return counts
```
#### File: distributions/transforms/affine.py
```python
from pyro.distributions.conditional import ConditionalTransformModule
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions import transforms as pyro_transforms
import torch
from torch.distributions import transforms
class LearnedAffineTransform(TransformModule, transforms.AffineTransform):
    """Affine transform ``y = loc + scale * x`` with learnable parameters.

    When ``loc``/``scale`` are not supplied they are replaced by trainable
    ``nn.Parameter``s (registered through the TransformModule/nn.Module base),
    initialized to 0 and 1 respectively.
    """

    def __init__(self, loc=None, scale=None, **kwargs):
        super().__init__(loc=loc, scale=scale, **kwargs)
        if loc is None:
            # overwrite the None passed to AffineTransform with a parameter
            self.loc = torch.nn.Parameter(torch.zeros([1, ]))
        if scale is None:
            self.scale = torch.nn.Parameter(torch.ones([1, ]))

    def _broadcast(self, val):
        # reshape the flat params to (-1, 1, 1, ...) so they broadcast over
        # the trailing dims of ``val``
        dim_extension = tuple(1 for _ in range(val.dim() - 1))
        loc = self.loc.view(-1, *dim_extension)
        scale = self.scale.view(-1, *dim_extension)
        return loc, scale

    def _call(self, x):
        # forward transform
        loc, scale = self._broadcast(x)
        return loc + scale * x

    def _inverse(self, y):
        # inverse transform
        loc, scale = self._broadcast(y)
        return (y - loc) / scale
class ConditionalAffineTransform(ConditionalTransformModule):
    """Affine transform whose loc/scale are produced by a context network."""

    def __init__(self, context_nn, event_dim=0, **kwargs):
        super().__init__(**kwargs)
        self.event_dim = event_dim
        # maps context -> (loc, log_scale)
        self.context_nn = context_nn

    def condition(self, context):
        """Build the concrete AffineTransform for a given context."""
        loc, log_scale = self.context_nn(context)
        return transforms.AffineTransform(loc, torch.exp(log_scale),
                                          event_dim=self.event_dim)
class LowerCholeskyAffine(pyro_transforms.LowerCholeskyAffine):
    def log_abs_det_jacobian(self, x, y):
        """
        Calculates the elementwise determinant of the log Jacobian, i.e.
        log(abs(dy/dx)).
        """
        # NOTE(review): the trailing .sum(-1).sum(-1) reduces one more axis
        # than the upstream pyro implementation (which sums the diagonal logs
        # only once) -- presumably intentional for this project's batched
        # scale_tril, but worth confirming against callers.
        return torch.ones(x.size()[:-1], dtype=x.dtype, layout=x.layout, device=x.device) * \
            self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1).sum(-1)
```
#### File: experiments/classifier/classifier_experiment.py
```python
import logging
import os
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.metrics.classification import Accuracy
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.optim import AdamW
from counterfactualms.datasets.calabresi import CalabresiDataset
from counterfactualms.arch.medical import Encoder
logger = logging.getLogger(__name__)
class ClassifierExperiment(pl.LightningModule):
    """Binary classifier over Calabresi PNG slices.

    Reuses the VAE ``Encoder`` backbone with its latent head replaced by a
    single-logit linear layer, trained with BCE-with-logits on the ``type``
    label. Train/val/test datasets are built from CSVs given in ``hparams``.
    """

    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams
        self.train_batch_size = hparams.train_batch_size
        self.test_batch_size = hparams.test_batch_size
        # pseudo3d stacks 3 adjacent slices as channels; otherwise grayscale
        self.input_size = ((3,) if hparams.pseudo3d else (1,)) + tuple(hparams.resize)
        self.classifier = Encoder(num_convolutions=hparams.num_convolutions,
                                  filters=hparams.filters,
                                  input_size=self.input_size,
                                  latent_dim=1,
                                  use_weight_norm=hparams.use_weight_norm,
                                  dropout_rate=hparams.dropout_rate)
        # replace the encoder's latent head with a single-logit classifier head
        self.classifier.fc = nn.Linear(np.prod(self.classifier.intermediate_shape), 1, bias=True)
        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
        if hparams.validate:
            # deterministic / anomaly-checked execution for debugging (slower)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            torch.autograd.set_detect_anomaly(self.hparams.validate)
        resize = None if self.hparams.resize == (0,0) else self.hparams.resize
        # hasattr fallbacks keep checkpoints saved before these hparams existed loadable
        train_crop_type = self.hparams.train_crop_type if hasattr(self.hparams, 'train_crop_type') else 'random'
        crop_size = self.hparams.crop_size if hasattr(self.hparams, 'crop_size') else (224, 224)
        self.calabresi_train = CalabresiDataset(self.hparams.train_csv, crop_size=crop_size, crop_type=train_crop_type, resize=resize)  # noqa: E501
        self.calabresi_val = CalabresiDataset(self.hparams.valid_csv, crop_size=crop_size, crop_type='center', resize=resize)
        self.calabresi_test = CalabresiDataset(self.hparams.test_csv, crop_size=crop_size, crop_type='center', resize=resize)

    @property
    def required_data(self):
        # batch keys consumed by prep_batch
        return {'x', 'type'}

    def configure_optimizers(self):
        """AdamW over the classifier parameters only."""
        optimizer = AdamW(self.classifier.parameters(), lr=self.hparams.lr,
                          betas=self.hparams.betas, weight_decay=self.hparams.weight_decay)
        return optimizer

    def _dataloader_params(self):
        """DataLoader kwargs shared by all three loaders."""
        num_workers = len(os.sched_getaffinity(0)) // 2  # use half of the available cpus
        return {'num_workers': num_workers, 'pin_memory': self.trainer.on_gpu}

    def train_dataloader(self):
        return DataLoader(self.calabresi_train, batch_size=self.train_batch_size,
                          shuffle=True, drop_last=True, **self._dataloader_params())

    def val_dataloader(self):
        return DataLoader(self.calabresi_val, batch_size=self.test_batch_size,
                          shuffle=False, **self._dataloader_params())

    def test_dataloader(self):
        return DataLoader(self.calabresi_test, batch_size=self.test_batch_size,
                          shuffle=False, **self._dataloader_params())

    def _theis_noise(self, obs):
        """ add noise to discrete variables per Theis 2016 """
        # uniform dequantization noise in [-0.5, 0.5); training mode only
        if self.training:
            obs['x'] += (torch.rand_like(obs['x']) - 0.5)
        return obs

    def prep_batch(self, batch):
        """Convert a raw dataset batch into the model's input dict."""
        x = batch['image'].float()
        out = dict(x=x)
        for k in self.required_data:
            if k in batch:
                # unsqueeze so labels have an explicit feature dim matching preds
                out[k] = batch[k].unsqueeze(1).float()
        out = self._theis_noise(out)
        return out

    def _step(self, batch, label=''):
        """Shared train/val/test step: BCE-with-logits loss plus accuracy logging."""
        batch = self.prep_batch(batch)
        preds = self.classifier(batch['x'])
        loss = F.binary_cross_entropy_with_logits(preds, batch['type'])
        self.log(f'{label}_loss', loss)
        # pick the metric object matching this phase (train/val/test)
        acc = getattr(self, f'{label}_acc')
        acc(preds.sigmoid(), batch['type'])
        self.log(f'{label}_acc', acc, on_step=False, on_epoch=True)
        return loss

    def forward(self, x):
        return self.classifier(x)

    def training_step(self, batch, batch_idx):
        loss = self._step(batch, 'train')
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self._step(batch, 'val')
        return loss

    def test_step(self, batch, batch_idx):
        loss = self._step(batch, 'test')
        return loss

    @classmethod
    def add_arguments(cls, parser):
        """Register the experiment's CLI options on ``parser``."""
        parser.add_argument('--train-csv', default="/iacl/pg20/jacobr/calabresi/png/csv/train_png.csv", type=str, help="csv for training data (default: %(default)s)")  # noqa: E501
        parser.add_argument('--valid-csv', default="/iacl/pg20/jacobr/calabresi/png/csv/valid_png.csv", type=str, help="csv for validation data (default: %(default)s)")  # noqa: E501
        parser.add_argument('--test-csv', default="/iacl/pg20/jacobr/calabresi/png/csv/test_png.csv", type=str, help="csv for testing data (default: %(default)s)")  # noqa: E501
        parser.add_argument('--crop-size', default=(224,224), type=int, nargs=2, help="size of patch to take from image (default: %(default)s)")
        parser.add_argument('--resize', default=(128,128), type=int, nargs=2, help="resize cropped image to this size (use 0,0 for no resize) (default: %(default)s)")
        parser.add_argument('--train-crop-type', default='random', choices=['random', 'center'], help="how to crop training images (default: %(default)s)")
        parser.add_argument('--train-batch-size', default=128, type=int, help="train batch size (default: %(default)s)")
        parser.add_argument('--test-batch-size', default=256, type=int, help="test batch size (default: %(default)s)")
        parser.add_argument('--lr', default=1e-3, type=float, help="lr of deep part (default: %(default)s)")
        parser.add_argument('--weight-decay', default=0., type=float, help="weight decay for adam (default: %(default)s)")
        parser.add_argument('--dropout-rate', default=0., type=float, help="dropout rate for classifier (default: %(default)s)")
        parser.add_argument('--betas', default=(0.9,0.999), type=float, nargs=2, help="betas for adam (default: %(default)s)")
        parser.add_argument('--filters', default=[8,16,32,64,128], nargs='+', type=int, help="number of filters in each layer of classifier (default: %(default)s)")
        parser.add_argument('--num-convolutions', default=3, type=int, help="number of convolutions in each layer (default: %(default)s)")
        parser.add_argument('--use-weight-norm', default=False, action='store_true', help="use weight norm in conv layers (default: %(default)s)")
        parser.add_argument('--pseudo3d', default=False, action='store_true', help="use pseudo-3d images (3 channels) (default: %(default)s)")
        parser.add_argument('--validate', default=False, action='store_true', help="more verbose validation (default: %(default)s)")
        return parser
```
#### File: experiments/classifier/trainer.py
```python
import argparse
import logging
import os
import warnings
import sys
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from counterfactualms.experiments import classifier # noqa: F401
from counterfactualms.experiments.classifier.classifier_experiment import ClassifierExperiment
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: parse args, configure logging, build and fit the experiment."""
    # experiment-level args are parsed first (parse_known_args) so that the
    # seed and verbosity can be applied before the main parser is built
    exp_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    exp_parser.add_argument('--model-path', '-mp', help='pre-trained model to load',
                            default='/iacl/pg20/jacobr/calabresi/models/pretrained.ckpt', type=str)
    exp_parser.add_argument('--seed', default=1337, type=int, help='random seed')
    exp_parser.add_argument('-v', '--verbosity', action="count", default=0,
                            help="increase output verbosity (e.g., -vv is more than -v)")
    exp_args, other_args = exp_parser.parse_known_args()
    seed_everything(exp_args.seed)
    if exp_args.verbosity == 1:
        level = logging.getLevelName('INFO')
    elif exp_args.verbosity >= 2:
        level = logging.getLevelName('DEBUG')
    else:
        level = logging.getLevelName('WARNING')
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser = Trainer.add_argparse_args(parser)
    parser.set_defaults(checkpoint_callback=True)
    # NOTE: relies on argparse private internals (_action_groups) to label the
    # lightning option group -- fragile across argparse/lightning versions
    parser._action_groups[1].title = 'lightning_options'
    experiment_group = parser.add_argument_group('experiment')
    ClassifierExperiment.add_arguments(experiment_group)
    args = parser.parse_args(other_args)
    if args.gpus is not None and isinstance(args.gpus, int):
        # Make sure that it only uses a single GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpus)
        args.gpus = 1
    # split the flat parsed namespace back into one namespace per option group
    groups = {}
    for group in parser._action_groups:
        group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions}
        groups[group.title] = argparse.Namespace(**group_dict)
    lightning_args = groups['lightning_options']
    tb_logger = TensorBoardLogger(lightning_args.default_root_dir, name=f'ClassifierExperiment/')
    lightning_args.logger = tb_logger
    hparams = groups['experiment']
    setattr(hparams, 'n_epochs', args.max_epochs)
    setattr(hparams, 'verbosity', exp_args.verbosity)
    # keep the 10 best checkpoints by validation loss, plus the most recent one
    callbacks = [ModelCheckpoint(
        monitor='val_loss',
        save_top_k=10,
        save_last=True,
        mode='min',
        filename='{epoch}-{val_acc:.2f}-{val_loss:.2f}'
    )]
    trainer = Trainer.from_argparse_args(lightning_args, callbacks=callbacks)
    experiment = ClassifierExperiment(hparams)
    # in validate mode surface each warning once; otherwise silence them
    warning_level = "once" if hparams.validate else "ignore"
    with warnings.catch_warnings():
        warnings.simplefilter(warning_level)
        trainer.fit(experiment)


if __name__ == '__main__':
    sys.exit(main())
```
|
{
"source": "jcreinhold/lesion-metrics",
"score": 2
}
|
#### File: lesion-metrics/tests/test_metrics.py
```python
import builtins
import pathlib
import medio.image as mioi
import pytest
import lesion_metrics.metrics as lmm
import lesion_metrics.typing as lmt
import lesion_metrics.volume as lmv
# Array backends the metric tests are parametrized over; torch is exercised
# only when it is importable in the current environment.
backends = ["numpy"]

try:
    import torch

    backends.append("torch")
except (ModuleNotFoundError, ImportError):
    pass
@pytest.fixture
def cwd() -> pathlib.Path:
    # resolve the tests/ directory regardless of where pytest was launched from
    cwd = pathlib.Path.cwd().resolve()
    if cwd.name == "tests":
        return cwd
    cwd = (cwd / "tests").resolve(strict=True)
    return cwd


@pytest.fixture(params=backends)
def backend(request) -> builtins.str:  # type: ignore[no-untyped-def]
    # parametrized over every available array backend (numpy, optionally torch)
    _backend: str = request.param
    return _backend


@pytest.fixture
def pred_filename(cwd: pathlib.Path) -> pathlib.Path:
    return cwd / "test_data" / "pred" / "pred.nii.gz"


@pytest.fixture
def pred(backend: builtins.str, pred_filename: pathlib.Path) -> lmt.Label:
    # predicted segmentation mask, converted to a torch tensor when requested
    pred_data: lmt.Label = mioi.Image.from_path(pred_filename)
    if backend == "torch":
        pred_data = torch.from_numpy(pred_data)  # type: ignore[assignment]
    return pred_data


@pytest.fixture
def truth_filename(cwd: pathlib.Path) -> pathlib.Path:
    return cwd / "test_data" / "truth" / "truth.nii.gz"


@pytest.fixture
def truth(backend: builtins.str, truth_filename: pathlib.Path) -> lmt.Label:
    # ground-truth segmentation mask paired with the ``pred`` fixture
    truth_data: lmt.Label = mioi.Image.from_path(truth_filename)
    if backend == "torch":
        truth_data = torch.from_numpy(truth_data)  # type: ignore[assignment]
    return truth_data
def test_dice(pred: lmt.Label, truth: lmt.Label) -> None:
    dice_coef = lmm.dice(pred, truth)
    # The hard-coded counts come from the bundled fixture masks: the
    # arithmetic suggests truth has lesions of 8, 1 and 1 voxels and pred of
    # 2, 1 and 1, overlapping in 3 voxels -- confirm against tests/test_data
    # if the masks ever change.
    correct = 2 * (3 / ((8 + 1 + 1) + (2 + 1 + 1)))
    assert dice_coef == correct


def test_jaccard(pred: lmt.Label, truth: lmt.Label) -> None:
    jaccard_idx = lmm.jaccard(pred, truth)
    # intersection / union with the same fixture-derived voxel counts
    correct = 3 / ((8 + 1 + 1) + 1)
    assert jaccard_idx == correct


def test_ppv(pred: lmt.Label, truth: lmt.Label) -> None:
    ppv_score = lmm.ppv(pred, truth)
    # true positives over all predicted-positive voxels
    correct = 3 / (2 + 1 + 1)
    assert ppv_score == correct


def test_tpr(pred: lmt.Label, truth: lmt.Label) -> None:
    tpr_score = lmm.tpr(pred, truth)
    # true positives over all truth-positive voxels
    correct = 3 / (8 + 1 + 1)
    assert tpr_score == correct


def test_lfdr(pred: lmt.Label, truth: lmt.Label) -> None:
    lfpr_score = lmm.lfdr(pred, truth)
    # one of the three predicted lesions has no overlap with truth
    correct = 1 / 3
    assert lfpr_score == correct


def test_ltpr(pred: lmt.Label, truth: lmt.Label) -> None:
    ltpr_score = lmm.ltpr(pred, truth)
    # two of the three true lesions are detected
    correct = 2 / 3
    assert ltpr_score == correct


def test_avd(pred: lmt.Label, truth: lmt.Label) -> None:
    avd_score = lmm.avd(pred, truth)
    correct = 0.6
    assert avd_score == correct


def test_corr(pred: lmt.Label, truth: lmt.Label) -> None:
    # volumes perturbed symmetrically by +/- eps remain perfectly correlated
    ps = pred.sum()
    ts = truth.sum()
    eps = 0.1
    pred_vols = [ps, ps + eps, ps - eps]
    truth_vols = [ts, ts + eps, ts - eps]
    corr_score = lmm.corr(pred_vols, truth_vols)
    correct = 1.0
    assert pytest.approx(corr_score, 1e-3) == correct


def test_isbi15_score(pred: lmt.Label, truth: lmt.Label) -> None:
    isbi15 = lmm.isbi15_score(pred, truth)
    # regression value for the fixture masks (see isbi15_score definition)
    correct = 0.6408730158730158
    assert isbi15 == pytest.approx(correct, 1e-3)


def test_segmentation_volume(pred_filename: pathlib.Path) -> None:
    # volume in physical units derived from the NIfTI header of the pred mask
    sv = lmv.SegmentationVolume.from_filename(pred_filename)
    vol = sv.volume()
    assert vol == 4.0


@pytest.mark.skip("Not implemented.")
def test_assd(pred: lmt.Label, truth: lmt.Label) -> None:
    pass
```
|
{
"source": "jcreinhold/lesionqc",
"score": 3
}
|
#### File: lesionqc/lesionqc/metrics.py
```python
__all__ = ['dice',
'jaccard',
'ppv',
'tpr',
'lfpr',
'ltpr',
'avd',
'assd',
'corr',
'isbi15_score']
import numpy as np
from scipy.stats import pearsonr
from skimage.measure import label
def dice(pred, truth):
    """ dice coefficient between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    overlap = (p & t).sum()
    total = p.sum() + t.sum()
    # NaN when both masks are empty (coefficient is undefined)
    return np.nan if total == 0. else 2 * overlap / total
def jaccard(pred, truth):
    """ jaccard index (IoU) between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    overlap = (p & t).sum()
    union = (p | t).sum()
    # NaN when both masks are empty (index is undefined)
    return np.nan if union == 0. else overlap / union
def ppv(pred, truth):
    """ positive predictive value (precision) between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    overlap = (p & t).sum()
    n_pred = p.sum()
    # NaN when nothing was predicted positive (precision is undefined)
    return np.nan if n_pred == 0. else overlap / n_pred
def tpr(pred, truth):
    """ true positive rate (sensitivity) between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    overlap = (p & t).sum()
    n_truth = t.sum()
    # NaN when the truth mask is empty (sensitivity is undefined)
    return np.nan if n_truth == 0. else overlap / n_truth
def lfpr(pred, truth):
    """ lesion false positive rate between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    components, n = label(p, return_num=True)
    if n == 0:
        return np.nan
    # a predicted lesion is a false positive if it overlaps no true voxel
    false_pos = sum(1 for i in range(1, n + 1) if ((components == i) & t).sum() == 0)
    return false_pos / n
def ltpr(pred, truth):
    """ lesion true positive rate between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    components, n = label(t, return_num=True)
    if n == 0:
        return np.nan
    # a true lesion counts as detected if any predicted voxel overlaps it
    detected = sum(1 for i in range(1, n + 1) if ((components == i) & p).sum() > 0)
    return detected / n
def avd(pred, truth):
    """ absolute volume difference between predicted and true binary masks """
    p, t = (pred > 0), (truth > 0)
    t_vol = t.sum()
    # NaN when the truth mask is empty (relative difference is undefined)
    return np.nan if t_vol == 0. else np.abs(p.sum() - t_vol) / t_vol
def assd(pred, truth):
    """ average symmetric surface difference between predicted and true binary masks """
    # not yet implemented; kept as an explicit stub so callers fail loudly
    raise NotImplementedError
def corr(pred_vols, truth_vols):
    """ pearson correlation coefficient between list of predicted and true binary volumes """
    r, _ = pearsonr(pred_vols, truth_vols)
    return r
def isbi15_score(pred, truth, reweighted=True):
    """
    report the score (minus volume correlation)
    for a given prediction as described in [1]

    reweighted flag puts the score (minus
    volume correlation) between 0 and 1

    References:
        [1] Carass, Aaron, et al. "Longitudinal multiple sclerosis
            lesion segmentation: resource and challenge." NeuroImage
            148 (2017): 77-102.
    """
    terms = (dice(pred, truth) / 8,
             ppv(pred, truth) / 8,
             (1 - lfpr(pred, truth)) / 4,
             ltpr(pred, truth) / 4)
    score = sum(terms)
    if reweighted:
        score *= (4 / 3)
    return score
```
|
{
"source": "jcreinhold/msseg",
"score": 2
}
|
#### File: msseg/msseg/cutmix.py
```python
__all__ = ['cutmix2d',
'cutmix3d',
'CutMixCollator']
import numpy as np
import torch
def cutmix2d(batch, alpha: float = 1.):
    """CutMix for 2D batches: paste a random box from a shuffled copy of the
    batch into both the images and their (segmentation) targets, in place.

    The box covers a fraction (1 - lam) of the image area, lam ~ Beta(alpha, alpha).
    The RNG draw order (randperm, beta, uniform, uniform) is significant for
    reproducibility under seeded generators.
    """
    data, targets = batch
    perm = torch.randperm(data.size(0))
    shuffled_data = data[perm]
    shuffled_targets = targets[perm]
    lam = np.random.beta(alpha, alpha)
    image_h, image_w = data.shape[2:]
    cx = np.random.uniform(0, image_w)
    cy = np.random.uniform(0, image_h)
    box_w = image_w * np.sqrt(1 - lam)
    box_h = image_h * np.sqrt(1 - lam)
    x0 = int(np.round(max(cx - box_w / 2, 0)))
    x1 = int(np.round(min(cx + box_w / 2, image_w)))
    y0 = int(np.round(max(cy - box_h / 2, 0)))
    y1 = int(np.round(min(cy + box_h / 2, image_h)))
    data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]
    targets[:, :, y0:y1, x0:x1] = shuffled_targets[:, :, y0:y1, x0:x1]
    return data, targets
def cutmix3d(batch, alpha: float = 1.):
    """CutMix for 3D batches: paste a random cuboid from a shuffled copy of
    the batch into both the volumes and their targets, in place.

    Each cuboid side scales with sqrt(1 - lam), lam ~ Beta(alpha, alpha).
    The RNG draw order (randperm, beta, uniform x3) is significant for
    reproducibility under seeded generators.
    """
    data, targets = batch
    perm = torch.randperm(data.size(0))
    shuffled_data = data[perm]
    shuffled_targets = targets[perm]
    lam = np.random.beta(alpha, alpha)
    image_d, image_h, image_w = data.shape[2:]
    cx = np.random.uniform(0, image_w)
    cy = np.random.uniform(0, image_h)
    cz = np.random.uniform(0, image_d)
    box_w = image_w * np.sqrt(1 - lam)
    box_h = image_h * np.sqrt(1 - lam)
    box_d = image_d * np.sqrt(1 - lam)
    x0 = int(np.round(max(cx - box_w / 2, 0)))
    x1 = int(np.round(min(cx + box_w / 2, image_w)))
    y0 = int(np.round(max(cy - box_h / 2, 0)))
    y1 = int(np.round(min(cy + box_h / 2, image_h)))
    z0 = int(np.round(max(cz - box_d / 2, 0)))
    z1 = int(np.round(min(cz + box_d / 2, image_d)))
    data[:, :, z0:z1, y0:y1, x0:x1] = shuffled_data[:, :, z0:z1, y0:y1, x0:x1]
    targets[:, :, z0:z1, y0:y1, x0:x1] = shuffled_targets[:, :, z0:z1, y0:y1, x0:x1]
    return data, targets
class CutMixCollator:
    """DataLoader collate function that applies CutMix after default collation.

    ``dim`` selects 2D vs 3D CutMix; ``alpha`` is the Beta-distribution
    parameter forwarded to the cutmix function.
    """

    def __init__(self, alpha: float = 1., dim: int = 3):
        assert 0. < alpha
        self.alpha = alpha
        assert 1 < dim < 4
        self.dim = dim

    def __call__(self, batch):
        collated = torch.utils.data.dataloader.default_collate(batch)
        if self.dim == 2:
            return cutmix2d(collated, self.alpha)
        if self.dim == 3:
            return cutmix3d(collated, self.alpha)
        # unreachable given the __init__ assertion, kept for safety
        raise NotImplementedError('Only 2 and 3 dimensional images are supported.')
```
#### File: msseg/tests/test_tiramisu3d.py
```python
import contextlib
import os
from os.path import join
import shutil
import tempfile
import unittest
import warnings
from pytorch_lightning import Trainer, seed_everything
with open(os.devnull, "w") as f:
with contextlib.redirect_stdout(f):
import torchio
import msseg
from msseg.loss import binary_combo_loss
from msseg.data import csv_to_subjectlist
from msseg.util import n_dirname
from _test_configs import test_lightningtiramisu3d_config
from _test_lightningtiramisu import (
_create_test_csv,
LightningTiramisuTester
)
# Fix all RNG seeds (python/numpy/torch) for deterministic test runs.
seed_everything(1337)
# Repository root: two directories above the installed msseg package file.
msseg_dir = n_dirname(msseg.__file__, 2)
DATA_DIR = join(msseg_dir, "tests/test_data/")  # sample data used by these tests
class LightningTiramisu3d(LightningTiramisuTester):
    """3D Tiramisu Lightning module used for the smoke tests below."""

    def _forward_loss(self, batch):
        # shared forward/loss computation for the train and validation steps
        source = batch['t1'][torchio.DATA]
        target = batch['label'][torchio.DATA]
        prediction = self.forward(source)
        return self.criterion(prediction, target)

    def training_step(self, batch, batch_idx):
        loss = self._forward_loss(batch)
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_step(self, batch, batch_idx):
        return {'val_loss': self._forward_loss(batch)}
class TestTiramisu3d(unittest.TestCase):
def setUp(self):
self.out_dir = tempfile.mkdtemp()
csv = join(self.out_dir, "data.csv")
_create_test_csv(csv, DATA_DIR)
subject_list = csv_to_subjectlist(csv)
self.net = LightningTiramisu3d(
test_lightningtiramisu3d_config,
subject_list)
def tearDown(self):
shutil.rmtree(self.out_dir)
del self.net
def test_fit(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trainer = Trainer(
default_root_dir=self.out_dir,
fast_dev_run=True,
progress_bar_refresh_rate=0)
trainer.fit(self.net)
def test_weight(self):
csv = join(self.out_dir, "data.csv")
_create_test_csv(csv, DATA_DIR, weight=True)
subject_list = csv_to_subjectlist(csv)
self.net = LightningTiramisu3d(
test_lightningtiramisu3d_config,
subject_list)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trainer = Trainer(
default_root_dir=self.out_dir,
fast_dev_run=True,
progress_bar_refresh_rate=0)
trainer.fit(self.net)
def test_combo_loss(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.net.criterion = binary_combo_loss
trainer = Trainer(
default_root_dir=self.out_dir,
fast_dev_run=True,
progress_bar_refresh_rate=0)
trainer.fit(self.net)
# Allow running this test module directly (e.g. ``python test_tiramisu3d.py``).
if __name__ == "__main__":
    unittest.main()
```
|
{
"source": "jcreinhold/nimanifold",
"score": 3
}
|
#### File: nimanifold/nimanifold/types.py
```python
__all__ = [
    'Array',
    'Axes',
    'DataFrame',
    'DataLocSlice',
    'Grid',
    'Loc',
    'Number',
    'Sample',
    'Shape',
    'SubGrid'
]
from typing import *
from copy import copy
import h5py
from matplotlib.axes import Axes
import numpy as np
import pandas as pd
# Type aliases used throughout nimanifold.
Array = np.ndarray  # any numpy array
DataFrame = pd.DataFrame
# NOTE(review): presumably (data, locs, slices) lists, by analogy with
# Sample's fields — confirm against callers.
DataLocSlice = Tuple[List[Array], List[Array], List[Array]]
Grid = Tuple[Array, Array, Array]  # triple of coordinate arrays
Loc = Tuple[int, int, int]  # 3-int index triple
Number = Union[int, float]
Shape = Tuple[int, int, int]  # 3D array shape
SubGrid = Tuple[Array, Array, Array]  # same structure as Grid
class Sample:
    """Container for N sampled data points plus per-sample metadata.

    ``data`` holds N rows; ``locs``/``pids`` (and, when present, ``sites``/
    ``contrasts``) must be (N, 3) arrays and ``slices`` must have N rows —
    shapes are enforced by :meth:`is_valid`.
    """

    def __init__(self,
                 data: Array,
                 locs: Array,
                 pids: Array,
                 slices: Array,
                 sites: Optional[Array] = None,
                 contrasts: Optional[Array] = None):
        self.data = data
        self.locs = locs
        self.pids = pids
        self.slices = slices
        self.sites = sites
        self.contrasts = contrasts
        self.is_valid()

    def __len__(self):
        return self.data.shape[0]

    def __repr__(self):
        return f"{len(self)} Samples"

    def is_valid(self):
        """Assert that every metadata array agrees with the number of samples."""
        n = len(self)
        for required in (self.locs, self.pids):
            assert (required.shape[0] == n)
            assert (required.shape[1] == 3)
        assert (self.slices.shape[0] == n)
        for optional in (self.sites, self.contrasts):
            if optional is not None:
                assert (optional.shape[0] == n)
                assert (optional.shape[1] == 3)

    def new_data(self, data: Array):
        """Return a shallow copy of this sample with ``data`` replaced (and re-validated)."""
        duplicate = copy(self)
        duplicate.data = data
        duplicate.is_valid()
        return duplicate

    def subsample(self, n: int):
        """Return a new Sample of ``n`` rows drawn uniformly without replacement."""
        total = len(self)
        assert (n <= total)
        chosen = np.random.choice(total, size=n, replace=False)

        def take(arr):
            # index optional arrays only when they exist
            return arr[chosen] if arr is not None else None

        return Sample(
            self.data[chosen],
            self.locs[chosen],
            self.pids[chosen],
            self.slices[chosen],
            take(self.sites),
            take(self.contrasts),
        )

    def to_hdf5(self, filename: str):
        """Serialize all (present) arrays to an HDF5 file."""
        with h5py.File(filename, "w") as h5:
            for key in ('data', 'locs', 'pids', 'slices'):
                h5.create_dataset(key, data=getattr(self, key))
            if self.sites is not None:
                h5.create_dataset('sites', data=self.sites)
            if self.contrasts is not None:
                h5.create_dataset('contrasts', data=self.contrasts)

    @classmethod
    def from_hdf5(cls, filename: str):
        """Load a Sample previously written by :meth:`to_hdf5`."""
        with h5py.File(filename, "r") as h5:
            arrays = {key: np.asarray(h5[key])
                      for key in ('data', 'locs', 'pids', 'slices')}
            for key in ('sites', 'contrasts'):
                arrays[key] = np.asarray(h5[key]) if key in h5 else None
        return cls(arrays['data'], arrays['locs'], arrays['pids'],
                   arrays['slices'], arrays['sites'], arrays['contrasts'])
```
|
{
"source": "jcreinhold/pymedio",
"score": 2
}
|
#### File: pymedio/pymedio/image.py
```python
from __future__ import annotations
__all__ = ["Image"]
import builtins
import typing
import zipfile
import numpy as np
import numpy.typing as npt
try:
import nibabel as nib
import SimpleITK as sitk
except ImportError as imp_exn:
imp_exn_msg = f"NiBabel and SimpleITK must be installed to use {__name__}."
raise ImportError(imp_exn_msg) from imp_exn
import pymedio.base as miob
import pymedio.dicom as miod
import pymedio.functional as miof
import pymedio.typing as miot
class Image(miob.BasicImage[typing.Any, miot.DType]):  # type: ignore[type-arg]
    """Spatial medical image (array data + affine) with conversion helpers
    to/from nibabel, SimpleITK and DICOM representations."""
    @property
    def repr_properties(self) -> builtins.list[builtins.str]:
        """Strings shown in ``repr``: the base properties plus the orientation code."""
        props = super().repr_properties
        props += [f"orientation: {''.join(self.orientation)}+"]
        return props
    @property
    def orientation(self) -> builtins.tuple[builtins.str, builtins.str, builtins.str]:
        """Orientation codes."""
        codes: builtins.tuple[builtins.str, builtins.str, builtins.str]
        codes = nib.aff2axcodes(self.affine)
        return codes
    @property
    def bounds(self) -> npt.NDArray[np.float64]:
        """Position of centers of voxels in smallest and largest indices."""
        ini = 0, 0, 0
        fin: np.ndarray = np.asarray(self.shape) - 1
        point_ini = nib.affines.apply_affine(self.affine, ini)
        point_fin = nib.affines.apply_affine(self.affine, fin)
        return np.asarray((point_ini, point_fin))
    def axis_name_to_index(self, axis: builtins.str) -> builtins.int:
        """Map an axis name (e.g. ``'Left'``, ``'Top'``) to a negative data-axis index."""
        # Top and bottom are used for the vertical 2D axis as the use of
        # Height vs Horizontal might be ambiguous
        if not isinstance(axis, str):
            raise ValueError("Axis must be a string")
        axis = axis[0].upper()
        if axis in "TB":  # Top, Bottom
            return -2
        else:
            # look the label up in the orientation code, trying the flipped
            # label as well (e.g. 'L' matches an 'R'-coded axis)
            try:
                index = self.orientation.index(axis)
            except ValueError:
                index = self.orientation.index(self.flip_axis(axis))
            # Return negative indices so that it does not matter whether we
            # refer to spatial dimensions or not
            index = -3 + index
            return index
    @staticmethod
    def flip_axis(axis: builtins.str) -> builtins.str:
        """Return the opposite axis label (e.g. ``'L'`` -> ``'R'``, ``'A'`` -> ``'P'``)."""
        labels = "LRPAISTBDV"
        first = labels[::2]
        last = labels[1::2]
        flip_dict = {a: b for a, b in zip(first + last, last + first)}
        axis = axis[0].upper()
        flipped_axis = flip_dict.get(axis)
        if flipped_axis is None:
            values = ", ".join(labels)
            message = f"Axis not understood. Please use one of: {values}"
            raise ValueError(message)
        return flipped_axis
    def get_bounds(self) -> miot.Bounds:
        """Get minimum and maximum world coordinates occupied by the image."""
        first_index = 3 * (-0.5,)
        last_index: np.ndarray = np.asarray(self.shape) - 0.5
        first_point = nib.affines.apply_affine(self.affine, first_index)
        last_point = nib.affines.apply_affine(self.affine, last_index)
        array: np.ndarray = np.asarray((first_point, last_point))
        bounds_x, bounds_y, bounds_z = array.T.tolist()
        return bounds_x, bounds_y, bounds_z
    def save(self, path: miot.PathLike, *, squeeze: builtins.bool = True) -> None:
        """Write the image data and affine to ``path`` via ``miof.write_image``."""
        miof.write_image(np.array(self), self.affine, path, squeeze=squeeze)
    def to_filename(self, path: miot.PathLike) -> None:
        """Save to ``path`` without squeezing singleton dimensions."""
        self.save(path, squeeze=False)
    def get_center(self, lps: builtins.bool = False) -> miot.TripletFloat:
        """Get image center in RAS+ or LPS+ coordinates"""
        size: np.ndarray = np.asarray(self.shape)
        center_index = (size - 1) / 2
        r, a, s = nib.affines.apply_affine(self.affine, center_index)
        return (-r, -a, s) if lps else (r, a, s)
    @classmethod
    def from_path(
        cls: typing.Type[Image],
        path: miot.PathLike,
        *,
        dtype: typing.Type[miot.DType] | None = None,
        eager: builtins.bool = True,
    ) -> Image[miot.DType]:
        """Read an image from ``path``.

        ``eager`` is forwarded to ``miof.read_image`` (presumably controls
        lazy vs. immediate loading — confirm against pymedio.functional).
        """
        data, affine = miof.read_image(path, dtype=dtype, eager=eager)
        return cls(data=data, affine=affine)
    @classmethod
    def from_stream(
        cls: typing.Type[Image],
        data_stream: typing.IO,
        *,
        dtype: typing.Type[miot.DType] | None = None,
        gzipped: builtins.bool = False,
        image_class: miof.NibabelImageClass | None = None,
    ) -> Image[miot.DType]:
        """Read an image from an open (optionally gzipped) byte stream."""
        data, affine = miof.read_image_from_stream(
            data_stream, dtype=dtype, gzipped=gzipped, image_class=image_class
        )
        return cls(data=data, affine=affine)
    @classmethod
    def from_zipped_stream(
        cls: typing.Type[Image],
        data_stream: typing.IO,
        *,
        dtype: typing.Type[miot.DType] | None = None,
        gzipped: builtins.bool = False,
        image_class: miof.NibabelImageClass | None = None,
        **zip_kwargs: typing.Any,
    ) -> Image[miot.DType]:
        """Read an image from a zip archive that contains exactly one file.

        Raises:
            RuntimeError: if the archive holds more (or fewer) than one file.
        """
        with zipfile.ZipFile(data_stream, "r", **zip_kwargs) as zf:
            # directories end with '/': only count real file entries
            names = [name for name in zf.namelist() if not name.endswith("/")]
            if (n := len(names)) != 1:
                msg = f"{n} files in zipped archive. This constructor requires only 1."
                raise RuntimeError(msg)
            name = names[0]
            with zf.open(name, mode="r") as f:
                return cls.from_stream(
                    typing.cast(typing.BinaryIO, f),
                    dtype=dtype,
                    gzipped=gzipped,
                    image_class=image_class,
                )
    @classmethod
    def from_sitk(
        cls: typing.Type[Image],
        sitk_image: sitk.Image,
        *,
        dtype: typing.Type[miot.DType] | None = None,
    ) -> Image:
        """Build an Image from a ``SimpleITK.Image``."""
        data, affine = miof.sitk_to_array(sitk_image, dtype=dtype)
        return cls(data=data, affine=affine)
    @classmethod
    def from_dicom_image(
        cls: typing.Type[Image], dicom_image: miod.DICOMImage
    ) -> Image:
        """Build an Image from an already-loaded ``DICOMImage``."""
        return cls(data=dicom_image, affine=dicom_image.affine)
    @classmethod
    def from_dicom_zipped_stream(
        cls: typing.Type[Image],
        data_stream: typing.IO,
        *,
        max_nonuniformity: builtins.float = 5e-4,
        fail_outside_max_nonuniformity: builtins.bool = True,
        remove_anomalous_images: builtins.bool = True,
        encryption_key: builtins.bytes | builtins.str | None = None,
        rescale: builtins.bool | None = None,
        rescale_dtype: typing.Type[miot.DType] | None = None,
    ) -> Image[miot.DType]:
        """Build an Image from a zipped DICOM series.

        All keyword arguments are forwarded to
        ``miod.DICOMImage.from_zipped_stream``; see that method for semantics.
        """
        dicom_image = miod.DICOMImage.from_zipped_stream(
            data_stream,
            max_nonuniformity=max_nonuniformity,
            fail_outside_max_nonuniformity=fail_outside_max_nonuniformity,
            remove_anomalous_images=remove_anomalous_images,
            encryption_key=encryption_key,
            rescale=rescale,
            rescale_dtype=rescale_dtype,
        )
        return cls.from_dicom_image(dicom_image)
    def to_sitk(self, **kwargs: builtins.bool) -> sitk.Image:
        """Get the image as an instance of :class:`sitk.Image`."""
        return miof.array_to_sitk(np.array(self), self.affine, **kwargs)
    def to_nibabel(self) -> nib.Nifti1Image:
        """Get the image as a :class:`nib.Nifti1Image` (data + affine)."""
        return nib.Nifti1Image(np.array(self), self.affine)
```
|
{
"source": "jcreinhold/synthqc",
"score": 2
}
|
#### File: synthqc/tests/test_plot.py
```python
import os
import shutil
import tempfile
import unittest
from synthqc import directory_view, plot_dir_synth_quality
class TestPlot(unittest.TestCase):
def setUp(self):
wd = os.path.dirname(os.path.abspath(__file__))
self.data_dir = os.path.join(wd, 'test_data', 'images')
self.mask_dir = os.path.join(wd, 'test_data', 'masks')
self.out_dir = tempfile.mkdtemp()
def test_directory_view(self):
directory_view(self.data_dir, out_dir=self.out_dir, trim=False)
def test_synth_quality(self):
plot_dir_synth_quality(self.data_dir, self.data_dir, mask_dir=self.mask_dir, out_dir=self.out_dir)
def test_synth_quality_mean(self):
plot_dir_synth_quality(self.data_dir, self.data_dir, mask_dir=self.mask_dir, out_dir=self.out_dir, mean=True)
def tearDown(self):
shutil.rmtree(self.out_dir)
# Allow running this test module directly (e.g. ``python test_plot.py``).
if __name__ == '__main__':
    unittest.main()
```
|
{
"source": "jcreinhold/synthtorch",
"score": 2
}
|
#### File: synthtorch/exec/nn_predict.py
```python
import logging
import os
import random
import sys
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
import numpy as np
import torch
from niftidataset import glob_imgs, split_filename
from synthtorch import Learner, SynthtorchError
from synthtorch.exec.exec import get_args, setup_log, determine_ext
######### Main routine ###########
def main(args=None):
    """Command-line entry point for synthesis prediction.

    Parses arguments, loads a trained ``Learner``, and synthesizes an output
    image for every aligned image set found in the prediction directories.

    Args:
        args: optional argument list (defaults to ``sys.argv`` handling in
            ``get_args``)

    Returns:
        0 on success, 1 on any failure (the exception is logged).
    """
    args, no_config_file = get_args(args)
    setup_log(args.verbosity)
    logger = logging.getLogger(__name__)
    try:
        # set random seeds for reproducibility
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        # since prediction only uses one gpu (at most), make the batch size small enough to fit
        if args.n_gpus > 1: args.batch_size = args.batch_size // args.n_gpus
        learner = Learner.predict_setup(args)
        # determine how many samples we will use in prediction
        nsyn = (args.monte_carlo or 1) if (args.nn_arch not in ('hotnet', 'unburnnet', 'ocnet1', 'ocnet2')) else 1
        # get relevant prediction directories and determine extension
        predict_dir = args.predict_dir or args.valid_source_dir
        output_dir = args.predict_out or os.getcwd() + '/syn_'
        ext = determine_ext(predict_dir[0])
        # setup and start prediction loop
        axis = args.sample_axis or 0
        # BUGFIX: the original check `axis < 0 or axis > 2 and not isinstance(axis, int)`
        # bound the `and` first, so a non-int axis or an int > 2 slipped through.
        if not isinstance(axis, int) or axis < 0 or axis > 2:
            raise ValueError('sample_axis must be an integer between 0 and 2 inclusive')
        n_imgs = len(glob_imgs(predict_dir[0], ext))
        if n_imgs == 0: raise SynthtorchError('Prediction directory does not contain valid images.')
        # every modality directory must align one-to-one with the first one
        if any([len(glob_imgs(pd, ext)) != n_imgs for pd in predict_dir]):
            raise SynthtorchError(
                'Number of images in prediction directories must have an equal number of images in each '
                'directory (e.g., so that img_t1_1 aligns with img_t2_1 etc. for multimodal synth)')
        predict_fns = zip(*[glob_imgs(pd, ext) for pd in predict_dir])
        if args.dim == 3 and args.patch_size is not None and args.calc_var:
            raise SynthtorchError('Patch-based 3D variance calculation not currently supported.')
        for k, fn in enumerate(predict_fns):
            _, base, ext = split_filename(fn[0])
            if 'png' in ext: ext = '.tif'  # force tif output in this case
            if 'nii' in ext: ext = '.nii.gz'  # force compressed output in this case
            logger.info(f'Starting synthesis of image: {base} ({k + 1}/{n_imgs})')
            out_imgs = learner.predict(fn, nsyn, args.calc_var)
            for i, oin in enumerate(out_imgs):
                out_fn = output_dir + f'{k}_{i}{ext}'
                # nibabel images expose to_filename; PIL images expose save
                if hasattr(oin, 'to_filename'):
                    oin.to_filename(out_fn)
                else:
                    oin.save(out_fn)
                logger.info(f'Finished synthesis. Saved as: {out_fn}')
        return 0
    except Exception as e:
        logger.exception(e)
        return 1
# Propagate main()'s 0/1 status to the shell when run as a script.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
```
#### File: synthtorch/learn/learner.py
```python
__all__ = ['get_data_augmentation',
'get_dataloader',
'get_device',
'get_model',
'Learner']
from dataclasses import dataclass
from typing import List, Tuple, Union
import logging
import os
import random
import nibabel as nib
import numpy as np
from PIL import Image
import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CyclicLR
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.transforms import Compose
from niftidataset import MultimodalNiftiDataset, MultimodalImageDataset, split_filename
import niftidataset.transforms as niftitfms
from synthtorch.errors import SynthtorchError
from synthtorch.plot.loss import plot_loss
from synthtorch.learn.predict import Predictor
from synthtorch.util.config import ExperimentConfig
from synthtorch.util.helper import get_optim, init_weights
try:
from torch.utils.tensorboard import SummaryWriter
except (ImportError, ModuleNotFoundError):
SummaryWriter = None
try:
from apex import amp
except (ImportError, ModuleNotFoundError):
amp = None
logger = logging.getLogger(__name__)
class Learner:
    """Couples a model with its data loaders, optimizer and predictor, and
    implements the training/prediction/checkpoint logic used by synthtorch."""

    def __init__(self, model, device=None, train_loader=None, valid_loader=None, optimizer=None,
                 predictor=None, config=None):
        self.model = model
        self.model_name = model.__class__.__name__.lower()
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.optimizer = optimizer
        self.device = device
        self.predictor = predictor
        self.config = config
        self.record = None  # filled with a Record of losses by fit()
        self.use_fp16 = False  # toggled by fp16() when apex is available

    @classmethod
    def train_setup(cls, config: Union[str, ExperimentConfig]):
        """Build a Learner ready for training from a config object or a JSON config path."""
        if isinstance(config, str):
            config = ExperimentConfig.load_json(config)
        if isinstance(config.kernel_size, int):
            config.kernel_size = tuple([config.kernel_size for _ in range(config.dim)])
        device, use_cuda = get_device(config.disable_cuda)
        # color images triple the channel count for model construction only
        if config.color: config.n_input, config.n_output = config.n_input * 3, config.n_output * 3
        model = get_model(config, True, False)
        if config.color: config.n_input, config.n_output = config.n_input // 3, config.n_output // 3
        logger.debug(model)
        logger.info(f'Number of trainable parameters in model: {num_params(model)}')
        load_chkpt = os.path.isfile(config.trained_model)
        checkpoint = torch.load(config.trained_model, map_location=device) if load_chkpt else None
        if load_chkpt:
            logger.info(f"Loading checkpoint: {config.trained_model} (epoch {checkpoint['epoch']})")
            model.load_state_dict(checkpoint['model'])
            model = model.to(device)
        else:
            logger.info(f'Initializing weights with {config.init}')
            init_weights(model, config.init, config.init_gain)
        if use_cuda: model.cuda(device=device)
        train_loader, valid_loader = get_dataloader(config)
        if config.lr_scheduler is None: logger.info(f'LR: {config.learning_rate:.2e}')

        def gopt(name, mp, **kwargs):
            # helper: build the optimizer with the configured lr/weight decay
            return get_optim(name)(mp, lr=config.learning_rate, weight_decay=config.weight_decay, **kwargs)

        # optimizers differ in which hyperparameters they accept, so fall back
        # from betas -> momentum -> plain construction
        try:
            optimizer = gopt(config.optimizer, model.parameters(), betas=config.betas)
        except TypeError:
            try:
                optimizer = gopt(config.optimizer, model.parameters(), momentum=config.betas[0])
            except TypeError:
                optimizer = gopt(config.optimizer, model.parameters())
        if load_chkpt and not config.no_load_opt:
            optimizer.load_state_dict(checkpoint['optimizer'])
        model.train()
        if config.freeze: model.freeze()
        predictor = Predictor(model, config.patch_size, config.batch_size, device, config.sample_axis,
                              config.dim, config.mean, config.std, config.tfm_x, config.tfm_y)
        return cls(model, device, train_loader, valid_loader, optimizer, predictor, config)

    @classmethod
    def predict_setup(cls, config: Union[str, ExperimentConfig]):
        """Build a Learner for inference (model in eval mode) from a config or JSON path."""
        if isinstance(config, str):
            config = ExperimentConfig.load_json(config)
        if isinstance(config.kernel_size, int):
            config.kernel_size = tuple([config.kernel_size for _ in range(config.dim)])
        device, use_cuda = get_device(config.disable_cuda)
        nsyn = config.monte_carlo or 1
        if config.color: config.n_input, config.n_output = config.n_input * 3, config.n_output * 3
        # keep dropout enabled only when monte-carlo sampling is requested
        model = get_model(config, nsyn > 1 and config.dropout_prob > 0, True)
        logger.debug(model)
        checkpoint = torch.load(config.trained_model, map_location=device)
        model.load_state_dict(checkpoint['model'])
        model = model.to(device)
        if use_cuda: model.cuda(device=device)
        model.eval()
        predictor = Predictor(model, config.patch_size, config.batch_size, device, config.sample_axis,
                              config.dim, config.mean, config.std, config.tfm_x, config.tfm_y)
        return cls(model, device, predictor=predictor, config=config)

    def fit(self, n_epochs, clip: float = None, checkpoint: int = None, trained_model: str = None):
        """Training loop for the neural network.

        Args:
            n_epochs: number of epochs to train
            clip: max gradient norm for clipping (no clipping if None)
            checkpoint: save a checkpoint every ``checkpoint`` epochs (if given)
            trained_model: base filename used when writing periodic checkpoints

        Raises:
            SynthtorchError: if the training loss becomes NaN or Inf
        """
        self.model.train()
        use_tb = self.config.tensorboard and SummaryWriter is not None
        if use_tb: writer = SummaryWriter()
        use_valid = self.valid_loader is not None
        use_scheduler = hasattr(self, 'scheduler')
        use_restarts = self.config.lr_scheduler == 'cosinerestarts'
        train_losses, valid_losses = [], []
        n_batches = len(self.train_loader)
        for t in range(1, n_epochs + 1):
            # training
            t_losses = []
            if use_valid: self.model.train(True)
            for i, (src, tgt) in enumerate(self.train_loader):
                logger.debug(f'Epoch {t} - training iteration {i} - '
                             f'Src. shape: {src.shape}; Tgt. shape: {tgt.shape}')
                src, tgt = src.to(self.device), tgt.to(self.device)
                self.optimizer.zero_grad()
                out = self.model(src)
                loss = self._criterion(out, tgt)
                t_losses.append(loss.item())
                if self.use_fp16:
                    with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                if clip is not None: nn.utils.clip_grad_norm_(self.model.parameters(), clip)
                self.optimizer.step()
                # cosine-restart schedulers need a fractional epoch argument
                if use_scheduler: self.scheduler.step(((t - 1) + (i / n_batches)) if use_restarts else None)
                if use_tb:
                    if i % 20 == 0: writer.add_scalar('Loss/train', loss.item(), ((t - 1) * n_batches) + i)
                del loss  # save memory by removing ref to gradient tree
            train_losses.append(t_losses)
            if checkpoint is not None:
                if t % checkpoint == 0:
                    path, base, ext = split_filename(trained_model)
                    fn = os.path.join(path, base + f'_chk_{t}' + ext)
                    self.save(fn, t)
            # validation
            v_losses = []
            if use_valid:
                self.model.train(False)
                with torch.no_grad():
                    for i, (src, tgt) in enumerate(self.valid_loader):
                        logger.debug(f'Epoch {t} - validation iteration {i} - '
                                     f'Src. shape: {src.shape}; Tgt. shape: {tgt.shape}')
                        src, tgt = src.to(self.device), tgt.to(self.device)
                        out = self.model(src)
                        loss = self._criterion(out, tgt)
                        if use_tb:
                            if i % 20 == 0: writer.add_scalar('Loss/valid', loss.item(), ((t - 1) * n_batches) + i)
                            do_plot = i == 0 and ((t - 1) % 5) == 0
                            if do_plot and self.model.dim == 2:
                                writer.add_images('source', src[:8], t, dataformats='NCHW')
                                outimg = out[0][:8] if isinstance(out, tuple) else out[:8]
                                if self.config.color: outimg = torch.round(outimg)
                                writer.add_images('target', outimg, t, dataformats='NCHW')
                            if do_plot: self._histogram_weights(writer, t)
                        v_losses.append(loss.item())
                    valid_losses.append(v_losses)
            if not np.all(np.isfinite(t_losses)): raise SynthtorchError(
                'NaN or Inf in training loss, cannot recover. Exiting.')
            if logger is not None:
                log = f'Epoch: {t} - Training Loss: {np.mean(t_losses):.2e}'
                if use_valid: log += f', Validation Loss: {np.mean(v_losses):.2e}'
                if use_scheduler: log += f', LR: {self.scheduler.get_last_lr()[0]:.2e}'
                logger.info(log)
        self.record = Record(train_losses, valid_losses)
        if use_tb: writer.close()

    def predict(self, fn: str, nsyn: int = 1, calc_var: bool = False):
        """Synthesize output image(s) for the aligned input filenames ``fn``.

        Returns nibabel images for NIfTI inputs and PIL images for 2D formats.

        Raises:
            SynthtorchError: for unsupported file extensions
        """
        self.model.eval()
        f = fn[0].lower()
        if f.endswith('.nii') or f.endswith('.nii.gz'):
            img_nib = nib.load(fn[0])
            img = np.stack([nib.load(f).get_fdata(dtype=np.float32) for f in fn])
            out = self.predictor.predict(img, nsyn, calc_var)
            out_img = [nib.Nifti1Image(o, img_nib.affine, img_nib.header) for o in out]
        elif f.split('.')[-1] in ('tif', 'tiff', 'png', 'jpg', 'jpeg'):
            out_img = self._img_predict(fn, nsyn, calc_var)
        else:
            raise SynthtorchError(f'File: {fn[0]}, not supported.')
        return out_img

    def _img_predict(self, fn, nsyn, calc_var):
        """Predict for 2D image files, handling channel-first/last color layouts."""
        img = np.stack([np.asarray(Image.open(f), dtype=np.float32) for f in fn])
        if self.config.color: img = img.transpose((0, 3, 1, 2))
        out = self.predictor.img_predict(img, nsyn, calc_var)
        if self.config.color:
            out = out.transpose((1, 2, 0))  # only support one color image as output
        out = [np.around(out[..., 0:3]).astype(np.uint8)] + [out[..., i] for i in range(3, out.shape[-1])] \
            if self.config.nn_arch not in ('nconv', 'unet', 'densenet') else \
            np.around(out[None, ...]).astype(np.uint8)
        return [Image.fromarray(o) for o in out]

    def _criterion(self, out, tgt):
        """ helper function to handle multiple outputs in model evaluation """
        c = self.model.module.criterion if isinstance(self.model, nn.DataParallel) else self.model.criterion
        return c(out, tgt)

    def fp16(self):
        """ import and initialize mixed precision training package """
        if amp is not None:
            self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level='O1')
            self.use_fp16 = True
        else:
            logger.info('Mixed precision training (i.e., the package `apex`) not available.')

    def multigpu(self):
        """ put the model on the GPU if available and desired """
        n_gpus = torch.cuda.device_count()
        if n_gpus <= 1:
            logger.warning('Multi-GPU functionality is not available on your system.')
        else:
            logger.info(f'Enabling use of {n_gpus} gpus')
            self.model = torch.nn.DataParallel(self.model)

    def lr_scheduler(self, n_epochs, lr_scheduler='cyclic', restart_period=None, t_mult=None,
                     num_cycles=1, cycle_mode='triangular', momentum_range=(0.85, 0.95), div_factor=25, pct_start=0.3,
                     **kwargs):
        """Attach a learning-rate scheduler ('cyclic' or 'cosinerestarts') to this Learner.

        Raises:
            SynthtorchError: for an unrecognized ``lr_scheduler`` name
        """
        lr = self.config.learning_rate
        if lr_scheduler == 'cyclic':
            logger.info(f'Enabling cyclic LR scheduler with {num_cycles} cycle(s)')
            ss = int((n_epochs * len(self.train_loader)) / num_cycles)
            ssu = int(pct_start * ss)
            ssd = ss - ssu
            # momentum cycling is only supported by momentum-based optimizers
            cycle_momentum = self.config.optimizer in ('sgd', 'sgdw', 'nsgd', 'nsgdw', 'rmsprop')
            momentum_kwargs = {'cycle_momentum': cycle_momentum}
            if not cycle_momentum and momentum_range is not None:
                logger.warning(f'{self.config.optimizer} not compatible with momentum cycling, disabling.')
            elif momentum_range is not None:
                momentum_kwargs.update({'base_momentum': momentum_range[0], 'max_momentum': momentum_range[1]})
            self.scheduler = CyclicLR(self.optimizer, lr / div_factor, lr, step_size_up=ssu, step_size_down=ssd,
                                      mode=cycle_mode, **momentum_kwargs)
        elif lr_scheduler == 'cosinerestarts':
            logger.info('Enabling cosine annealing with restarts LR scheduler')
            self.scheduler = CosineAnnealingWarmRestarts(self.optimizer, restart_period, T_mult=t_mult,
                                                         eta_min=lr / div_factor)
        else:
            # BUGFIX: the original message interpolated the builtin `type`
            # instead of the requested scheduler name
            raise SynthtorchError(f'Invalid type {lr_scheduler} for scheduler.')
        logger.info(f'Max LR: {lr:.2e}, Min LR: {lr / div_factor:.2e}')

    def load(self, fn):
        """Load model/optimizer (and amp) state from the checkpoint file ``fn``."""
        checkpoint = torch.load(fn, map_location=self.device)
        logger.info(f"Loaded checkpoint: {fn} (epoch {checkpoint['epoch']})")
        if 'amp' in checkpoint.keys():
            amp.initialize(self.model, self.optimizer, opt_level='O1')
            amp.load_state_dict(checkpoint['amp'])
        # BUGFIX: load_state_dict returns a NamedTuple of (missing, unexpected)
        # keys — not the module — so chaining `.to(...)` on it raised
        # AttributeError; move the model in a separate statement instead.
        self.model.load_state_dict(checkpoint['model'])
        self.model.to(self.device)
        self.optimizer.load_state_dict(checkpoint['optimizer'])

    def save(self, fn, epoch=0):
        """ save a model, an optimizer state and the epoch number to a file """
        model = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict()
        state = {'epoch': epoch, 'model': model, 'optimizer': self.optimizer.state_dict()}
        if self.use_fp16: state['amp'] = amp.state_dict()
        torch.save(state, fn)

    def _histogram_weights(self, writer, epoch):
        """ write histogram of weights to tensorboard """
        for (name, values) in self.model.named_parameters():
            writer.add_histogram(tag='weights/' + name, values=values.clone().detach().cpu(), global_step=epoch)
def num_params(model):
    """Return the number of trainable (requires_grad) parameters in ``model``."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
def _get_annom_model(class_name: str, display_name: str = None):
    """Fetch a model class from the optional ``annom`` toolbox.

    Args:
        class_name: name of the class in ``annom.models``
        display_name: name used in the error message (defaults to ``class_name``)

    Raises:
        SynthtorchError: if the annom toolbox (or the class) is unavailable
    """
    name = display_name or class_name
    try:
        import annom.models
        return getattr(annom.models, class_name)
    except (ImportError, ModuleNotFoundError, AttributeError):
        raise SynthtorchError(f'Cannot use the {name} without the annom toolbox.')


def get_model(config: ExperimentConfig, enable_dropout: bool = True, inplace: bool = False):
    """
    instantiate a model based on an ExperimentConfig class instance

    Args:
        config (ExperimentConfig): instance of the ExperimentConfig class
        enable_dropout (bool): enable dropout in the model (usually for training)
        inplace (bool): request inplace operations where the model supports them

    Returns:
        model: instance of one of the available models in the synthtorch package

    Raises:
        SynthtorchError: if ``config.nn_arch`` is not a recognized architecture
    """
    if config.nn_arch == 'nconv':
        from ..models.nconvnet import SimpleConvNet
        logger.warning('The nconv network is for basic testing.')
        model = SimpleConvNet(**config)
    elif config.nn_arch == 'unet':
        from ..models.unet import Unet
        model = Unet(enable_dropout=enable_dropout, inplace=inplace, **config)
    elif config.nn_arch == 'vae':
        from ..models.vae import VAE
        model = VAE(**config)
    elif config.nn_arch == 'densenet':
        from ..models.densenet import DenseNet
        model = DenseNet(**config)
    elif config.nn_arch == 'ordnet':
        model = _get_annom_model('OrdNet')(enable_dropout=enable_dropout, inplace=inplace, **config)
    elif config.nn_arch == 'hotnet':
        model = _get_annom_model('HotNet')(inplace=inplace, **config)
    elif config.nn_arch == 'burnnet':
        model = _get_annom_model('BurnNet')(inplace=inplace, **config)
    elif config.nn_arch == 'burn2net':
        model = _get_annom_model('Burn2Net')(inplace=inplace, **config)
    elif config.nn_arch == 'burn2netp12':
        model = _get_annom_model('Burn2NetP12')(inplace=inplace, **config)
    elif config.nn_arch == 'burn2netp21':
        model = _get_annom_model('Burn2NetP21')(inplace=inplace, **config)
    elif config.nn_arch == 'unburnnet':
        model = _get_annom_model('UnburnNet')(inplace=inplace, **config)
    elif config.nn_arch == 'unburn2net':
        model = _get_annom_model('Unburn2Net')(inplace=inplace, **config)
    elif config.nn_arch == 'lavanet':
        model = _get_annom_model('LavaNet')(inplace=inplace, **config)
    elif config.nn_arch == 'lava2net':
        model = _get_annom_model('Lava2Net')(inplace=inplace, **config)
    elif config.nn_arch == 'lautonet':
        model = _get_annom_model('LAutoNet')(enable_dropout=enable_dropout, inplace=inplace, **config)
    elif config.nn_arch == 'ocnet1':
        # OCNet only supports inplace ops when dropout is disabled
        model = _get_annom_model('OCNet1', 'OCNet')(
            enable_dropout=enable_dropout, inplace=inplace if config.dropout_prob == 0 else False, **config)
    elif config.nn_arch == 'ocnet2':
        model = _get_annom_model('OCNet2', 'OCNet')(
            enable_dropout=enable_dropout, inplace=inplace if config.dropout_prob == 0 else False, **config)
    else:
        # BUGFIX: 'burn2net' was supported above but missing from this list
        raise SynthtorchError(f'Invalid NN type: {config.nn_arch}. '
                              f'{{nconv,unet,vae,densenet,ordnet,hotnet,burnnet,burn2net,burn2netp12,burn2netp21,'
                              f'unburnnet,unburn2net,lavanet,lava2net,lautonet,ocnet1,ocnet2}} '
                              f'are the only supported options.')
    return model
def get_device(disable_cuda=False):
    """Return ``(device, use_cuda)`` for tensor placement, honoring ``disable_cuda``."""
    cuda_available = torch.cuda.is_available()
    use_cuda = cuda_available and not disable_cuda
    if use_cuda:
        # enable cudnn autotuner for fixed-size workloads
        torch.backends.cudnn.benchmark = True
    elif not cuda_available and not disable_cuda:
        logger.warning('CUDA does not appear to be available on your system.')
    return torch.device("cuda" if use_cuda else "cpu"), use_cuda
def get_dataloader(config: ExperimentConfig, tfms: Tuple[List, List] = None):
    """ get the dataloaders for training/validation

    Args:
        config: experiment configuration (data directories, batch size, workers, etc.)
        tfms: optional (train_tfms, valid_tfms) pair of transform lists; when None
            the transforms are derived from ``config`` via ``get_data_augmentation``

    Returns:
        (train_loader, valid_loader) tuple of DataLoaders
    """
    if config.dim > 1:
        # get data augmentation if not defined
        train_tfms, valid_tfms = get_data_augmentation(config) if tfms is None else tfms
        # check number of jobs requested and CPUs available; clamp (mutates config in place)
        num_cpus = os.cpu_count()
        if num_cpus < config.n_jobs:
            logger.warning(f'Requested more workers than available (n_jobs={config.n_jobs}, # cpus={num_cpus}). '
                           f'Setting n_jobs={num_cpus}.')
            config.n_jobs = num_cpus
        # define dataset and split into training/validation set
        # nii datasets are used when no extension is given or the extension contains 'nii'
        use_nii_ds = config.ext is None or 'nii' in config.ext
        dataset = MultimodalNiftiDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                        preload=config.preload) if use_nii_ds else \
            MultimodalImageDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                  ext='*.' + config.ext, color=config.color, preload=config.preload)
        logger.info(f'Number of training images: {len(dataset)}')
        if config.valid_source_dir is not None and config.valid_target_dir is not None:
            # a separate validation directory was supplied; build an independent dataset for it
            valid_dataset = MultimodalNiftiDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                                  Compose(valid_tfms),
                                                                  preload=config.preload) if use_nii_ds else \
                MultimodalImageDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                      Compose(valid_tfms),
                                                      ext='*.' + config.ext, color=config.color, preload=config.preload)
            logger.info(f'Number of validation images: {len(valid_dataset)}')
            train_loader = DataLoader(dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
        else:
            # setup training and validation set by randomly splitting the training data
            num_train = len(dataset)
            indices = list(range(num_train))
            split = int(config.valid_split * num_train)
            valid_idx = np.random.choice(indices, size=split, replace=False)
            train_idx = list(set(indices) - set(valid_idx))
            train_sampler = SubsetRandomSampler(train_idx)
            valid_sampler = SubsetRandomSampler(valid_idx)
            # set up data loader for nifti images (both loaders share the dataset; the
            # samplers keep the train/validation subsets disjoint)
            train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(dataset, sampler=valid_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
    else:
        # 1D data comes from CSV files instead of images
        try:
            from altdataset import CSVDataset
        except (ImportError, ModuleNotFoundError):
            raise SynthtorchError('Cannot use 1D ConvNet in CLI without the altdataset toolbox.')
        # NOTE(review): this path assumes valid_source_dir is provided for 1D data —
        # confirm the CLI enforces that before reaching here
        train_dataset, valid_dataset = CSVDataset(config.source_dir[0]), CSVDataset(config.valid_source_dir[0])
        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                  pin_memory=config.pin_memory)
        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                  pin_memory=config.pin_memory)
    return train_loader, valid_loader
def init_fn(worker_id):
    """Seed the python and numpy RNGs of a dataloader worker from torch's seed."""
    seed = (torch.initial_seed() + worker_id) % (2 ** 32)
    random.seed(seed)
    np.random.seed(seed)
def get_data_augmentation(config: ExperimentConfig):
    """ get all data augmentation transforms for training

    Returns:
        (train_tfms, valid_tfms) pair of transform lists built from the
        augmentation-related fields of ``config``
    """
    train_tfms, valid_tfms = [], []
    # add data augmentation if desired
    if config.prob is not None:
        logger.info('Adding data augmentation transforms')
        train_tfms.extend(
            niftitfms.get_transforms(config.prob, config.tfm_x, config.tfm_y, config.rotate, config.translate,
                                     config.scale, config.vflip, config.hflip, config.gamma, config.gain,
                                     config.noise_pwr, config.block, config.threshold, config.dim == 3,
                                     config.mean, config.std, config.color))
        # validation gets no random augmentation: only tensor conversion plus
        # normalization when mean/std statistics are given
        if config.mean is not None and config.std is not None:
            valid_tfms.extend([niftitfms.ToTensor(config.color),
                               niftitfms.Normalize(config.mean, config.std, config.tfm_x, config.tfm_y,
                                                   config.dim == 3)])
    else:
        logger.info('No data augmentation will be used')
        train_tfms.append(niftitfms.ToTensor(config.color))
        valid_tfms.append(niftitfms.ToTensor(config.color))
    # control random cropping patch size (or if used at all)
    if (config.ext is None or config.ext == 'nii') and config.patch_size is not None:
        cropper = niftitfms.RandomCrop3D(config.patch_size, config.threshold, config.sample_pct,
                                         config.sample_axis) if config.dim == 3 else \
            niftitfms.RandomCrop2D(config.patch_size, config.sample_axis, config.threshold)
        # NOTE(review): when config.dim != 3 the RandomCrop2D built above is never
        # appended — the conditionals below fall through to RandomSlice instead.
        # Confirm whether 2D nii training was meant to use the cropper here.
        train_tfms.append(cropper if config.patch_size is not None and config.dim == 3 else \
                          niftitfms.RandomSlice(config.sample_axis))
        valid_tfms.append(cropper if config.patch_size is not None and config.dim == 3 else \
                          niftitfms.RandomSlice(config.sample_axis))
    else:
        if config.patch_size is not None:
            train_tfms.append(niftitfms.RandomCrop(config.patch_size, config.threshold))
            valid_tfms.append(niftitfms.RandomCrop(config.patch_size, config.threshold))
    logger.debug(f'Training transforms: {train_tfms}')
    return train_tfms, valid_tfms
@dataclass
class Record:
    """Losses recorded over training.

    Each outer list entry corresponds to one epoch; the inner list holds the
    per-batch losses observed during that epoch.
    """
    # train_loss[i] / valid_loss[i] hold the batch losses for epoch i+1
    train_loss: List[List[float]]
    valid_loss: List[List[float]]

    def plot_loss(self, fn: str = None, plot_error: bool = False):
        """ plot training and validation losses on the same plot (with or without error bars) """
        ax = plot_loss(self.train_loss, ecolor='darkorchid', label='Train', plot_error=plot_error)
        _ = plot_loss(self.valid_loss, filename=fn, ecolor='firebrick', ax=ax, label='Validation',
                      plot_error=plot_error)

    def write_csv(self, fn: str):
        """ write per-epoch mean/std of training and validation losses to a csv file """
        import csv
        head = ['epochs', 'avg train', 'std train', 'avg valid', 'std valid']
        epochs = list(range(1, len(self.train_loss) + 1))
        avg_tl = [np.mean(losses) for losses in self.train_loss]
        std_tl = [np.std(losses) for losses in self.train_loss]
        avg_vl = [np.mean(losses) for losses in self.valid_loss]
        std_vl = [np.std(losses) for losses in self.valid_loss]
        out = np.vstack([epochs, avg_tl, std_tl, avg_vl, std_vl]).T
        # newline='' is required when handing a file object to csv.writer;
        # without it the writer emits spurious blank lines between rows on Windows
        with open(fn, "w", newline="") as f:
            wr = csv.writer(f)
            wr.writerow(head)
            wr.writerows(out)
```
#### File: synthtorch/learn/loss.py
```python
__all__ = ['CosineProximityLoss',
'VAELoss']
import torch
from torch import nn
class CosineProximityLoss(nn.Module):
    """ minimize the cosine proximity between an input and a target """

    def forward(self, y_hat: torch.Tensor, y: torch.Tensor):
        # cosine similarity of the flattened tensors; loss is 0 when aligned, 1 when orthogonal
        dot = torch.dot(y_hat.flatten(), y.flatten())
        similarity = dot / (torch.norm(y_hat) * torch.norm(y))
        return 1 - similarity
class VAELoss(nn.Module):
    """ summed-MSE reconstruction loss plus KL divergence for a VAE """

    def __init__(self):
        super(VAELoss, self).__init__()
        self.mse_loss = nn.MSELoss(reduction="sum")

    def forward(self, xhat, x):
        # xhat is the (reconstruction, mu, logvar) triple produced by the VAE
        recon_x, mu, logvar = xhat
        reconstruction_term = self.mse_loss(recon_x, x)
        # KL divergence of N(mu, exp(logvar)) from the standard normal prior
        kl_term = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return reconstruction_term + kl_term
```
#### File: synthtorch/tests/test_exec.py
```python
import os
import unittest
import torch
# fail fast in these tests: raise with a traceback to the exact op that
# produced NaN/Inf during a backward pass
torch.autograd.set_detect_anomaly(True)
from synthtorch.exec.nn_train import main as nn_train
from synthtorch.exec.nn_predict import main as nn_predict
from ._test_funcs import TestCLI
class TestNConv(TestCLI):
    """CLI smoke tests for the ``nconv`` architecture.

    Each test follows the same pattern: train briefly via ``nn_train`` with a
    specific set of CLI flags, assert a zero exit code, rewrite the emitted
    output-config JSON to point at prediction inputs (``_modify_ocf``), then
    run ``nn_predict`` on that JSON and assert it also exits cleanly.
    """

    def test_nconv_nopatch_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 -dm 3 '
                                  f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} '
                                  f'-vsd {self.train_dir} -vtd {self.train_dir} -v').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_patch_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_preload_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -pr').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_swish_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -ac swish').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_checkpoint_and_load_cli(self):
        # first run writes a checkpoint (-chk 1); second run resumes from it
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 2 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -chk 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 2 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_cyclic_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -lrs cyclic -v -opt sgdw').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_restarts_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -lrs cosinerestarts -tm 2 -rp 2 -v').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_amsgrad_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt amsgrad').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_nesterov_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt nsgd').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_nesterovw_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt nsgdw').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_sgdw_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt sgdw').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_weightdecay_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -wd 0.1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_writecsv_cli(self):
        # training only; just checks that the loss csv option does not break the run
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -csv {self.out_dir}/test.csv').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)

    def test_nconv_data_aug_2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 '
                             f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} -e tif '
                             f'-p 1 1 1 1 1 -r 10 -ts 0.5 -sc 0.1 -mean 1 -std 1 '
                             f'-hf -vf -g 0.1 -gn 0.2 -pwr 1 -tx -ty -blk 5 6 -th 0').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_data_aug_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 '
                                  f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} -dm 3 '
                                  f'-vsd {self.train_dir} -vtd {self.train_dir} -p 0 0 1 1 1 '
                                  f'-g 0.01 -gn 0 -pwr 1 -tx -ty -blk 5 10 -mean 1 -std 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_clip_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -c 0.25').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_whole_img_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -dm 3 '
                                  f'-ocf {self.jsonfn} -bs 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_2d_crop_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif -ps 8 8 '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_2d_var_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, calc_var=True)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_png_cli(self):
        train_args = f'-s {self.train_dir}/png/ -t {self.train_dir}/png/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png '
                             f'-ocf {self.jsonfn} -p 1 1 0 0 0 ').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, calc_var=True, png_out=True)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_color_cli(self):
        train_args = f'-s {self.train_dir}/color/ -t {self.train_dir}/color/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png -co -dm 2 '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, color_out=True, bs=1)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    @unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.")
    def test_nconv_color_tb_cli(self):
        # tensorboard logging (-tb) is skipped on CI
        train_args = f'-s {self.train_dir}/color/ -t {self.train_dir}/color/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png -co -dm 2 '
                             f'-ocf {self.jsonfn} -tb').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, color_out=True, bs=1)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_tif_predict_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, calc_var=True, tif_out=True)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_3d_var_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 1 -dm 3 '
                                  f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, calc_var=True)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_multimodal_cli(self):
        # two source and two target modalities (directories repeated)
        train_args = f'-s {self.train_dir} {self.train_dir} -t {self.train_dir} {self.train_dir}'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                             f'-ocf {self.jsonfn} -bs 2').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, multi=2)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_nconv_multimodal_tiff_cli(self):
        train_args = f'-s {self.train_dir}/tif/ {self.train_dir}/tif/ -t {self.train_dir}/tif/ {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                             f'-ocf {self.jsonfn} -bs 2 -e tif -th 0').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, multi=2)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)
class TestDenseNet(TestCLI):
    """CLI smoke test for the ``densenet`` architecture (train then predict)."""

    def test_densenet_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/densenet.mdl -na densenet -ne 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)
class TestUnet(TestCLI):
    """CLI smoke tests for the ``unet`` architecture.

    Each test trains briefly via ``nn_train`` with one specific option under
    test, rewrites the emitted output-config JSON (``_modify_ocf``), and then
    runs ``nn_predict`` to check the full round trip exits cleanly.
    """

    def test_unet_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_freeze_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -fr').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ic_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ic').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_sep3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -sp').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_sep2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn} -sp').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_cp_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 1 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -l cp').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_bce_cli(self):
        # binary-cross-entropy loss trains against the mask directory as target
        train_args = f'-s {self.train_dir} -t {self.train_dir}/mask/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 1 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                             f'-ocf {self.jsonfn} -l bce').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_mae_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 1 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -l mae').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_layernorm_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -nm layer').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_spectral_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -nm spectral').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_spectral_ks1_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -nm spectral -ks 1 1 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ks331_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ks 3 3 1 -ic').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ks331_semi3d1_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ks 3 3 1 -ic -s3 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ks331_semi3d2_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ks 3 3 1 -ic -s3 2').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ks331_acv_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ks 3 3 1 -ic -acv').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_ks331_ns_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -ks 3 3 1 -ns -ic').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_weight_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -nm weight').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_selfattention_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 3 -bs 2 -e tif -ps 8 8 '
                             f'-ocf {self.jsonfn} -at self').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_cwattention_2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 4 -bs 4 -e tif -ps 8 8 '
                             f'-ocf {self.jsonfn} -at channel').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_cwattention_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 4 -bs 4 -ps 8 8 8 -dm 3 '
                                  f'-ocf {self.jsonfn} -at channel').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_cwattention_semi3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 4 -bs 4 -ps 8 8 8 -dm 3 '
                                  f'-ocf {self.jsonfn} -at channel -ks 3 3 1 -ic -s3 2').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_softmax_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 2 -cbp 3 -bs 2 -e tif -ps 8 8 '
                             f'-ocf {self.jsonfn} -sx').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_noise_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 2 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -nz 1').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_no_skip_cli(self):
        args = self.train_args + (
            f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 --no-skip '
            f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_multimodal_cli(self):
        # two source and two target modalities (directories repeated)
        train_args = f'-s {self.train_dir}/tif/ {self.train_dir}/tif/ -t {self.train_dir}/tif/ {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, multi=2)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_allconv_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -acv').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_allconv_2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -bs 2 -e tif -ps 16 16 '
                             f'-ocf {self.jsonfn} -acv').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_color_2d_cli(self):
        train_args = f'-s {self.train_dir}/color/ -t {self.train_dir}/color/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -bs 2 -e png -dm 2 '
                             f'-ocf {self.jsonfn} -co').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn, color_out=True, bs=1)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_resblock_2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -bs 2 -e tif -ps 16 16 '
                             f'-ocf {self.jsonfn} -acv -rb').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_resblock_2d_no_skip_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -bs 2 -e tif -ps 16 16 '
                             f'-ocf {self.jsonfn} -acv -rb -ns').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)

    def test_unet_resblock_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
                                  f'-ocf {self.jsonfn} -acv -rb').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)
class TestVAE(TestCLI):
    """CLI smoke tests for the variational-autoencoder (``vae``) architecture."""

    def test_vae_2d_3l_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/vae.mdl -na vae -ne 1 -nl 3 -cbp 2 -bs 4 -e tif -ps 32 32 '
                             f'--img-dim 32 32 --latent-size 10 -ocf {self.jsonfn} -sa 0').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        # TODO: cannot test 2d prediction here because nii needs to be same size as tiff, fix

    def test_vae_2d_5l_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/vae.mdl -na vae -ne 1 -nl 5 -cbp 1 -bs 4 -e tif -ps 32 32 '
                             f'--img-dim 32 32 --latent-size 10 -ocf {self.jsonfn} -sa 0').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        # TODO: cannot test 2d prediction here because nii needs to be same size as tiff, fix

    def test_vae_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/vae.mdl -na vae -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 4 -dm 3 '
                                  f'--img-dim 16 16 16 --latent-size 10 -ocf {self.jsonfn}').split()
        retval = nn_train(args)
        self.assertEqual(retval, 0)
        self._modify_ocf(self.jsonfn)
        retval = nn_predict([self.jsonfn])
        self.assertEqual(retval, 0)
# allow running this test module directly (e.g. `python test_exec.py`)
if __name__ == '__main__':
    unittest.main()
```
#### File: synthtorch/tests/_test_funcs.py
```python
import json
import os
import shutil
import tempfile
import unittest
from niftidataset import glob_imgs, split_filename
class TestCLI(unittest.TestCase):
def setUp(self):
wd = os.path.dirname(os.path.abspath(__file__))
self.nii_dir = os.path.join(wd, 'test_data', 'nii')
self.mask_dir = os.path.join(wd, 'test_data', 'masks')
self.tif_dir = os.path.join(wd, 'test_data', 'tif')
self.png_dir = os.path.join(wd, 'test_data', 'png')
self.color_dir = os.path.join(wd, 'test_data', 'color')
self.out_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.out_dir, 'models'))
self.train_dir = os.path.join(self.out_dir, 'imgs')
os.mkdir(self.train_dir)
os.mkdir(os.path.join(self.train_dir, 'mask'))
os.mkdir(os.path.join(self.train_dir, 'tif'))
os.mkdir(os.path.join(self.train_dir, 'png'))
os.mkdir(os.path.join(self.train_dir, 'color'))
nii = glob_imgs(self.nii_dir)[0]
msk = glob_imgs(self.mask_dir)[0]
tif = os.path.join(self.tif_dir, 'test.tif')
png = os.path.join(self.png_dir, 'test.png')
color = os.path.join(self.color_dir, 'test.png')
path, base, ext = split_filename(nii)
for i in range(8):
shutil.copy(nii, os.path.join(self.train_dir, base + str(i) + ext))
shutil.copy(msk, os.path.join(self.train_dir, 'mask', base + str(i) + ext))
shutil.copy(tif, os.path.join(self.train_dir, 'tif', base + str(i) + '.tif'))
shutil.copy(png, os.path.join(self.train_dir, 'png', base + str(i) + '.png'))
shutil.copy(color, os.path.join(self.train_dir, 'color', base + str(i) + '.png'))
self.train_args = f'-s {self.train_dir} -t {self.train_dir}'.split()
self.predict_args = f'-s {self.train_dir} -o {self.out_dir}/test'.split()
self.jsonfn = f'{self.out_dir}/test.json'
def _modify_ocf(self, jsonfn, multi=1, calc_var=False, mc=None,
png_out=False, tif_out=False, color_out=False, model=None, bs=None):
with open(jsonfn, 'r') as f:
arg_dict = json.load(f)
with open(jsonfn, 'w') as f:
use_nii = not png_out and not tif_out and not color_out
arg_dict['Required']['predict_dir'] = ([f'{self.nii_dir}'] * multi) if use_nii else \
[f'{self.train_dir}/png'] if png_out else \
[f'{self.train_dir}/color'] if color_out else \
[f'{self.train_dir}/tif']
arg_dict['Required']['predict_out'] = f'{self.out_dir}/test'
arg_dict['Prediction Options']['calc_var'] = calc_var
arg_dict['Prediction Options']['monte_carlo'] = mc
if bs is not None: arg_dict['Options']['batch_size'] = bs
if model is not None: arg_dict['Neural Network Options']['nn_arch'] = model
json.dump(arg_dict, f, sort_keys=True, indent=2)
def tearDown(self):
    """Remove the temporary output directory created during setUp."""
    output_directory = self.out_dir
    shutil.rmtree(output_directory)
```
|
{
"source": "jcreinhold/tiramisu-brulee",
"score": 2
}
|
#### File: tiramisu-brulee/tests/test_lesion_seg.py
```python
import builtins
import pathlib
import sys
import typing
import pytest
from tiramisu_brulee.experiment.cli.predict import predict, predict_image
from tiramisu_brulee.experiment.cli.train import train
@pytest.fixture
def file() -> pathlib.Path:
    """Absolute path of this test module."""
    return pathlib.Path(__file__).resolve()
@pytest.fixture
def cwd(file: pathlib.Path) -> pathlib.Path:
    """Directory containing this test module."""
    return file.parent
@pytest.fixture
def data_dir(cwd: pathlib.Path) -> pathlib.Path:
    """Directory holding the small test images (img/mask NIfTI files)."""
    return cwd / "test_data"
@pytest.fixture(scope="session")
def temp_dir(tmpdir_factory) -> pathlib.Path:  # type: ignore[no-untyped-def]
    """Session-scoped scratch directory shared by all tests in this module."""
    return pathlib.Path(tmpdir_factory.mktemp("out"))
def _create_csv(
temp_dir: pathlib.Path, data_dir: pathlib.Path, stage: builtins.str
) -> pathlib.Path:
csv_path = temp_dir / f"{stage}.csv"
image_path = data_dir / "img.nii.gz"
label_path = data_dir / "mask.nii.gz"
out_path = temp_dir / "out.nii.gz"
headers = "subject,label,t1,t2"
filenames = [f"subj{i},{label_path},{image_path},{image_path}" for i in range(2)]
if stage == "predict":
headers += ",out\n"
filenames = [fns + f",{out_path}\n" for fns in filenames]
else:
headers += "\n"
filenames = [fns + "\n" for fns in filenames]
with open(csv_path, "w") as f:
f.write(headers)
for fns in filenames:
f.write(fns)
return csv_path
@pytest.fixture
def train_csv(temp_dir: pathlib.Path, data_dir: pathlib.Path) -> pathlib.Path:
    """CSV listing training subjects (subject,label,t1,t2)."""
    return _create_csv(temp_dir, data_dir, "train")
@pytest.fixture
def predict_csv(temp_dir: pathlib.Path, data_dir: pathlib.Path) -> pathlib.Path:
    """CSV listing prediction subjects (includes an ``out`` column)."""
    return _create_csv(temp_dir, data_dir, "predict")
@pytest.fixture
def cli_train_args(temp_dir: pathlib.Path) -> typing.List[builtins.str]:
    """Baseline CLI argv for a tiny, fast training run.

    Every value is intentionally minimal (2 epochs, 2-layer blocks, batch
    size 2) so the end-to-end tests stay fast; tests append the CSV paths
    and patch sizes themselves.
    """
    args = []
    args += f"--default_root_dir {temp_dir}".split()
    args += "--enable_progress_bar false".split()
    args += "--num-input 2".split()
    args += "--batch-size 2".split()
    args += "--queue-length 1".split()
    args += "--samples-per-volume 1".split()
    args += "--n-epochs 2".split()
    args += "--down-blocks 2 2".split()
    args += "--up-blocks 2 2".split()
    args += "--bottleneck-layers 2".split()
    args += "--first-conv-out-channels 2".split()
    args += "--num-workers 0".split()
    args += "--pos-weight 2.0".split()
    return args
@pytest.fixture
def cli_predict_args(
    temp_dir: pathlib.Path, predict_csv: pathlib.Path
) -> typing.List[builtins.str]:
    """Baseline CLI argv for prediction; tests append the model path."""
    args = []
    args += f"--default_root_dir {temp_dir}".split()
    args += f"--predict-csv {predict_csv}".split()
    args += "--enable_progress_bar false".split()
    args += "--num-workers 0".split()
    return args
def _handle_fast_dev_run(
predict_args: typing.List[builtins.str],
) -> typing.List[builtins.str]:
"""py36-compatible pytorch-lightning has problem parsing fast_dev_run"""
py_version = sys.version_info
assert py_version.major == 3
if py_version.minor > 6:
predict_args += ["--fast_dev_run"]
return predict_args
def _get_and_format_best_model_paths(args: typing.List[builtins.str]) -> builtins.str:
    """Train with ``args``; return the best checkpoint paths space-joined."""
    best_model_paths = train(args, return_best_model_paths=True)
    assert isinstance(best_model_paths, list)
    return " ".join(str(model_path) for model_path in best_model_paths)
def test_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """End-to-end: train a two-model ensemble, then predict with it."""
    # same CSV twice -> an ensemble of two networks is trained
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    cli_train_args += "--track-metric dice".split()
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args = _handle_fast_dev_run(cli_predict_args)
    retcode = predict(cli_predict_args)
    assert retcode == 0
def test_reorient_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Train and predict with canonical (RAS) reorientation enabled."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    cli_train_args += ["--reorient-to-canonical"]
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += ["--reorient-to-canonical"]
    cli_predict_args = _handle_fast_dev_run(cli_predict_args)
    retcode = predict(cli_predict_args)
    assert retcode == 0
def test_mixup_train_cli(
    cli_train_args: typing.List[builtins.str], train_csv: pathlib.Path
) -> None:
    """Training with mixup augmentation enabled succeeds."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    cli_train_args += "--mixup".split()
    retcode = train(cli_train_args, return_best_model_paths=False)
    assert retcode == 0
def test_mlflow_train_cli(
    cli_train_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
    temp_dir: pathlib.Path,
) -> None:
    """Training with an MLflow tracking URI (4 epochs) succeeds."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    # bump the epoch count set by the fixture ("--n-epochs 2" -> 4)
    for i, arg in enumerate(cli_train_args):
        if arg == "--n-epochs":
            cli_train_args[i + 1] = "4"
            break
    cli_train_args += f"--tracking-uri file:./{temp_dir}/ml-runs".split()
    retcode = train(cli_train_args, return_best_model_paths=False)
    assert retcode == 0
def test_multiclass_train_cli(
    cli_train_args: typing.List[builtins.str], train_csv: pathlib.Path
) -> None:
    """Training with two output classes (multi-class segmentation) succeeds."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    cli_train_args += "--num-classes 2".split()
    retcode = train(cli_train_args, return_best_model_paths=False)
    assert retcode == 0
def test_patch_prediction_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Train with a label sampler, then predict patch-wise (32^3, no overlap)."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    cli_train_args += "--pos-sampling-weight 0.8".split()
    cli_train_args += ["--label-sampler"]
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += "--patch-size 32 32 32".split()
    cli_predict_args += "--patch-overlap 0 0 0".split()
    retcode = predict(cli_predict_args)
    assert retcode == 0
@pytest.fixture
def cli_predict_image_args(
    temp_dir: pathlib.Path, data_dir: pathlib.Path
) -> typing.List[builtins.str]:
    """CLI argv for single-image prediction (explicit --t1/--t2/--out paths)."""
    image_path = data_dir / "img.nii.gz"
    out_path = temp_dir / "out.nii.gz"
    args = []
    args += f"--default_root_dir {temp_dir}".split()
    args += f"--t1 {image_path}".split()
    args += f"--t2 {image_path}".split()
    args += f"--out {out_path}".split()
    args += "--enable_progress_bar false".split()
    args += "--num-workers 0".split()
    return args
def test_predict_image_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_image_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Train, then run the single-image prediction entry point."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8 8".split()
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_image_args += f"--model-path {best_model_paths}".split()
    cli_predict_image_args = _handle_fast_dev_run(cli_predict_image_args)
    retcode = predict_image(cli_predict_image_args)
    assert retcode == 0
def test_pseudo3d_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Pseudo-3D (2.5D) training/prediction along all three axes."""
    # three CSVs: one network per pseudo3d dimension (0, 1, 2)
    csv_ = " ".join([str(csv) for csv in [train_csv] * 3])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8".split()
    cli_train_args += "--pseudo3d-dim 0 1 2".split()
    cli_train_args += "--pseudo3d-size 31".split()
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += "--patch-size None None".split()
    cli_predict_args += "--pseudo3d-dim 0 1 2".split()
    cli_predict_args += "--pseudo3d-size 31".split()
    cli_predict_args += "--patch-overlap 0 0 0".split()
    retcode = predict(cli_predict_args)
    assert retcode == 0
def test_union_aggregate_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Pseudo-3D ensemble prediction with union aggregation of the outputs."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8".split()
    cli_train_args += "--pseudo3d-dim 0 1".split()
    cli_train_args += "--pseudo3d-size 31".split()
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += "--patch-size None None".split()
    cli_predict_args += "--pseudo3d-dim 0 1".split()
    cli_predict_args += "--pseudo3d-size 31".split()
    cli_predict_args += "--patch-overlap 0 0 0".split()
    cli_predict_args += "--aggregation-type union".split()
    retcode = predict(cli_predict_args)
    assert retcode == 0
def test_vote_aggregate_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Pseudo-3D ensemble prediction with majority-vote aggregation."""
    csv_ = " ".join([str(csv) for csv in [train_csv] * 2])
    cli_train_args += f"--train-csv {csv_}".split()
    cli_train_args += f"--valid-csv {csv_}".split()
    cli_train_args += "--patch-size 8 8".split()
    cli_train_args += "--pseudo3d-dim 0 1".split()
    cli_train_args += "--pseudo3d-size 31".split()
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += "--patch-size None None".split()
    cli_predict_args += "--pseudo3d-dim 0 1".split()
    cli_predict_args += "--pseudo3d-size 31".split()
    cli_predict_args += "--patch-overlap 0 0 0".split()
    cli_predict_args += "--aggregation-type vote".split()
    retcode = predict(cli_predict_args)
    assert retcode == 0
def test_pseudo3d_all_cli(
    cli_train_args: typing.List[builtins.str],
    cli_predict_args: typing.List[builtins.str],
    train_csv: pathlib.Path,
) -> None:
    """Train one network with pseudo3d-dim 'all'; predict along one axis."""
    cli_train_args += f"--train-csv {train_csv}".split()
    cli_train_args += f"--valid-csv {train_csv}".split()
    cli_train_args += "--patch-size 8 8".split()
    cli_train_args += "--pseudo3d-dim all".split()
    cli_train_args += "--pseudo3d-size 31".split()
    cli_train_args += ["--random-validation-patches"]
    best_model_paths = _get_and_format_best_model_paths(cli_train_args)
    cli_predict_args += f"--model-path {best_model_paths}".split()
    cli_predict_args += "--patch-size None None".split()
    cli_predict_args += "--pseudo3d-dim 0".split()
    cli_predict_args += "--pseudo3d-size 31".split()
    cli_predict_args += "--patch-overlap 0 0 0".split()
    retcode = predict(cli_predict_args)
    assert retcode == 0
```
#### File: tiramisu_brulee/experiment/util.py
```python
__all__ = [
"append_num_to_filename",
"BoundingBox3D",
"image_one_hot",
"minmax_scale_batch",
"reshape_for_broadcasting",
"to_np",
"setup_log",
"split_filename",
]
import builtins
import logging
import pathlib
import typing
import numpy as np
import torch
import torch.nn.functional as F
from tiramisu_brulee.experiment.type import Indices
T = typing.TypeVar("T", bound="BoundingBox3D")
def minmax_scale_batch(x: torch.Tensor) -> torch.Tensor:
    """rescale a batch of image PyTorch tensors to be between 0 and 1

    Each image in the batch is scaled by its own min/max (all dims except
    the batch dim are reduced). Constant images (max == min) are mapped to
    all zeros instead of producing NaN from a 0/0 division (bug fix).
    """
    dims = list(range(1, x.dim()))
    xmin = x.amin(dim=dims, keepdim=True)
    xmax = x.amax(dim=dims, keepdim=True)
    denom = xmax - xmin
    # avoid 0/0 -> NaN for constant images
    denom = torch.where(denom == 0, torch.ones_like(denom), denom)
    return (x - xmin) / denom
def to_np(x: torch.Tensor) -> np.ndarray:
    """convert a PyTorch torch.Tensor (potentially on GPU) to a numpy array"""
    detached = x.detach()
    array = detached.cpu().numpy()
    assert isinstance(array, np.ndarray)
    return array
def image_one_hot(image: torch.Tensor, *, num_classes: builtins.int) -> torch.Tensor:
    """one-hot encode a single-channel label image along the channel dim

    Input shape (N, 1, *spatial) -> output shape (N, num_classes, *spatial),
    returned in the input's dtype.
    """
    n_channels = image.shape[1]
    if n_channels > 1:
        raise RuntimeError(
            f"Image must only have one channel. Got {n_channels} channels."
        )
    one_hot = F.one_hot(image.long(), num_classes)
    # move the trailing class dim to the channel position, drop the old channel
    one_hot = one_hot.transpose(1, -1)[..., 0]
    return one_hot.type(image.type())
class BoundingBox3D:
    """Bounding-box indices plus crop/uncrop helpers for 3D volumes.

    NOTE(review): ``find_bbox`` returns *inclusive* last-foreground indices
    (``h[-1]``), but they are used as exclusive slice stops below, so the
    final foreground plane is excluded when ``pad == 0`` — confirm intended.
    """

    def __init__(
        self,
        i_low: builtins.int,
        i_high: builtins.int,
        j_low: builtins.int,
        j_high: builtins.int,
        k_low: builtins.int,
        k_high: builtins.int,
        *,
        original_shape: typing.Optional[
            typing.Tuple[builtins.int, builtins.int, builtins.int]
        ] = None,
    ):
        """bounding box indices and crop/uncrop func for 3d vols"""
        # slices over the last three (spatial) dims; low inclusive, high exclusive
        self.i = slice(i_low, i_high)
        self.j = slice(j_low, j_high)
        self.k = slice(k_low, k_high)
        # needed only by uncrop/uncrop_batch
        self.original_shape = original_shape

    def crop_to_bbox(self, tensor: torch.Tensor) -> torch.Tensor:
        """returns the tensor cropped around the saved bbox"""
        return tensor[..., self.i, self.j, self.k]

    def __call__(self, tensor: torch.Tensor) -> torch.Tensor:
        return self.crop_to_bbox(tensor)

    def uncrop(self, tensor: torch.Tensor) -> torch.Tensor:
        """places a tensor back into the saved original shape"""
        assert tensor.ndim == 3, "expects tensors with shape HxWxD"
        assert self.original_shape is not None
        out = torch.zeros(self.original_shape, dtype=tensor.dtype, device=tensor.device)
        out[self.i, self.j, self.k] = tensor
        return out

    def uncrop_batch(self, batch: torch.Tensor) -> torch.Tensor:
        """places a batch back into the saved original shape"""
        assert batch.ndim == 5, "expects tensors with shape NxCxHxWxD"
        assert self.original_shape is not None
        batch_size, channel_size = batch.shape[:2]
        out_shape = (batch_size, channel_size) + tuple(self.original_shape)
        out = torch.zeros(out_shape, dtype=batch.dtype, device=batch.device)
        out[..., self.i, self.j, self.k] = batch
        return out

    @staticmethod
    def find_bbox(mask: torch.Tensor, *, pad: builtins.int = 0) -> Indices:
        """Return (i_lo, i_hi, j_lo, j_hi, k_lo, k_hi) of mask's nonzero extent.

        Each pair comes from the first/last index with any foreground along
        that axis, expanded by ``pad`` and clamped to the mask's shape.
        """
        # indices with any foreground along each axis
        h = torch.where(torch.any(torch.any(mask, dim=1), dim=1))[0]
        w = torch.where(torch.any(torch.any(mask, dim=0), dim=1))[0]
        d = torch.where(torch.any(torch.any(mask, dim=0), dim=0))[0]
        h_low, h_high = h[0].item(), h[-1].item()
        w_low, w_high = w[0].item(), w[-1].item()
        d_low, d_high = d[0].item(), d[-1].item()
        i, j, k = mask.shape
        return (
            builtins.int(max(h_low - pad, 0)),
            builtins.int(min(h_high + pad, i)),
            builtins.int(max(w_low - pad, 0)),
            builtins.int(min(w_high + pad, j)),
            builtins.int(max(d_low - pad, 0)),
            builtins.int(min(d_high + pad, k)),
        )

    @classmethod
    def from_image(
        cls: typing.Type[T],
        image: torch.Tensor,
        *,
        pad: builtins.int = 0,
        foreground_min: builtins.float = 1e-4,
    ) -> T:
        """find a bounding box for a 3D tensor (with optional padding)"""
        foreground_mask = image > foreground_min
        assert isinstance(foreground_mask, torch.Tensor)
        bbox_idxs = cls.find_bbox(foreground_mask, pad=pad)
        original_shape = cls.get_shape(image)
        return cls(*bbox_idxs, original_shape=original_shape)

    @classmethod
    def from_batch(
        cls: typing.Type[T],
        batch: torch.Tensor,
        *,
        pad: builtins.int = 0,
        channel: builtins.int = 0,
        foreground_min: builtins.float = 1e-4,
    ) -> T:
        """create bbox that works for a batch of 3d vols"""
        assert batch.ndim == 5, "expects tensors with shape NxCxHxWxD"
        batch_size = batch.shape[0]
        assert batch_size > 0
        image_shape = batch.shape[2:]
        # initialize so the first image always tightens the bounds
        h_low, h_high = image_shape[0], -1
        w_low, w_high = image_shape[1], -1
        d_low, d_high = image_shape[2], -1
        for i in range(batch_size):
            image = batch[i, channel, ...]
            hl, hh, wl, wh, dl, dh = cls.find_bbox(image > foreground_min, pad=pad)
            # union of all per-image boxes
            h_low, h_high = min(hl, h_low), max(hh, h_high)
            w_low, w_high = min(wl, w_low), max(wh, w_high)
            d_low, d_high = min(dl, d_low), max(dh, d_high)
        # all images in the batch share one spatial shape, so using the
        # last loop image here is fine (batch_size > 0 asserted above)
        # noinspection PyUnboundLocalVariable
        original_shape = cls.get_shape(image)
        return cls(
            h_low,
            h_high,
            w_low,
            w_high,
            d_low,
            d_high,
            original_shape=original_shape,
        )

    @staticmethod
    def get_shape(
        image: torch.Tensor,
    ) -> typing.Tuple[builtins.int, builtins.int, builtins.int]:
        """Return the (H, W, D) shape of a 3D tensor as a plain tuple."""
        assert image.ndim == 3
        orig_x, orig_y, orig_z = tuple(image.shape)
        return (orig_x, orig_y, orig_z)
def reshape_for_broadcasting(
    tensor: torch.Tensor, *, ndim: builtins.int
) -> torch.Tensor:
    """expand dimensions of a 0- or 1-dimensional tensor to ndim for broadcast ops"""
    assert tensor.ndim <= 1
    # keep the data in the first dim; all trailing dims are singleton
    broadcast_shape = (-1,) + (1,) * (ndim - 1)
    return tensor.view(broadcast_shape)
def split_filename(
    filepath: typing.Union[builtins.str, pathlib.Path]
) -> typing.Tuple[pathlib.Path, builtins.str, builtins.str]:
    """split a filepath into the directory, base, and extension

    Double extensions ending in ".gz" (e.g. ".nii.gz") are kept together.
    """
    resolved = pathlib.Path(filepath).resolve()
    directory = resolved.parent
    stem = pathlib.Path(resolved.stem)
    extension = resolved.suffix
    if extension == ".gz":
        # fold the inner suffix into the extension, e.g. "x.nii.gz" -> ".nii.gz"
        extension = stem.suffix + extension
        base = str(stem.stem)
    else:
        base = str(stem)
    return pathlib.Path(directory), base, extension
def append_num_to_filename(
    filepath: typing.Union[builtins.str, pathlib.Path], *, num: builtins.int
) -> pathlib.Path:
    """append num to the filename of filepath and return the modified path"""
    path, base, ext = split_filename(filepath)
    return path / f"{base}_{num}{ext}"
def setup_log(verbosity: builtins.int) -> None:
    """set logger with verbosity logging level and message"""
    # 0 (or negative) -> WARNING, 1 -> INFO, >= 2 -> DEBUG
    if verbosity >= 2:
        level = logging.getLevelName("DEBUG")
    elif verbosity == 1:
        level = logging.getLevelName("INFO")
    else:
        level = logging.getLevelName("WARNING")
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(format=fmt, level=level)
    logging.captureWarnings(True)
```
#### File: tiramisu-brulee/tiramisu_brulee/loss.py
```python
__all__ = [
"binary_combo_loss",
"binary_focal_loss",
"deeply_supervised_loss",
"dice_loss",
"l1_segmentation_loss",
"mse_segmentation_loss",
]
import builtins
import typing
import torch
import torch.nn.functional as F
def per_channel_dice(
    tensor1: torch.Tensor,
    tensor2: torch.Tensor,
    *,
    eps: builtins.float = 1e-3,
    keepdim: builtins.bool = False,
) -> torch.Tensor:
    """compute dice score for each channel separately and reduce"""
    # reduce everything but the leading (batch, channel) dims
    spatial_dims = tuple(range(2 - tensor1.ndim, 0))
    overlap = torch.sum(tensor1 * tensor2, dim=spatial_dims, keepdim=keepdim)
    size1 = torch.sum(tensor1, dim=spatial_dims, keepdim=keepdim)
    size2 = torch.sum(tensor2, dim=spatial_dims, keepdim=keepdim)
    # eps keeps the ratio finite (and == 1) when both volumes are empty
    return (2 * overlap + eps) / (size1 + size2 + eps)
def weighted_channel_avg(tensor: torch.Tensor, *, weight: torch.Tensor) -> torch.Tensor:
    """mean of a (batch, channel) tensor with per-channel weights applied"""
    batch_size = tensor.shape[0]
    per_sample_weight = weight.unsqueeze(0).repeat(batch_size, 1)
    return torch.mean(per_sample_weight * tensor)
def dice_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    weight: typing.Optional[torch.Tensor] = None,
    reduction: builtins.str = "mean",
    eps: builtins.float = 1e-3,
) -> torch.Tensor:
    """sorensen-dice coefficient loss function"""
    keepdim = reduction != "mean"
    pc_dice = per_channel_dice(pred, target, eps=eps, keepdim=keepdim)
    if reduction == "none":
        dice = pc_dice
    elif reduction == "mean":
        if weight is None:
            dice = torch.mean(pc_dice)
        else:
            dice = weighted_channel_avg(pc_dice, weight=weight)
    else:
        raise NotImplementedError(f"{reduction} not implemented.")
    # loss decreases as overlap (dice) increases
    result: torch.Tensor = 1.0 - dice
    return result
def binary_focal_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    pos_weight: typing.Optional[typing.Union[builtins.float, torch.Tensor]] = None,
    reduction: builtins.str = "mean",
    gamma: builtins.float = 2.0,
) -> torch.Tensor:
    """focal loss for binary classification or segmentation

    Args:
        pred: logits (sigmoid is applied internally)
        target: binary targets with the same shape as ``pred``
        pos_weight: weight for the positive class; when gamma > 0 it is
            applied as an alpha-style factor, otherwise passed to BCE
        reduction: "mean", "sum", "batchwise_mean", or "none"
            (non-focal path supports only what BCE-with-logits supports)
        gamma: focusing parameter; 0 degenerates to plain BCE
    """
    use_focal = gamma > 0.0
    # focal path needs per-element BCE so the modulating factor can be applied
    bce_reduction = "none" if use_focal else reduction
    if use_focal:
        # pos_weight is applied manually below instead of inside BCE
        bce_pos_weight = None
    else:
        if pos_weight is not None and isinstance(pos_weight, builtins.float):
            bce_pos_weight = torch.tensor(
                [pos_weight], dtype=pred.dtype, device=pred.device
            )
        elif pos_weight is None or isinstance(pos_weight, torch.Tensor):
            bce_pos_weight = pos_weight
        else:
            msg = (
                f"pos_weight must be a none, float, or tensor. Got {type(pos_weight)}."
            )
            raise ValueError(msg)
    bce_loss = F.binary_cross_entropy_with_logits(
        pred,
        target,
        reduction=bce_reduction,
        pos_weight=bce_pos_weight,
    )
    loss_val: torch.Tensor
    if use_focal:
        # p_t is the model's probability of the true class
        p = torch.sigmoid(pred)
        p_t = p * target + (1 - p) * (1 - target)
        loss_val = bce_loss * ((1 - p_t) ** gamma)
    else:
        loss_val = bce_loss
    if pos_weight is not None and use_focal:
        # alpha-style class weighting, normalized so weights sum to 1
        weight = pos_weight / (1.0 + pos_weight)
        weight_t = weight * target + (1 - weight) * (1 - target)
        loss_val = weight_t * loss_val
    if use_focal:
        if reduction == "mean":
            loss_val = loss_val.mean()
        elif reduction == "sum":
            loss_val = loss_val.sum()
        elif reduction == "batchwise_mean":
            # NOTE(review): sums over the batch dim (no mean) — confirm naming
            loss_val = loss_val.sum(0)
        elif reduction == "none":
            pass
        else:
            raise NotImplementedError(f"{reduction} not implemented.")
    return loss_val
def binary_combo_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    reduction: builtins.str = "mean",
    pos_weight: typing.Optional[builtins.float] = None,
    focal_gamma: builtins.float = 0.0,
    combo_weight: builtins.float = 0.5,
) -> torch.Tensor:
    """combo loss (dice + focal weighted by combo_weight) for binary labels"""
    assert 0.0 <= combo_weight <= 1.0
    assert 0.0 <= focal_gamma
    focal_term = binary_focal_loss(
        pred,
        target,
        pos_weight=pos_weight,
        reduction=reduction,
        gamma=focal_gamma,
    )
    # dice operates on probabilities, not logits
    dice_term = dice_loss(torch.sigmoid(pred), target, reduction=reduction)
    return combo_weight * focal_term + (1 - combo_weight) * dice_term
def combo_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    num_classes: builtins.int,
    reduction: builtins.str = "mean",
    combo_weight: builtins.float = 0.5,
) -> torch.Tensor:
    """combo loss (dice + focal weighted by combo_weight) for multi-class labels"""
    assert 0.0 <= combo_weight <= 1.0
    assert 2 <= num_classes
    # target may arrive as (N, 1, ...) or already channel-less (N, ...)
    channel_not_removed = pred.ndim == target.ndim
    if channel_not_removed and target.shape[1] > 1:
        raise ValueError(f"Channel size must be 1 or 0. Got {target.shape[1]}")
    _target = (target[:, 0, ...] if channel_not_removed else target).long()
    ce_term = F.cross_entropy(pred, _target, reduction=reduction)
    probabilities = torch.softmax(pred, dim=1)
    # one-hot encode with classes in the channel dim for the dice term
    one_hot = F.one_hot(_target, num_classes)
    one_hot = torch.movedim(one_hot, -1, 1).float()
    dice_term = dice_loss(probabilities, one_hot, reduction=reduction)
    return combo_weight * ce_term + (1 - combo_weight) * dice_term
def deeply_supervised_loss(  # type: ignore[no-untyped-def]
    preds: typing.List[torch.Tensor],
    target: torch.Tensor,
    *,
    loss_func: typing.Callable,
    level_weights: typing.Union[builtins.float, typing.List[builtins.float]] = 1.0,
    **loss_func_kwargs,
) -> torch.Tensor:
    """compute loss_func by comparing multiple same-shape preds to target

    Args:
        preds: per-level predictions, each compared against ``target``
        target: ground-truth tensor
        loss_func: loss applied to every (pred, target) pair
        level_weights: one scalar shared by all levels, or one weight per level
        **loss_func_kwargs: forwarded to ``loss_func``
    """
    # bug fix: accept int scalars too (e.g. level_weights=1); the original
    # float-only isinstance check let ints fall through to zip() and crash
    if isinstance(level_weights, (builtins.int, builtins.float)):
        level_weights = [level_weights] * len(preds)
    loss_val = torch.tensor(0.0, dtype=target.dtype, device=target.device)
    for level_weight, pred in zip(level_weights, preds):
        # out-of-place add keeps autograd history clean
        loss_val = loss_val + level_weight * loss_func(pred, target, **loss_func_kwargs)
    return loss_val
def l1_segmentation_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    reduction: builtins.str = "mean",
) -> torch.Tensor:
    """l1 loss for segmentation by applying sigmoid to pred -> l1"""
    probabilities = torch.sigmoid(pred)
    return F.l1_loss(probabilities, target, reduction=reduction)
def mse_segmentation_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    *,
    reduction: builtins.str = "mean",
) -> torch.Tensor:
    """mse loss for segmentation by applying sigmoid to pred -> mse"""
    probabilities = torch.sigmoid(pred)
    return F.mse_loss(probabilities, target, reduction=reduction)
```
#### File: tiramisu_brulee/model/tiramisu.py
```python
__all__ = [
"Tiramisu2d",
"Tiramisu3d",
]
import builtins
import typing
import torch
import torch.nn as nn
from tiramisu_brulee.model.dense import (
Bottleneck2d,
Bottleneck3d,
DenseBlock2d,
DenseBlock3d,
ResizeMethod,
TransitionDown2d,
TransitionDown3d,
TransitionUp2d,
TransitionUp3d,
)
class Tiramisu(nn.Module):
    """Dimension-agnostic Tiramisu (FC-DenseNet) base; subclasses bind 2D/3D ops."""

    # class-level "slots" that Tiramisu2d/Tiramisu3d fill in with the
    # concrete 2D or 3D layer types and kernel sizes
    _bottleneck: typing.ClassVar[
        typing.Union[typing.Type[Bottleneck2d], typing.Type[Bottleneck3d]]
    ]
    _conv: typing.ClassVar[typing.Union[typing.Type[nn.Conv2d], typing.Type[nn.Conv3d]]]
    _denseblock: typing.ClassVar[
        typing.Union[typing.Type[DenseBlock2d], typing.Type[DenseBlock3d]]
    ]
    _trans_down: typing.ClassVar[
        typing.Union[typing.Type[TransitionDown2d], typing.Type[TransitionDown3d]]
    ]
    _trans_up: typing.ClassVar[
        typing.Union[typing.Type[TransitionUp2d], typing.Type[TransitionUp3d]]
    ]
    _first_kernel_size: typing.ClassVar[
        typing.Union[
            typing.Tuple[builtins.int, builtins.int],
            typing.Tuple[builtins.int, builtins.int, builtins.int],
        ]
    ]
    _final_kernel_size: typing.ClassVar[
        typing.Union[
            typing.Tuple[builtins.int, builtins.int],
            typing.Tuple[builtins.int, builtins.int, builtins.int],
        ]
    ]
    _padding_mode: typing.ClassVar[builtins.str] = "replicate"

    # flake8: noqa: E501
    def __init__(
        self,
        *,
        in_channels: builtins.int = 3,
        out_channels: builtins.int = 1,
        down_blocks: typing.Collection[builtins.int] = (5, 5, 5, 5, 5),
        up_blocks: typing.Collection[builtins.int] = (5, 5, 5, 5, 5),
        bottleneck_layers: builtins.int = 5,
        growth_rate: builtins.int = 16,
        first_conv_out_channels: builtins.int = 48,
        dropout_rate: builtins.float = 0.2,
        resize_method: ResizeMethod = ResizeMethod.CROP,
        input_shape: typing.Optional[typing.Tuple[builtins.int, ...]] = None,
        static_upsample: builtins.bool = False,
    ):
        """
        Base class for Tiramisu convolutional neural network
        See Also:
            Jégou, Simon, et al. "The one hundred layers tiramisu: Fully
            convolutional densenets for semantic segmentation." CVPR. 2017.
        Based on: https://github.com/bfortuner/pytorch_tiramisu
        Args:
            in_channels (builtins.int): number of input channels
            out_channels (builtins.int): number of output channels
            down_blocks (typing.Collection[builtins.int]): number of layers in each block in down path
            up_blocks (typing.Collection[builtins.int]): number of layers in each block in up path
            bottleneck_layers (builtins.int): number of layers in the bottleneck
            growth_rate (builtins.int): number of channels to grow by in each layer
            first_conv_out_channels (builtins.int): number of output channels in first conv
            dropout_rate (builtins.float): dropout rate/probability
            resize_method (ResizeMethod): method to resize the image in upsample branch
            input_shape: optionally provide shape of the input image (for onnx)
            static_upsample: use static upsampling when capable if input_shape provided
                (doesn't check upsampled size matches)
        """
        super().__init__()
        assert len(down_blocks) == len(up_blocks)
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        # channel counts of the skip connections, built front-to-back so the
        # up path can consume them in reverse order
        skip_connection_channel_counts: typing.List[builtins.int] = []
        if input_shape is not None:
            # track spatial shapes through the down path so the up path can
            # resize to the exact matching shape (needed for onnx export)
            tensor_shape = torch.as_tensor(input_shape)
            shapes = [input_shape]
        self.first_conv = nn.Sequential(
            self._conv(
                in_channels,
                first_conv_out_channels,
                self._first_kernel_size,  # type: ignore[arg-type]
                bias=False,
                padding="same",
                padding_mode=self._padding_mode,
            ),
        )
        cur_channels_count: builtins.int = first_conv_out_channels
        # Downsampling path
        self.dense_down = nn.ModuleList([])
        self.trans_down = nn.ModuleList([])
        for i, n_layers in enumerate(down_blocks, 1):
            denseblock = self._denseblock(
                in_channels=cur_channels_count,
                growth_rate=growth_rate,
                n_layers=n_layers,
                upsample=False,
                dropout_rate=dropout_rate,
            )
            self.dense_down.append(denseblock)
            # dense blocks concatenate their layer outputs onto the input
            cur_channels_count += growth_rate * n_layers
            skip_connection_channel_counts.insert(0, cur_channels_count)
            trans_down_block = self._trans_down(
                in_channels=cur_channels_count,
                out_channels=cur_channels_count,
                dropout_rate=dropout_rate,
            )
            self.trans_down.append(trans_down_block)
            if i < len(down_blocks) and input_shape is not None:
                # each transition-down halves the spatial shape (floor division)
                tensor_shape = torch.div(tensor_shape, 2, rounding_mode="floor")
                shapes.append(tuple(tensor_shape))
        # Bottleneck
        self.bottleneck = self._bottleneck(
            in_channels=cur_channels_count,
            growth_rate=growth_rate,
            n_layers=bottleneck_layers,
            dropout_rate=dropout_rate,
        )
        prev_block_channels = growth_rate * bottleneck_layers
        cur_channels_count += prev_block_channels
        # Upsampling path
        self.dense_up = nn.ModuleList([])
        self.trans_up = nn.ModuleList([])
        up_info = zip(up_blocks, skip_connection_channel_counts)
        for i, (n_layers, sccc) in enumerate(up_info, 1):
            # pop shapes in reverse so each trans-up targets its mirror level
            resize_shape = None if input_shape is None else shapes.pop()
            if resize_shape is not None and static_upsample:
                # static upsampling is only exact when every dim is even
                _static_upsample = all(x % 2 == 0 for x in resize_shape)
            else:
                _static_upsample = False
            trans_up_block = self._trans_up(
                in_channels=prev_block_channels,
                out_channels=prev_block_channels,
                resize_method=resize_method,
                resize_shape=resize_shape,
                static=_static_upsample,
            )
            self.trans_up.append(trans_up_block)
            cur_channels_count = prev_block_channels + sccc
            upsample = i < len(up_blocks)  # do not upsample on last block
            denseblock = self._denseblock(
                in_channels=cur_channels_count,
                growth_rate=growth_rate,
                n_layers=n_layers,
                upsample=upsample,
                dropout_rate=dropout_rate,
            )
            self.dense_up.append(denseblock)
            prev_block_channels = growth_rate * n_layers
            cur_channels_count += prev_block_channels
        self.final_conv = self._conv(
            in_channels=cur_channels_count,
            out_channels=out_channels,
            kernel_size=self._final_kernel_size,  # type: ignore[arg-type]
            bias=True,
            padding="same",
            padding_mode=self._padding_mode,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode, bottleneck, then decode with skip connections; returns logits."""
        out = self.first_conv(x)
        skip_connections = []
        for dbd, tdb in zip(self.dense_down, self.trans_down):
            out = dbd(out)
            skip_connections.append(out)
            out = tdb(out)
        out = self.bottleneck(out)
        for ubd, tub in zip(self.dense_up, self.trans_up):
            # skips are consumed LIFO: deepest encoder level first
            skip = skip_connections.pop()
            out = tub(out, skip=skip)
            out = ubd(out)
        out = self.final_conv(out)
        assert isinstance(out, torch.Tensor)
        return out
class Tiramisu2d(Tiramisu):
    """2D Tiramisu: binds the 2D layer types into the shared base class."""
    _bottleneck = Bottleneck2d
    _conv = nn.Conv2d
    _denseblock = DenseBlock2d
    _pad = nn.ReplicationPad2d
    _trans_down = TransitionDown2d
    _trans_up = TransitionUp2d
    _first_kernel_size = (3, 3)
    _final_kernel_size = (1, 1)
class Tiramisu3d(Tiramisu):
    """3D Tiramisu: binds the 3D layer types into the shared base class."""
    _bottleneck = Bottleneck3d
    _conv = nn.Conv3d
    _denseblock = DenseBlock3d
    _pad = nn.ReplicationPad3d
    _trans_down = TransitionDown3d
    _trans_up = TransitionUp3d
    _first_kernel_size = (3, 3, 3)
    _final_kernel_size = (1, 1, 1)
```
|
{
"source": "jcreinhold/uncertaintorch",
"score": 2
}
|
#### File: uncertaintorch/learn/loss.py
```python
__all__ = ['GaussianDiagLoss',
'L1OnlyLoss',
'LaplacianDiagLoss',
'MSEOnlyLoss',
'BinaryFocalLoss',
'FocalLoss',
'ExtendedCrossEntropy',
'DiceLoss',
'SquaredDiceLoss',
'MonsterLoss',
'MonsterLossAux',
'ExtendedMonsterLoss']
import numpy as np
import torch
from torch import sigmoid
from torch import nn
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
from torch.nn.functional import softmax
class MaskLoss(nn.Module):
    """Base loss that optionally down-weights voxels outside a mask.

    If the target ``y`` has one channel it is used directly. If it has two
    channels, the second is a {0, 1} mask: in-mask voxels get weight 1 and
    out-of-mask voxels get weight 1/beta.
    """

    def __init__(self, beta=25.):
        super().__init__()
        # larger beta -> stronger down-weighting of out-of-mask voxels
        self.beta = beta

    def forward(self, out, y):
        if y.size(1) == 1:
            loss = self.loss_fn(out, y)
        else:
            tgt, mask = torch.chunk(y, 2, dim=1)
            # bug fix: torch.chunk returns views of y, so the in-place ops
            # below would silently mutate the caller's target tensor; clone
            mask = mask.clone()
            mask *= self.beta
            mask[mask == 0.] = 1.
            mask /= self.beta
            loss = torch.mean(mask * self.loss_fn(out, tgt, reduction='none'))
        return loss

    def loss_fn(self, out, y, reduction='mean'):
        """Per-element loss; subclasses must implement."""
        raise NotImplementedError
class MSEOnlyLoss(MaskLoss):
    """Masked MSE; the model's uncertainty output is ignored."""

    def loss_fn(self, out, y, reduction='mean'):
        prediction, _ = out
        return F.mse_loss(prediction, y, reduction=reduction)
class L1OnlyLoss(MaskLoss):
    """Masked L1; the model's uncertainty output is ignored."""

    def loss_fn(self, out, y, reduction='mean'):
        prediction, _ = out
        return F.l1_loss(prediction, y, reduction=reduction)
class GaussianDiagLoss(MaskLoss):
    """Heteroscedastic Gaussian NLL where ``s`` is the predicted log-variance."""

    def loss_fn(self, out, y, reduction='mean'):
        yhat, s = out
        squared_error = F.mse_loss(yhat, y, reduction='none')
        loss = 0.5 * (torch.exp(-s) * squared_error + s)
        if reduction == 'mean':
            loss = torch.mean(loss)
        return loss
class LaplacianDiagLoss(MaskLoss):
    """Heteroscedastic Laplacian NLL where ``s`` is the predicted log-scale."""

    def loss_fn(self, out, y, reduction='mean'):
        yhat, s = out
        absolute_error = F.l1_loss(yhat, y, reduction='none')
        loss = torch.exp(-s) * absolute_error + s
        if reduction == 'mean':
            loss = torch.mean(loss)
        return loss
class MaskLossSegmentation(nn.Module):
    """Base segmentation loss with optional mask weighting.

    When ``use_mask`` is set, ``y`` carries two channels: the label map and
    a {0, 1} mask; out-of-mask voxels are down-weighted by 1/beta.
    """

    def __init__(self, beta=25., use_mask=False):
        super().__init__()
        self.use_mask = use_mask
        # larger beta -> stronger down-weighting of out-of-mask voxels
        self.beta = beta

    def forward(self, out, y):
        if not self.use_mask:
            loss = self.loss_fn(out, y)
        else:
            tgt, mask = torch.chunk(y, 2, dim=1)
            # bug fix: squeeze() returns a view and .float() returns the
            # same storage when y is already float, so the in-place ops
            # below would mutate the caller's tensor; clone the mask
            tgt, mask = tgt.squeeze(), mask.squeeze().float().clone()
            mask *= self.beta
            mask[mask == 0.] = 1.
            mask /= self.beta
            loss = torch.mean(mask * self.loss_fn(out, tgt, reduction='none'))
        return loss

    def loss_fn(self, out, y, reduction='mean'):
        """Per-element loss; subclasses must implement."""
        raise NotImplementedError
class BinaryMaskLossSegmentation(MaskLossSegmentation):
    """Binary variant of MaskLossSegmentation.

    Unlike the parent, the channel dimension is kept (no squeeze) because
    BCE-style losses take a float target of the same shape as the logits.
    """
    def forward(self, out, y):
        if not self.use_mask:
            loss = self.loss_fn(out, y)
        else:
            tgt, mask = torch.chunk(y, 2, dim=1)
            mask = mask.float()
            # nonzero mask values stay, zeros become 1/beta
            mask *= self.beta
            mask[mask == 0.] = 1.
            mask /= self.beta
            loss = torch.mean(mask * self.loss_fn(out, tgt, reduction='none'))
        return loss
class BinaryFocalLoss(BinaryMaskLossSegmentation):
    """Binary focal loss: BCE scaled by (1 - p_t)**gamma, with optional
    alpha-style class balancing via ``weight``."""
    def __init__(self, beta=25., use_mask=False, weight=None, gamma=2.):
        super().__init__(beta, use_mask)
        self.weight = weight  # positive-class weight; complement is applied to negatives
        self.gamma = gamma    # focusing parameter; gamma=0 reduces to (weighted) BCE
    def loss_fn(self, out, y, reduction='mean'):
        """ Taken from: https://github.com/catalyst-team/catalyst/, https://github.com/facebookresearch/fvcore """
        pred, _ = out
        p = torch.sigmoid(pred)
        ce_loss = F.binary_cross_entropy_with_logits(pred, y, reduction="none")
        # p_t = model's probability assigned to the true class
        p_t = p * y + (1 - p) * (1 - y)
        loss = ce_loss * ((1 - p_t) ** self.gamma)
        if self.weight is not None:
            weight_t = self.weight * y + (1 - self.weight) * (1 - y)
            loss = weight_t * loss
        if reduction == "mean":
            loss = loss.mean()
        if reduction == "sum":
            loss = loss.sum()
        if reduction == "batchwise_mean":
            # sum over the batch dim only, keeping the spatial map
            loss = loss.sum(0)
        return loss
class FocalLoss(MaskLossSegmentation):
    """Multi-class focal loss: NLL with each class term scaled by (1 - p)**gamma."""
    def __init__(self, beta=25., use_mask=False, weight=None, gamma=2.):
        super().__init__(beta, use_mask)
        self.weight = weight
        self.gamma = gamma
    def loss_fn(self, out, y, reduction='mean'):
        logits = out[0]
        log_p = F.log_softmax(logits, dim=1)
        focal_term = (1 - torch.exp(log_p)) ** self.gamma
        return F.nll_loss(focal_term * log_p, y, weight=self.weight, reduction=reduction)
class ExtendedCrossEntropy(MaskLossSegmentation):
    """Cross entropy with aleatoric uncertainty: logits are sampled from a
    Gaussian with predicted std exp(sigma) and averaged over MC samples."""
    def __init__(self, beta=25., use_mask=False, weight=None, n_samples=10):
        super().__init__(beta, use_mask)
        self.weight = weight
        self.nsamp = n_samples  # number of Monte Carlo logit samples
    def loss_fn(self, out, y, reduction='mean'):
        """ https://github.com/alainjungo/reliability-challenges-uncertainty """
        logits, sigma = out
        # reparameterized sampling keeps the loss differentiable w.r.t. both heads
        dist = torch.distributions.Normal(logits, torch.exp(sigma))
        x_hat = dist.rsample((self.nsamp,))
        mc_prob = F.softmax(x_hat, dim=2).mean(dim=0)  # channel dim = 2 b/c samples
        return F.nll_loss(mc_prob.log(), y, weight=self.weight, reduction=reduction)
# The below is from Shuo Han's repo: https://gitlab.com/shan-deep-networks/pytorch-metrics/
def prob_encode(input):
    """Convert logits to probabilities.

    Channel-wise softmax for multi-channel input, element-wise sigmoid when
    there is a single channel.
    """
    multichannel = input.shape[1] > 1
    return softmax(input, dim=1) if multichannel else sigmoid(input)
def one_hot(input, shape):
    """One-hot encode *input* into a float tensor of *shape*.

    *input* is a LongTensor of label indices with a size-1 channel dim; the
    result has a 1 at the labelled channel and 0 elsewhere, placed on the
    same device as *input* when it lives on the GPU.
    """
    encoded = torch.zeros(shape)
    if input.is_cuda:
        encoded = encoded.cuda(device=input.device)
    encoded.scatter_(1, input, 1)
    return encoded
def _calc_dices(input, target, eps=0.001, keepdim=False):
"""Calculate dices for each sample and each channel
Args:
input (torch.FloatTensor): The input tensor
target (torch.FloatTensor): The target tensor, one_hot encoded
Returns:
dices (torch.FloatTensor): The dices of each sample (first dim) and each
channel (second dim)
"""
spatial_dims = tuple(range(2 - len(input.shape), 0))
intersection = torch.sum(input * target, dim=spatial_dims, keepdim=keepdim)
sum1 = torch.sum(input, dim=spatial_dims, keepdim=keepdim)
sum2 = torch.sum(target, dim=spatial_dims, keepdim=keepdim)
dices = (2 * intersection + eps) / (sum1 + sum2 + eps)
return dices
def _calc_squared_dices(input, target, eps=0.001):
"""Calculate squared dices for each sample and each channel
Args:
input (torch.FloatTensor): The input tensor
target (torch.FloatTensor): The target tensor, one_hot encoded
eps (float): The smoothing term preventing division by 0
Returns:
dices (torch.FloatTensor): The dices of each sample (first dim) and each
channel (second dim)
"""
spatial_dims = tuple(range(2 - len(input.shape), 0))
intersection = torch.sum(input * target, dim=spatial_dims)
sum1 = torch.sum(input ** 2, dim=spatial_dims)
sum2 = torch.sum(target ** 2, dim=spatial_dims)
dices = (2 * intersection + eps) / (sum1 + sum2 + eps)
return dices
def calc_weighted_average(vals, weight):
    """Mean of *vals* after applying the 1-d per-channel *weight* along dim 1.

    Note the divisor is the total element count, exactly as in a plain mean.
    """
    tiled = weight.unsqueeze(0).expand(vals.shape[0], -1)
    return torch.mean(tiled * vals)
def calc_dice_loss(input, target, weight=None, average=True, eps=0.001):
    """Dice loss = 1 - Dice.

    With ``average`` the per-sample/per-channel dices are reduced to a scalar
    (channel-weighted when *weight* is given); otherwise the unreduced,
    keepdim dices are returned element-wise subtracted from 1.
    """
    dices = _calc_dices(input, target, eps=eps, keepdim=not average)
    if not average:
        return 1 - dices
    reduced = torch.mean(dices) if weight is None else calc_weighted_average(dices, weight)
    return 1 - reduced
def calc_squared_dice_loss(input, target, weight=None, eps=0.001):
    """Squared-Dice loss = 1 - squared Dice (channel-weighted when *weight*
    is given, plain mean otherwise)."""
    dices = _calc_squared_dices(input, target, eps=eps)
    reduced = torch.mean(dices) if weight is None else calc_weighted_average(dices, weight)
    return 1 - reduced
def calc_dice(input, target, channel_indices=None, eps=0):
    """Average hard-segmentation Dice across samples and channels.

    Multi-class (> 2 channels): argmax the probabilities and one-hot encode
    both sides. Otherwise: threshold at 0.5. ``channel_indices`` restricts
    the channels included in the average.
    """
    probs = prob_encode(input)
    if probs.shape[1] > 2:
        hard_seg = one_hot(torch.argmax(probs, dim=1, keepdim=True), probs.shape)
        target_enc = one_hot(target, probs.shape)
    else:
        hard_seg = (probs >= 0.5).float()
        target_enc = target.float()
    if channel_indices is not None:
        hard_seg = hard_seg[:, channel_indices, ...]
        target_enc = target_enc[:, channel_indices, ...]
    return torch.mean(_calc_dices(hard_seg, target_enc, eps=eps))
class SquaredDiceLoss(_WeightedLoss):
    """Squared-Dice loss on softmax/sigmoid-encoded probabilities."""
    def __init__(self, weight=None):
        super().__init__(weight=weight)
    def forward(self, input, target):
        probs = prob_encode(input)
        encoded = one_hot(target, probs.shape)
        return calc_squared_dice_loss(probs, encoded, weight=self.weight)
class DiceLoss(_WeightedLoss):
    """Dice loss on softmax/sigmoid-encoded probabilities.

    Targets are one-hot encoded only in the multi-class (> 2 channel) case.
    """
    def __init__(self, weight=None, average=True):
        super().__init__(weight=weight)
        self.average = average
    def forward(self, input, target):
        probs = prob_encode(input)
        if probs.shape[1] > 2:
            encoded = one_hot(target, probs.shape)
        else:
            encoded = target.float()
        return calc_dice_loss(probs, encoded, weight=self.weight,
                              average=self.average)
class MonsterLoss(BinaryMaskLossSegmentation):
    """ use focal, dice, and l1 loss together """
    def __init__(self, alpha=(1.,1.,1.), beta=25., use_mask=False, gamma=2., weight=None, use_l2=True):
        super().__init__(beta, use_mask)
        self.alpha = alpha    # (focal, dice, regression) term weights; 0 disables a term
        self.weight = weight  # positive-class weight for the focal term
        self.gamma = gamma    # focal focusing parameter; 0 falls back to weighted BCE
        self.use_l2 = use_l2  # MSE vs L1 for the regression term
    def loss_fn(self, out, y, reduction='mean'):
        average = reduction == 'mean'
        pred, _ = out
        # tanh(pred) in [-1, 1]; (tanh + 1)/2 serves as the probability
        pred_t = torch.tanh(pred)
        if self.alpha[0] > 0.:
            if self.gamma == 0.:
                focal_loss = F.binary_cross_entropy_with_logits(pred, y, pos_weight=self.weight, reduction=reduction)
            else:
                ce_loss = F.binary_cross_entropy_with_logits(pred, y, reduction="none")
                p = (pred_t + 1.) / 2.
                p_t = p * y + (1 - p) * (1 - y)
                focal_loss = ce_loss * ((1 - p_t) ** self.gamma)
                if self.weight is not None:
                    weight_t = self.weight * y + (1 - self.weight) * (1 - y)
                    focal_loss = weight_t * focal_loss
                if average: focal_loss = focal_loss.mean()
        else:
            focal_loss = 0.
        if self.alpha[1] > 0.:
            p = (pred_t + 1.) / 2.
            dice_loss = calc_dice_loss(p, y, average=average)
        else:
            dice_loss = 0.
        if self.alpha[2] > 0.:
            # rescale the {0,1} target to [-1, 1] to match tanh output
            y = (y * 2.) - 1.
            reg_loss = F.mse_loss(pred_t, y, reduction=reduction) if self.use_l2 else \
                       F.l1_loss(pred_t, y, reduction=reduction)
        else:
            reg_loss = 0.
        return self.alpha[0] * focal_loss + self.alpha[1] * dice_loss + self.alpha[2] * reg_loss
class ExtendedMonsterLoss(BinaryMaskLossSegmentation):
    """MonsterLoss with aleatoric uncertainty: logits are MC-sampled from a
    Gaussian with predicted std exp(sigma) before each loss term."""
    def __init__(self, alpha=(1.,1.,1.), beta=25., use_mask=False, gamma=2., weight=None, n_samples=10,
                 use_l2=True, extended_regression=False):
        super().__init__(beta, use_mask)
        self.alpha = alpha     # (focal, dice, regression) term weights; 0 disables a term
        self.weight = weight   # positive-class weight for the focal term
        self.gamma = gamma     # focal focusing parameter; 0 falls back to weighted BCE
        self.nsamp = n_samples # number of Monte Carlo logit samples
        self.mlv = -13.816  # min log variance = ~log(1e-6)
        self.use_l2 = use_l2
        self.extended_regression = extended_regression  # heteroscedastic regression term
    def loss_fn(self, out, y, reduction='mean'):
        average = reduction == 'mean'
        pred, sigma = out
        # clamp for numerical stability of exp(-sigma)
        sigma = torch.clamp_min(sigma, self.mlv)
        dist = torch.distributions.Normal(pred, torch.exp(sigma))
        mc_logs = dist.rsample((self.nsamp,))
        pred_t = torch.tanh(mc_logs.mean(dim=0))
        if self.alpha[0] > 0.:
            focal_loss = 0.
            if self.gamma == 0.:
                # NOTE(review): this branch sums over samples without the
                # /nsamp applied in the focal branch below — confirm intended
                for mc_log in mc_logs:
                    focal_loss += F.binary_cross_entropy_with_logits(mc_log, y, pos_weight=self.weight, reduction=reduction)
            else:
                for mc_log in mc_logs:
                    ce_loss = F.binary_cross_entropy_with_logits(mc_log, y, reduction="none")
                    mc_pred_t = torch.tanh(mc_log)
                    p = (mc_pred_t + 1.) / 2.
                    p_t = p * y + (1. - p) * (1. - y)
                    focal_loss += (ce_loss * ((1. - p_t) ** self.gamma)) / self.nsamp
                if self.weight is not None:
                    weight_t = self.weight * y + (1. - self.weight) * (1. - y)
                    focal_loss *= weight_t
                if average: focal_loss = focal_loss.mean()
        else:
            focal_loss = 0.
        if self.alpha[1] > 0.:
            p = (pred_t + 1.) / 2.
            dice_loss = calc_dice_loss(p, y, average=average)
        else:
            dice_loss = 0.
        if self.alpha[2] > 0.:
            # rescale the {0,1} target to [-1, 1] to match tanh output
            y = (y * 2.) - 1.
            if self.extended_regression:
                # heteroscedastic Gaussian/Laplacian-style penalty
                if self.use_l2:
                    reg_loss = 0.5 * (torch.exp(-sigma) * F.mse_loss(pred_t, y, reduction='none') + sigma)
                else:
                    reg_loss = (torch.exp(-sigma) * F.l1_loss(pred_t, y, reduction='none')) + sigma
            else:
                reg_loss = F.mse_loss(pred_t, y, reduction='none') if self.use_l2 else F.l1_loss(pred_t, y, reduction='none')
            if average: reg_loss = torch.mean(reg_loss)
        else:
            reg_loss = 0.
        return self.alpha[0] * focal_loss + self.alpha[1] * dice_loss + self.alpha[2] * reg_loss
class MonsterLossAux(BinaryMaskLossSegmentation):
    """ use focal, dice, and l1 loss together """
    # Deep-supervision variant: `out` is a sequence of predictions (e.g. main
    # head plus auxiliary heads) combined with `balance_weights`.
    def __init__(self, alpha=(1.,1.,1.), beta=25., use_mask=False, gamma=2.,
                 weight=None, use_l2=True, balance_weights=(1.0, 0.4)):
        super().__init__(beta, use_mask)
        self.alpha = alpha    # (focal, dice, regression) term weights; 0 disables a term
        self.weight = weight  # positive-class weight for the focal term
        self.gamma = gamma    # focal focusing parameter; 0 falls back to weighted BCE
        self.use_l2 = use_l2  # MSE vs L1 for the regression term
        self.balance_weights = balance_weights  # per-head weights, main head first
    def loss_fn(self, out, y, reduction='mean'):
        average = reduction == 'mean'
        loss = 0.
        for bw, pred in zip(self.balance_weights, out):
            pred_t = torch.tanh(pred)
            if self.alpha[0] > 0.:
                if self.gamma == 0.:
                    focal_loss = F.binary_cross_entropy_with_logits(pred, y, pos_weight=self.weight, reduction=reduction)
                else:
                    ce_loss = F.binary_cross_entropy_with_logits(pred, y, reduction="none")
                    p = (pred_t + 1.) / 2.
                    p_t = p * y + (1 - p) * (1 - y)
                    focal_loss = ce_loss * ((1 - p_t) ** self.gamma)
                    if self.weight is not None:
                        weight_t = self.weight * y + (1 - self.weight) * (1 - y)
                        focal_loss = weight_t * focal_loss
                    if average: focal_loss = focal_loss.mean()
            else:
                focal_loss = 0.
            if self.alpha[1] > 0.:
                p = (pred_t + 1.) / 2.
                dice_loss = calc_dice_loss(p, y, average=average)
            else:
                dice_loss = 0.
            if self.alpha[2] > 0.:
                # rescale {0,1} target to [-1, 1]; note this rebinds y, so on
                # later heads the already-rescaled y is rescaled again —
                # NOTE(review): confirm the repeated rescale is intended
                y = (y * 2.) - 1.
                reg_loss = F.mse_loss(pred_t, y, reduction=reduction) if self.use_l2 else \
                           F.l1_loss(pred_t, y, reduction=reduction)
            else:
                reg_loss = 0.
            loss += bw * (self.alpha[0] * focal_loss + self.alpha[1] * dice_loss + self.alpha[2] * reg_loss)
        return loss
```
#### File: uncertaintorch/models/resnet3d.py
```python
__all__ = ['ResNet3d', 'resnet3d18', 'resnet3d34', 'resnet3d50', 'resnet3d101']
import torch
from torch import nn
import torch.nn.functional as F
BASE_WIDTH = 32  # channel width of the stem / first stage
EXPANSION = 2    # bottleneck output-channel multiplier
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3x3 convolution preceded by replication padding of width *dilation*
    (keeps spatial size at stride 1)."""
    pad = nn.ReplicationPad3d(dilation)
    body = nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,
                     groups=groups, bias=False, dilation=dilation)
    return nn.Sequential(pad, body)
def conv1x1(in_planes, out_planes, stride=1):
    """Pointwise (1x1x1) 3-D convolution, no padding, no bias."""
    return nn.Conv3d(in_planes, out_planes, 1, stride=stride, bias=False)
class Bottleneck(nn.Module):
    """3-D ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, with residual
    shortcut (optionally downsampled to match)."""
    expansion = EXPANSION
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=BASE_WIDTH, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm3d
        # inner width scales with base_width and groups (ResNeXt-style)
        width = int(planes * (base_width / BASE_WIDTH)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # project the shortcut so shapes match before the addition
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet3d(nn.Module):
    """3-D ResNet backbone returning the final features plus two early
    feature maps (pre-maxpool and post-layer1) for use as skip connections.

    Optional MC-dropout: with ``bayesian`` set, dropout stays active at
    inference time.
    """
    def __init__(self, block, layers, zero_init_residual=False,
                 groups=1, width_per_group=BASE_WIDTH, replace_stride_with_dilation=None,
                 norm_layer=None, in_channels=1, dropout_rate=0., bayesian=False):
        super(ResNet3d, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm3d
        self._norm_layer = norm_layer
        self.p = dropout_rate
        self.bayesian = bayesian
        self.inplanes = BASE_WIDTH
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # 7x7x7 stem with replication padding, stride-2 downsampling
        self.conv1 = nn.Sequential(nn.ReplicationPad3d(3),
                                   nn.Conv3d(in_channels, self.inplanes, kernel_size=7, stride=2, bias=False))
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, BASE_WIDTH, layers[0])
        self.layer2 = self._make_layer(block, BASE_WIDTH*2, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        # later stages grow more slowly when dilation replaces striding
        self.layer3 = self._make_layer(block, BASE_WIDTH*(2 if replace_stride_with_dilation[0] else 4),
                                       layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, BASE_WIDTH*(2 if replace_stride_with_dilation[1] else 8),
                                       layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
    def dropout(self, x):
        # keep dropout stochastic at eval time when doing MC-dropout
        use_dropout = self.training or self.bayesian
        return F.dropout3d(x, self.p, training=use_dropout, inplace=False)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of `blocks` blocks, downsampling (or
        dilating) in the first block only."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        start = x.clone()  # skip connection at stem resolution
        x = self.maxpool(x)
        x = self.layer1(x)
        mid = x.clone()    # skip connection after the first stage
        x = self.dropout(x)
        x = self.dropout(self.layer2(x))
        x = self.dropout(self.layer3(x))
        x = self.dropout(self.layer4(x))
        return x, start, mid
    def forward(self, x):
        return self._forward_impl(x)
def resnet3d18(**kwargs):
    """Depth config [2, 2, 2, 2] (ResNet-18-style)."""
    return ResNet3d(Bottleneck, [2, 2, 2, 2], **kwargs)
def resnet3d34(**kwargs):
    """Depth config [3, 4, 6, 3].

    NOTE(review): identical to resnet3d50 because both use Bottleneck here;
    torchvision's ResNet-34 uses a basic block — confirm this is intentional.
    """
    return ResNet3d(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet3d50(**kwargs):
    """Depth config [3, 4, 6, 3] (ResNet-50-style)."""
    return ResNet3d(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet3d101(**kwargs):
    """Depth config [3, 4, 23, 3] (ResNet-101-style)."""
    return ResNet3d(Bottleneck, [3, 4, 23, 3], **kwargs)
```
#### File: uncertaintorch/models/uncertainunetv3.py
```python
__all__ = ['UncertainUnetv3']
from torch import nn
import torch.nn.functional as F
from ..learn import *
from .unet_tools import *
from .uncertainty_tools import *
class UncertainUnetv3(UncertainNet):
    """U-Net-style network with two output heads: a synthesis head (`syn`)
    and an uncertainty head (`unc`).

    Mostly-2D kernels (depth-1 third axis) with downsampling only in-plane.
    Dropout placement follows the Bayesian-SegNet pattern in
    ``fwd_bayesian_segnet``; the parent's p/segnet/laplacian/bayesian/concat/
    beta options are forwarded unchanged.
    """
    def __init__(self, s=32, ic=1, oc=1, p=0.15, segnet=True,
                 laplacian=True, bayesian=True, concat=True, beta=25.):
        super().__init__(p, segnet, laplacian, bayesian, concat, beta)
        # c doubles the decoder input channels when skips are concatenated
        c = 2 if concat else 1
        self.start_0, self.start_1 = unet_list(ic,s,s,(7,7,3),(5,5,3),1,(2,2,1))
        self.down1_0, self.down1_1 = unet_list(s,s*2,s*2,(3,3,1),(3,3,1),1,(2,2,1))
        self.down2_0, self.down2_1 = unet_list(s*2,s*4,s*4,(3,3,1),(3,3,1),1,(2,2,1))
        self.bridge = nn.Sequential(*conv(s*4,s*4,(3,3,1)))
        self.up2_0, self.up2_1 = unet_up(s*4*c,s*4,s*2,(3,3,1),(3,3,1),1,1,(2,2,1),concat)
        self.up1_0, self.up1_1 = unet_up(s*2*c,s*2,s,(3,3,1),(3,3,1),1,1,(2,2,1),concat)
        self.end_0, self.end_1 = unet_up(s*c,s,s,(3,3,1),(3,3,1),1,1,(2,2,1),concat)
        # both heads see the decoder output concatenated with the raw input
        self.syn = nn.Sequential(*conv(s+ic,s,3,1), nn.Conv3d(s,oc,1))
        self.unc = nn.Sequential(*conv(s+ic,s,3,1), nn.Conv3d(s,oc,1))
    def fwd_bayesian_segnet(self, x):
        """Forward pass with dropout after the deeper en/decoder stages.

        Returns (prediction, uncertainty-map logits).
        """
        orig = x.clone()
        x = self.start_0(x)
        d1 = x.clone()   # skip at full resolution
        x = self.start_1(x)
        x = self.down1_0(x)
        d2 = x.clone()   # skip after first downsample
        x = self.dropout(x)
        x = self.down1_1(x)
        x = self.down2_0(x)
        d3 = x.clone()   # skip after second downsample
        x = self.dropout(x)
        x = self.down2_1(x)
        x = self.bridge(x)
        x = self.up2_0(x)
        x = self.catadd(x, d3)
        x = self.up2_1(x)
        x = self.dropout(x)
        x = self.up1_0(x)
        x = self.catadd(x, d2)
        x = self.up1_1(x)
        x = self.dropout(x)
        x = self.end_0(x)
        x = self.catadd(x, d1)
        x = self.end_1(x)
        x = self.cat(x,orig)
        yhat = self.syn(x)
        s = self.unc(x)
        return yhat, s
    def fwd_full_bayesian(self,x):
        # not supported by this architecture variant
        raise NotImplementedError
```
#### File: uncertaintorch/models/unet_tools.py
```python
__all__ = ['conv','conv2d','unet_block','unet_block2d','unet_list','unet_up']
from functools import partial
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from ..learn import *
from ..util import *
# activation factory shared by all conv helpers in this module
activation = partial(nn.ReLU, inplace=True)
def conv2d(i, o, k=3, s=1):
    """[ReflectionPad2d?, Conv2d, BatchNorm2d, ReLU] layer list.

    Padding of k//2 per side is prepended only when the kernel needs it, so
    odd kernels keep the spatial size at stride 1.
    """
    if isinstance(k, int):
        pad = k // 2
        layers = [] if k < 3 else [nn.ReflectionPad2d(pad)]
    else:
        # per-side padding in nn.ReflectionPad2d order (last dim first)
        pad = tuple(ks // 2 for pair in zip(reversed(k), reversed(k)) for ks in pair)
        layers = [] if not any(pad) else [nn.ReflectionPad2d(pad)]
    layers.extend([nn.Conv2d(i, o, k, s, bias=False), nn.BatchNorm2d(o), activation()])
    return layers
def unet_block2d(i, m, o, k1, k2):
    """Two stacked conv2d stages (i->m->o) wrapped in a Sequential."""
    stages = conv2d(i, m, k1) + conv2d(m, o, k2)
    return nn.Sequential(*stages)
def conv(i, o, k=3, s=1):
    """[ReplicationPad3d?, Conv3d, BatchNorm3d, ReLU] layer list (3-D twin of
    conv2d); padding is prepended only when the kernel needs it."""
    if isinstance(k, int):
        pad = k // 2
        layers = [] if k < 3 else [nn.ReplicationPad3d(pad)]
    else:
        # per-side padding in nn.ReplicationPad3d order (last dim first)
        pad = tuple(ks // 2 for pair in zip(reversed(k), reversed(k)) for ks in pair)
        layers = [] if not any(pad) else [nn.ReplicationPad3d(pad)]
    layers.extend([nn.Conv3d(i, o, k, s, bias=False), nn.BatchNorm3d(o), activation()])
    return layers
def unet_block(i, m, o, k1, k2):
    """Two stacked conv stages (i->m->o) wrapped in a Sequential."""
    stages = conv(i, m, k1) + conv(m, o, k2)
    return nn.Sequential(*stages)
def unet_list(i, m, o, k1, k2, s1=1, s2=1):
    """Two conv stages returned separately (e.g. to tap a skip connection
    between them)."""
    first = nn.Sequential(*conv(i, m, k1, s1))
    second = nn.Sequential(*conv(m, o, k2, s2))
    return nn.ModuleList([first, second])
def unet_up(i, m, o, k1, k2, s1=1, s2=1, scale_factor=2, cat=True, full=False, upk=1):
    """Decoder stage: an Upconv3d followed by a double-conv block.

    When skips are concatenated (`cat`), the upconv runs at half the block's
    input channels since the skip supplies the other half.
    """
    in_ch = i // (2 if cat else 1)
    upsample = Upconv3d(in_ch, in_ch, scale_factor, full, upk)
    body = nn.Sequential(*conv(i, m, k1, s1), *conv(m, o, k2, s2))
    return nn.ModuleList([upsample, body])
```
#### File: uncertaintorch/util/analysis.py
```python
__all__ = ['tidy_losses', 'tidy_uncertainty']
import pandas as pd
def tidy_losses(train, valid):
    """Flatten per-epoch train/valid loss lists into a tidy DataFrame with
    columns (epoch, type, value, phase)."""
    rows = {'epoch': [], 'type': [], 'value': [], 'phase': []}
    def _emit(epoch, values, phase):
        for v in values:
            rows['epoch'].append(epoch)
            rows['type'].append('loss')
            rows['value'].append(v)
            rows['phase'].append(phase)
    for epoch, (tl, vl) in enumerate(zip(train, valid), 1):
        _emit(epoch, tl, 'train')
        _emit(epoch, vl, 'valid')
    return pd.DataFrame(rows)
def tidy_uncertainty(ep, al, sb):
    """Tidy epistemic/aleatoric/scibilic uncertainty histories into one
    DataFrame.

    Index 0 along the first axis of each argument is the training phase;
    every later index is treated as validation.
    """
    rows = {'epoch': [], 'type': [], 'value': [], 'phase': []}
    def _emit(epoch, values, utype, phase):
        for v in values:
            rows['epoch'].append(epoch)
            rows['type'].append(utype)
            rows['value'].append(v)
            rows['phase'].append(phase)
    for i, (epi, ali, sbi) in enumerate(zip(ep, al, sb)):
        phase = 'train' if i == 0 else 'valid'
        for epoch, (epij, alij, sbij) in enumerate(zip(epi, ali, sbi), 1):
            _emit(epoch, epij, 'epistemic', phase)
            _emit(epoch, alij, 'aleatoric', phase)
            _emit(epoch, sbij, 'scibilic', phase)
    return pd.DataFrame(rows)
```
|
{
"source": "jcrichard/firls",
"score": 3
}
|
#### File: firls/firls/ccd.py
```python
from numba import njit
from numba.types import float64, int64, none, boolean,Tuple,List
import numpy as np
@njit("float64[:,:](float64[:,:])")
def add_constant(data):
"""add constant to the data.
"""
n, p = data.shape
x = np.zeros((n, p + 1))
x[:, 1:] = data
x[:, 0] = 1
return x
@njit("float64(float64,float64)")
def soft_threshold(x, s):
"""Soft thresholding operator.
"""
return np.sign(x) * np.maximum(np.abs(x) - s, 0)
@njit(
    "Tuple((float64[:,:], List(int64)))(float64[:,:],float64[:],List(int64),optional(float64[:,:]),float64[:,:],float64[:,:],float64,float64[:],float64,float64)"
)
def _cycle(beta, h, active_set, bounds,Xty,XtX,fit_intercept,sum_sq_X,lambda_l1,lambda_l2):
    # One pass of cyclic coordinate descent over active_set, updating beta in
    # place via the partial residual h.
    # NOTE(review): as called from ccd_pwls, XtX actually holds the
    # (weighted) design matrix X and Xty is an unused placeholder — the
    # parameter names are misleading; confirm before refactoring.
    for j in active_set:
        if len(active_set) == 0:
            beta = beta * 0
            break
        beta_j_old = beta[j]
        # add coefficient j's contribution back into the residual
        h += beta_j_old * XtX[:, j]
        rho = (XtX[:, j].T@ h)
        if (fit_intercept) and (j == 0):
            # the intercept is never penalized
            beta_j_new = rho / sum_sq_X[j]
        else:
            # lasso soft-thresholding plus ridge shrinkage
            beta_j_new = soft_threshold(rho, lambda_l1) / (
                sum_sq_X[j] + lambda_l2
            )
        if bounds is not None:
            # box-constrain the coefficient to [bounds[j,0], bounds[j,1]]
            beta_j_new = np.minimum(
                np.maximum(beta_j_new, bounds[j, 0]), bounds[j, 1]
            )
        if (lambda_l1 > 0.0) & (abs(beta_j_new) == 0.0):
            # zeroed by the l1 penalty: nothing to subtract from the residual
            beta[j] = beta_j_new
            continue
        h -= beta_j_new * XtX[:, j]
        beta[j] = beta_j_new
    return beta,active_set
@njit(
    "Tuple((float64[:,:],int64))(float64[:,:],float64[:,:],optional(float64[:,:]),optional(float64[:,:]),boolean,float64,float64,optional(float64[:,:]),optional(float64[:,:]),int64,float64)",fastmath=True
)
def ccd_pwls(
    X,
    y,
    W=None,
    b = None,
    fit_intercept=False,
    lambda_l1=0.0,
    lambda_l2=0.0,
    Gamma=None,
    bounds=None,
    max_iters=1000,
    tol=1e-3
):
    """Coordinate descent algorithm for penalized weighted least squared. Please respect the signature."""
    # Solves min_beta ||W^.5 (y - X beta)||^2 + lambda_l1 |beta|_1
    #                 + lambda_l2 |beta|_2^2 via cyclic coordinate descent.
    # Returns (beta, number of outer iterations run).
    # NOTE(review): b and Gamma are accepted but unused here.
    if fit_intercept:
        X = add_constant(X)
    n, p = X.shape
    if W is None:
        sum_sq_X = np.sum(X ** 2, 0)
    else:
        # fold the observation weights into X and y once up front
        sum_sq_X = np.sum((X ** 2) * W, 0)
        X = X * W ** 0.5
        y = y * W ** 0.5
    beta = np.zeros((p,1))
    beta_old = np.zeros_like(beta)+1
    # _cycle's "XtX" argument is the (weighted) design matrix itself;
    # Xty is an unused placeholder required by the jitted signature
    XtX = X
    Xty = np.empty((1,1))
    active_set = list(range(p))
    h = y.copy().ravel()  # running partial residual
    for niter in range(max_iters):
        beta, active_set = _cycle(beta, h, active_set, bounds, Xty, XtX, fit_intercept, sum_sq_X, lambda_l1, lambda_l2)
        if np.sum((beta_old - beta) ** 2) ** 0.5 < tol:
            # converged on the active set: do one full sweep to confirm
            beta, active_set = _cycle(beta, h, list(range(p)), bounds, Xty, XtX, fit_intercept, sum_sq_X, lambda_l1,
                                      lambda_l2)
            if np.sum((beta_old - beta) ** 2) ** 0.5 < tol:
                break
        beta_old = np.copy(beta)
    return beta,niter
```
#### File: firls/tests/test_ccd.py
```python
from firls.ccd import ccd_pwls
from firls.tests.simulate import simulate_supervised_gaussian
import numpy as np
import pytest
def test_wlsq():
    """Unpenalized CCD must reproduce the closed-form OLS solution."""
    n = 1000
    y, X, true_beta = simulate_supervised_gaussian(n, 40)
    beta_ccd, _ = ccd_pwls(
        X,
        y.reshape(n, 1),
        W=None,
        b=None,
        fit_intercept=False,
        lambda_l1=0.0,
        lambda_l2=0.0,
        Gamma=None,
        bounds=None,
        max_iters=10000,
        tol=1e-10,
    )
    beta_ols = np.linalg.inv(X.T @ X) @ X.T @ y
    np.testing.assert_almost_equal(beta_ccd.ravel(), beta_ols, 4)
```
|
{
"source": "jcrickmer/pyvision",
"score": 3
}
|
#### File: vision/reconstruction/plywriter.py
```python
import cStringIO
def red(value, lower, upper):
    """Map value in [lower, upper] linearly onto a shade of pure red."""
    intensity = int((value - lower) / (upper - lower) * 255)
    return intensity, 0, 0
def filtertrue(value, lower, upper):
    """Keep every voxel."""
    return True
def filterlower(value, lower, upper):
    """Keep only voxels strictly above the lower bound."""
    return lower < value
def filterupper(value, lower, upper):
    """Keep only voxels strictly below the upper bound."""
    return upper > value
def write(outfile, data, colormap=red, condition=filtertrue, bounds=None):
    """Write a 3-D voxel array as an ASCII PLY point cloud.

    Each voxel passing *condition* becomes one colored vertex; *bounds*
    ((xmin,xmax),(ymin,ymax),(zmin,zmax)) maps grid indices to world
    coordinates and defaults to the raw index ranges.
    """
    body = cStringIO.StringIO()
    lower, upper = data.min(), data.max()
    xs, ys, zs = data.shape
    if not bounds:
        bounds = (0, xs), (0, ys), (0, zs)
    (xmin, xmax), (ymin, ymax), (zmin, zmax) = bounds
    count = 0
    for x in range(xs):
        xi = float(x) / xs * (xmax - xmin) + xmin
        for y in range(ys):
            yi = float(y) / ys * (ymax - ymin) + ymin
            for z in range(zs):
                value = data[x, y, z]
                if not condition(value, lower, upper):
                    continue
                zi = float(z) / zs * (zmax - zmin) + zmin
                r, g, b = colormap(value, lower, upper)
                count += 1
                body.write("{0} {1} {2} {3} {4} {5}\n".format(xi, yi, zi,
                                                              r, g, b))
    # header needs the final vertex count, hence the two-pass buffer
    for line in ("ply\n",
                 "format ascii 1.0\n",
                 "element vertex {0}\n".format(count),
                 "property float x\n",
                 "property float y\n",
                 "property float z\n",
                 "property uchar diffuse_red\n",
                 "property uchar diffuse_green\n",
                 "property uchar diffuse_blue\n",
                 "end_header\n"):
        outfile.write(line)
    outfile.write(body.getvalue())
    body.close()
```
|
{
"source": "jcrigby/jscfs",
"score": 2
}
|
#### File: jcrigby/jscfs/jscfs.py
```python
import errno
import json
import llfuse
import logging
import os
import stat
import sys
import time
import unittest
from argparse import ArgumentParser
from collections import namedtuple
from grp import getgrnam
from pwd import getpwnam
log = logging.getLogger(__name__)
from enum import Enum
class NodeType(Enum):
    """Kinds of filesystem nodes the JSON tree may declare."""
    File = 'file'
    Dir = 'directory'

# (name: bytes, inode: int) pair used for directory listings
DirEntry = namedtuple('DirEntry', 'name inode')
class Node:
    """In-memory inode built from one JSON object.

    Directories get their children converted to DirEntry tuples (names
    utf-8 encoded); files carry optional utf-8 encoded contents. Raises
    llfuse.FUSEError(ENOENT) for unknown node types.
    """
    def __init__(self, inode, **kwargs):
        self.inode = inode
        self.name = kwargs.get('name', None)
        self.type = NodeType(kwargs.get('type', None))
        mode = int(kwargs.get('mode', None), 8)  # mode is an octal string, e.g. "0755"
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        attr.st_mode = mode
        if self.type == NodeType.Dir:
            attr.st_mode |= stat.S_IFDIR
            self.children = kwargs.get('children', None)
            for n, child in enumerate(self.children):
                self.children[n] = DirEntry(child.name.encode('utf-8'), child.inode)
        elif self.type == NodeType.File:
            attr.st_mode |= stat.S_IFREG
            self.contents = kwargs.get('contents', None)
            if self.contents is not None:
                self.contents = self.contents.encode('utf-8')
                attr.st_size = len(self.contents)
        else:
            raise llfuse.FUSEError(errno.ENOENT)
        stamp = int(time.time() * 1e9)
        attr.st_atime_ns = stamp
        attr.st_ctime_ns = stamp
        attr.st_mtime_ns = stamp
        # BUG FIX: the uid must come from the passwd entry for "user" and the
        # gid from the group entry for "group"; the original looked each name
        # up in the wrong database and stored the ids in the swapped fields.
        attr.st_uid = getpwnam(kwargs.get('user', None)).pw_uid
        attr.st_gid = getgrnam(kwargs.get('group', None)).gr_gid
        attr.st_ino = inode
        self.attr = attr
    def __repr__(self):
        s = '({1.st_ino} {0.name} {0.type} {1.st_mode:o} {1.st_uid} {1.st_gid})'.format(self, self.attr)
        if self.type == NodeType.Dir and self.children is not None:
            s += "\n\t{}".format(self.children)
        if self.type == NodeType.File and self.contents is not None:
            s += "contents:[{}]".format(self.contents)
        return s
class JsonSysClassFS(llfuse.Operations):
    """A small read/write FUSE filesystem whose tree comes from a JSON string.

    The JSON is decoded with an object_hook that allocates an inode in
    ``self.superblock`` for every object as it is parsed (leaves first);
    the node named "/" is pinned to llfuse.ROOT_INODE.
    """
    def __init__(self, json_str):
        super(JsonSysClassFS, self).__init__()
        self.root_inode = llfuse.ROOT_INODE
        # indices 0..ROOT_INODE are placeholders; other nodes append after
        self.superblock = [None] * (self.root_inode + 1)
        self.jdata = json.loads(json_str, object_hook=self.mknode)
    def mknode(self, d):
        """json.loads object_hook: turn one decoded JSON object into a Node."""
        name = d.get('name', None)
        assert name is not None
        node = Node(len(self.superblock), **d)
        if node.name == '/':
            node.inode = self.root_inode
            self.superblock[self.root_inode] = node
        else:
            self.superblock.append(node)
        assert d.get('type', None) is not None
        return node
    def __repr__(self):
        return '\n'.join(str(n) for n in self.superblock)
    # llfuse.Operations start here
    def getattr(self, inode, ctx=None):
        """Return the stored EntryAttributes for *inode*."""
        attr = self.superblock[inode].attr
        log.debug('getattr for inode %d:%s', inode, attr)
        return attr
    def lookup(self, dirinode, name, ctx=None):
        """Find *name* (bytes) in directory *dirinode*; ENOENT if absent."""
        log.debug('lookup for %s in %d', name, dirinode)
        dirnode = self.superblock[dirinode]
        assert dirnode.type == NodeType.Dir
        for child in dirnode.children:
            if child.name == name:
                return self.getattr(child.inode)
        raise llfuse.FUSEError(errno.ENOENT)
    def opendir(self, inode, ctx):
        log.debug('opendir %d', inode)
        return inode  # the inode doubles as the directory handle
    def readdir(self, dirinode, off):
        log.debug('opendir %d starting at %d', dirinode, off)
        dirnode = self.superblock[dirinode]
        assert dirnode.type == NodeType.Dir
        # BUG FIX: the resume offset must be absolute; the original yielded
        # n+1 relative to the slice, so a listing resumed at off > 0 would
        # restart from offset 1 and loop over the first entries forever.
        for n, child in enumerate(dirnode.children[off:]):
            yield (child.name, self.getattr(child.inode), off + n + 1)
    def open(self, inode, flags, ctx):
        log.debug('open %d', inode)
        filenode = self.superblock[inode]
        assert filenode.type == NodeType.File
        #if flags & os.O_RDWR or flags & os.O_WRONLY:
        #    raise llfuse.FUSEError(errno.EPERM)
        return inode  # the inode doubles as the file handle
    def release(self, inode):
        pass  # nothing to clean up; contents live in the superblock
    def read(self, inode, off, size):
        """Return up to *size* bytes of the file starting at *off*."""
        log.debug('read %d %d:%d', inode, off, size)
        filenode = self.superblock[inode]
        assert filenode.type == NodeType.File
        if filenode.contents is not None:
            return filenode.contents[off:off+size]
        return b''
    def write(self, inode, off, buf):
        """Overwrite/extend file contents at *off*; returns bytes written."""
        log.debug('write %d %d:%s', inode, off, buf)
        filenode = self.superblock[inode]
        assert filenode.type == NodeType.File
        if filenode.contents is None:
            filenode.contents = b''
        # BUG FIX: the original referenced an undefined name `offset`
        # (NameError on every write); the parameter is `off`.
        filenode.contents = (filenode.contents[:off] + buf +
                             filenode.contents[off + len(buf):])
        return len(buf)
class TestJscfsMethods(unittest.TestCase):
    """Unit tests for the JSON fixture and JsonSysClassFS construction."""

    # Raw JSON fixture: a miniature /sys/devices-style tree.  Directories
    # carry a "children" list; files may carry an optional "contents" string.
    test_json_str = """[
    {
        "name" : "/", "type" : "directory",
        "mode" : "0755", "user" : "root", "group" : "root",
        "children" : [
            {
                "name" : "devices", "type" : "directory",
                "mode" : "0755", "user" : "root", "group" : "root",
                "children" : [
                    {
                        "name" : "cpu", "type" : "directory",
                        "mode" : "0755", "user" : "root", "group" : "root",
                        "children" : [
                            {
                                "name" : "perf_event_mux_interval_ms", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            },
                            {
                                "name" : "power", "type" : "directory",
                                "mode" : "0755", "user" : "root", "group" : "root",
                                "children" : [
                                    {
                                        "name" : "runtime_active_kids", "type" : "file",
                                        "mode" : "0444", "user" : "root", "group" : "root"
                                    },
                                    {
                                        "name" : "runtime_suspended_time", "type" : "file",
                                        "mode" : "0444", "user" : "root", "group" : "root"
                                    }
                                ]
                            },
                            {
                                "name" : "uevent", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            }
                        ]
                    },
                    {
                        "name" : "breakpoint", "type" : "directory",
                        "mode" : "0755", "user" : "root", "group" : "root",
                        "children" : [
                            {
                                "name" : "power", "type" : "directory",
                                "mode" : "0755", "user" : "root", "group" : "root",
                                "children" : [
                                    {
                                        "name" : "runtime_status", "type" : "file",
                                        "mode" : "0666", "user" : "root", "group" : "root",
                                        "contents" : "contents of runtime_status"
                                    }
                                ]
                            },
                            {
                                "name" : "uevent", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            }
                        ]
                    }
                ]
            }
        ]
    }
    ]
    """

    # Python-literal mirror of test_json_str; what plain json.loads (without
    # the Node object_hook) should produce from the fixture above.
    expectedjdata = [{
        "name" : "/", "type" : "directory",
        "mode" : "0755", "user" : "root", "group" : "root",
        "children" : [
            {
                "name" : "devices", "type" : "directory",
                "mode" : "0755", "user" : "root", "group" : "root",
                "children" : [
                    {
                        "name" : "cpu", "type" : "directory",
                        "mode" : "0755", "user" : "root", "group" : "root",
                        "children" : [
                            {
                                "name" : "perf_event_mux_interval_ms", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            },
                            {
                                "name" : "power", "type" : "directory",
                                "mode" : "0755", "user" : "root", "group" : "root",
                                "children" : [
                                    {
                                        "name" : "runtime_active_kids", "type" : "file",
                                        "mode" : "0444", "user" : "root", "group" : "root"
                                    },
                                    {
                                        "name" : "runtime_suspended_time", "type" : "file",
                                        "mode" : "0444", "user" : "root", "group" : "root"
                                    }
                                ]
                            },
                            {
                                "name" : "uevent", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            }
                        ]
                    },
                    {
                        "name" : "breakpoint", "type" : "directory",
                        "mode" : "0755", "user" : "root", "group" : "root",
                        "children" : [
                            {
                                "name" : "power", "type" : "directory",
                                "mode" : "0755", "user" : "root", "group" : "root",
                                "children" : [
                                    {
                                        "name" : "runtime_status", "type" : "file",
                                        "mode" : "0666", "user" : "root", "group" : "root",
                                        "contents" : "contents of runtime_status"
                                    }
                                ]
                            },
                            {
                                "name" : "uevent", "type" : "file",
                                "mode" : "0644", "user" : "root", "group" : "root"
                            }
                        ]
                    }
                ]
            }
        ]
    }
    ]

    def test_json_import(self):
        """The fixture string parses into the expected Python structure."""
        jdata = json.loads(self.test_json_str)
        self.assertTrue(jdata == self.expectedjdata)

    def test_llfuse_rootinode(self):
        """Sanity-check the ROOT_INODE assumption the filesystem relies on."""
        # make sure our assumption about ROOT_INODE is true
        self.assertTrue(llfuse.ROOT_INODE > 0)

    def test_jscfs(self):
        """Constructing the filesystem from the fixture should not raise."""
        jscfs = JsonSysClassFS(self.test_json_str)
        # there should be as many entries in superblock as json "name" nodes
        # plus one
        # NOTE(review): this assertion only counts "name" occurrences in the
        # raw fixture text; it never checks len(jscfs.superblock) against it.
        # Consider asserting on the superblock length as the comment implies.
        self.assertTrue(self.test_json_str.count("name") == 12)
        print(jscfs)
def init_logging(debug=False):
    """Attach a stream handler to the root logger at DEBUG or INFO level."""
    level = logging.DEBUG if debug else logging.INFO
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(threadName)s: '
                                  '[%(name)s] %(message)s',
                                  datefmt="%Y-%m-%d %H:%M:%S")
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(level)
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(handler)
def parse_args():
    """Parse the command line into an options namespace."""
    parser = ArgumentParser()
    parser.add_argument('mountpoint', type=str,
                        help='Where to mount the file system')
    # Both debug switches share the same shape; declare them in one pass.
    for flag, msg in (('--debug', 'Enable debugging output'),
                      ('--debug-fuse', 'Enable FUSE debugging output')):
        parser.add_argument(flag, action='store_true', default=False,
                            help=msg)
    return parser.parse_args()
def main():
    """Entry point: mount the demo JSON filesystem and serve FUSE requests."""
    options = parse_args()
    init_logging(options.debug)
    # Reuse the unit-test JSON fixture as the filesystem's content.
    testfs = JsonSysClassFS(TestJscfsMethods.test_json_str)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=jscfs')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(testfs, options.mountpoint, fuse_options)
    try:
        # Single worker: the in-memory superblock is not thread-safe.
        llfuse.main(workers=1)
    except:
        # On any failure, detach cleanly without forcing an unmount, then
        # propagate the original exception to the caller.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
if __name__ == '__main__':
    # With -t/--test as the first argument, run the unit tests instead of
    # mounting; strip the flag first so unittest.main() does not see it.
    if len(sys.argv) > 1 and sys.argv[1] in ["-t", "--test"]:
        del sys.argv[1]  # idiom: `del` is a statement, not a function call
        sys.exit(unittest.main())
    main()
```
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.