max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
backend/www/test/auth_test.py | xuantan/viewfinder | 645 | 12772984 | #!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Account authorization tests for Facebook and Google accounts.
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import json
import mock
import os
import time
import unittest
import urllib
from copy import deepcopy
from cStringIO import StringIO
from tornado import httpclient, options
from tornado.ioloop import IOLoop
from urlparse import urlparse
from viewfinder.backend.base import message, util
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.user import User
from viewfinder.backend.op.fetch_contacts_op import FetchContactsOperation
from viewfinder.backend.www import auth
from viewfinder.backend.www.test import service_base_test
from viewfinder.backend.www.www_util import GzipEncode
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AuthTestCase(service_base_test.ServiceBaseTestCase):
"""Initializes the test datastore and the viewfinder schema.
"""
def setUp(self):
super(AuthTestCase, self).setUp()
self._CreateSimpleTestAssets()
self._google_user_dict = {'family_name': 'Kimball', 'name': '<NAME>', 'locale': 'en',
'gender': 'male', 'email': '<EMAIL>',
'link': 'https://plus.google.com/id',
'given_name': 'Andrew', 'id': 'id', 'verified_email': True}
self._facebook_user_dict = {'first_name': 'Andrew', 'last_name': 'Kimball', 'name': '<NAME>',
'id': 'id', 'link': 'http://www.facebook.com/andrew.kimball.50',
'timezone':-7, 'locale': 'en_US', 'email': '<EMAIL>',
'picture': {'data': {'url': 'http://foo.com/pic.jpg',
'is_silhouette': False}},
'verified': True}
self._viewfinder_user_dict = {'name': '<NAME>', 'given_name': 'Andrew', 'email': '<EMAIL>'}
self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S',
'os': 'iOS 5.0.1', 'push_token': 'push_token',
'device_uuid': '926744AC-8540-4103-9F3F-C84AA2F6D648',
'test_udid': '7d527095d4e0539aba40c852547db5da00000000',
'country': 'US', 'language': 'en'}
self._prospective_user, _, _ = self._CreateProspectiveUser()
self._register_user_dict = {'email': self._prospective_user.email,
'name': '<NAME>',
'given_name': 'Jimmy',
'family_name': 'John'}
def tearDown(self):
super(AuthTestCase, self).tearDown()
options.options.freeze_new_accounts = False
def testRegisterWithCookie(self):
"""Register user, overriding current logged-in user."""
# Override registered user.
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
google_cookie = self._GetSecureUserCookie(user, device_id)
user2, _ = self._tester.RegisterFacebookUser(self._facebook_user_dict,
self._mobile_device_dict,
user_cookie=google_cookie)
self.assertNotEqual(user.user_id, user2.user_id)
# Override prospective user.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
user, _ = self._tester.RegisterViewfinderUser(self._viewfinder_user_dict, user_cookie=cookie)
self.assertNotEqual(self._prospective_user.user_id, user.user_id)
# Override with registration of prospective user.
user, _ = self._tester.RegisterViewfinderUser(self._register_user_dict, user_cookie=self._cookie)
self.assertNotEqual(user.user_id, self._user.user_id)
def testEmailAlertSettings(self):
"""Test that email/push alert settings are updated properly during registration."""
def _ValidateAlerts(email_alerts, push_alerts):
settings = self._RunAsync(AccountSettings.QueryByUser, self._client, self._prospective_user.user_id, None)
self.assertEqual(settings.email_alerts, email_alerts)
self.assertEqual(settings.sms_alerts, AccountSettings.SMS_NONE)
self.assertEqual(settings.push_alerts, push_alerts)
# Skip cleanup validation of alerts because a new device is created in this test that did not receive
# notifications sent as part of setUp() call.
self._skip_validation_for = ['Alerts']
# Register a prospective user using the web device.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
_ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)
_ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
# Update the user's email alert setting and validate the changed setting.
self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'none'})
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)
# Login and register a new mobile device and validate that email alerts were turned off
# and push alerts turned on.
self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'on_share_new'})
self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_ALL)
# Turn off push alerts, and then re-login, and validate that they were not turned back on.
self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
self._tester.LoginViewfinderUser(self._register_user_dict)
self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)
def testSmsAlertSettings(self):
"""Test that SMS/push alert settings are updated properly during registration."""
def _ValidateAlerts(sms_alerts, push_alerts):
settings = self._RunAsync(AccountSettings.QueryByUser, self._client, prospective_user.user_id, None)
self.assertEqual(settings.email_alerts, AccountSettings.EMAIL_NONE)
self.assertEqual(settings.sms_alerts, sms_alerts)
self.assertEqual(settings.push_alerts, push_alerts)
# Skip cleanup validation of alerts because a new device is created in this test that did not receive
# notifications sent as part of setUp() call.
self._skip_validation_for = ['Alerts']
# Create prospective user with mobile phone.
ident_key = 'Phone:+14251234567'
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
[ident_key])
prospective_ident = self._RunAsync(Identity.Query, self._client, ident_key, None)
prospective_user = self._RunAsync(User.Query, self._client, prospective_ident.user_id, None)
register_user_dict = {'phone': prospective_user.phone,
'name': '<NAME>',
'given_name': 'Jimmy',
'family_name': 'John'}
# Register a prospective user using the web device.
cookie = self._GetSecureUserCookie(prospective_user, prospective_user.webapp_dev_id)
_ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
user, device_id = self._tester.RegisterViewfinderUser(register_user_dict)
_ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
# Login and register a new mobile device and validate that SMS alerts were turned off
# and push alerts turned on.
self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_ALL)
# Turn off push alerts, and then re-login, and validate that they were not turned back on.
self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
self._tester.LoginViewfinderUser(register_user_dict)
self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_NONE)
def testMultipleAuthorities(self):
"""Test multiple authorities that authenticate same identity."""
# Login as Google user, then as Viewfinder user with same email, then again as same Google user.
self._tester.RegisterGoogleUser({'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True})
self._tester.LoginViewfinderUser({'email': '<EMAIL>'},
self._mobile_device_dict)
identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
self.assertEqual(identity.authority, 'Viewfinder')
self.assertEqual(identity.expires, 0)
self._tester.LoginGoogleUser({'email': '<EMAIL>', 'verified_email': True})
identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
self.assertEqual(identity.authority, 'Google')
def testLoginWithCookie(self):
"""Test successful login override of current logged-in user."""
# Login with cookie from same user.
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
facebook_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=facebook_cookie)
# Login with cookie from different user.
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
google_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=google_cookie)
# Login with cookie from prospective user.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, user_cookie=cookie)
def testErrorFormat(self):
"""Test that error returned by the service handler is properly formed."""
ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
auth_info_dict = {'identity': ident_dict['key']}
url = self._tester.GetUrl('/login/viewfinder')
request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, allow_errors=[403])
self.assertEqual(json.loads(response.body),
{'error': {'id': 'NO_USER_ACCOUNT',
'method': 'login',
'message': 'We can\'t find your Viewfinder account. Are you sure you used ' +
'<EMAIL> to sign up?'}})
def testLoginWithProspective(self):
"""ERROR: Try to log into a prospective user account."""
self.assertRaisesHttpError(403, self._tester.LoginViewfinderUser, self._register_user_dict)
def testLinkWithProspective(self):
"""ERROR: Try to link another identity to a prospective user."""
# Link with cookie from prospective user, using Facebook account that is not yet linked.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
self.assertRaisesHttpError(403, self._tester.LinkFacebookUser, self._facebook_user_dict, user_cookie=cookie)
def testLinkAlreadyLinked(self):
"""ERROR: Try to link a Google account that is already linked to a different Viewfinder account."""
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
facebook_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.RegisterGoogleUser(self._google_user_dict)
self.assertRaisesHttpError(403, self._tester.LinkGoogleUser, self._google_user_dict,
self._mobile_device_dict, user_cookie=facebook_cookie)
def testUpdateFriendAttribute(self):
"""Update name of a user and ensure that each friend is notified."""
# Create a prospective user by sharing with an email.
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
['Email:<EMAIL>', self._user2.user_id])
# Register the user and verify friends are notified.
self._tester.RegisterGoogleUser(self._google_user_dict)
response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
self.assertEqual(response_dict['notifications'][0]['invalidate'], {u'users': [5]})
def testRegisterContact(self):
"""Register an identity that is the target of a contact, which will
be bound to a user_id as a result.
"""
# Create a contact.
user_dict = {'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True}
identity_key = 'Email:%s' % user_dict['email']
contact_dict = Contact.CreateContactDict(self._user.user_id,
[(identity_key, None)],
util._TEST_TIME,
Contact.GMAIL,
name=user_dict['name'])
self._UpdateOrAllocateDBObject(Contact, **contact_dict)
# Register the new user.
user, device_id = self._tester.RegisterGoogleUser(user_dict)
response_dict = self._tester.QueryNotifications(self._cookie, 1, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['first register contact'])
def testRegisterProspectiveContact(self):
"""Register an identity that is the target of a contact (that is still a prospective user)."""
for user_id in [self._user.user_id, self._user2.user_id]:
# Create several contacts.
identity_key = 'Email:%s' % self._prospective_user.email
contact_dict = Contact.CreateContactDict(user_id,
[(identity_key, None)],
util._TEST_TIME,
Contact.GMAIL,
name='Mr. John')
self._UpdateOrAllocateDBObject(Contact, **contact_dict)
# Register the prospective user.
user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)
# Expect friend & contact notifications.
response_dict = self._tester.QueryNotifications(self._cookie, 2, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['register friend', 'first register contact'])
# Expect only contact notification.
response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['first register contact'])
# Expect only friend notification.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
response_dict = self._tester.QueryNotifications(cookie, 2, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['register friend', 'share_new'])
def testNewIdentityOnly(self):
"""Register existing user and device, but create new identity via link."""
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
cookie = self._GetSecureUserCookie(user, device_id)
self._mobile_device_dict['device_id'] = device_id
self._tester.LinkFacebookUser(self._facebook_user_dict, self._mobile_device_dict, cookie)
def testNewDeviceOnly(self):
"""Register existing user and identity, but create new device as part of login."""
self._tester.RegisterGoogleUser(self._google_user_dict)
self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict)
def testDuplicateToken(self):
"""Register device with push token that is already in use by another device."""
self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
def testAsyncRequest(self):
"""Send async register request."""
ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
auth_info_dict = {'identity': ident_dict['key']}
url = self._tester.GetUrl('/link/fakeviewfinder')
request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, user_cookie=self._cookie)
response_dict = json.loads(response.body)
self._validate = False
# Wait until notification is written by the background fetch_contacts op.
while True:
notification = self._RunAsync(Notification.QueryLast, self._client, response_dict['user_id'])
if notification.name == 'fetch_contacts':
self.assertEqual(notification.op_id, response_dict['headers']['op_id'])
break
self._RunAsync(IOLoop.current().add_timeout, time.time() + .1)
def testDeviceNoUser(self):
"""ERROR: Try to register existing device without existing user."""
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._mobile_device_dict['device_id'] = device_id
self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict,
self._mobile_device_dict)
def testDeviceNotOwned(self):
"""ERROR: Try to register existing device that is not owned by the
existing user.
"""
self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._mobile_device_dict['device_id'] = 1000
self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
self._mobile_device_dict)
def testRegisterFreezeNewAccounts(self):
"""ERROR: Verify that attempt to register fails if --freeze_new_accounts
is true. This is the kill switch the server can throw to stop the tide
of incoming account registrations.
"""
options.options.freeze_new_accounts = True
exc = self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
self._mobile_device_dict)
error_dict = json.loads(exc.response.body)
self.assertEqual(error_dict['error']['message'], auth._FREEZE_NEW_ACCOUNTS_MESSAGE)
self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict)
def testLoginWithUnboundIdentity(self):
"""ERROR: Try to login with an identity that exists, but is not bound to a user."""
self._UpdateOrAllocateDBObject(Identity, key='Email:<EMAIL>')
self.assertRaisesHttpError(403,
self._tester.LoginViewfinderUser,
self._viewfinder_user_dict,
self._mobile_device_dict)
def testBadRequest(self):
"""ERROR: Verify that various malformed and missing register fields result
in a bad request (400) error.
"""
# Missing request dict.
url = self.get_url('/register/facebook') + '?' + urllib.urlencode({'access_token': 'dummy'})
self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict='')
# Malformed request dict.
self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict={'device': 'foo'})
def testRegisterExisting(self):
"""ERROR: Try to register a user that already exists."""
self._tester.RegisterViewfinderUser(self._viewfinder_user_dict)
self.assertRaisesHttpError(403,
self._tester.RegisterViewfinderUser,
self._viewfinder_user_dict,
self._mobile_device_dict)
def testLogout(self):
"""Ensure that logout sends back a cookie with an expiration time."""
url = self._tester.GetUrl('/logout')
response = _SendAuthRequest(self._tester, url, 'GET', user_cookie=self._cookie)
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['location'], '/')
self.assertIn('user', response.headers['Set-Cookie'])
self.assertIn('expires', response.headers['Set-Cookie'])
self.assertIn('Domain', response.headers['Set-Cookie'])
def testSessionCookie(self):
"""Test "use_session_cookie" option in auth request."""
# First register a user, requesting a session cookie.
auth_info_dict = {'identity': 'Email:<EMAIL>',
'name': '<NAME>',
'given_name': 'Andy',
'password': '<PASSWORD>'}
url = self._tester.GetUrl('/register/viewfinder')
request_dict = _CreateRegisterRequest(None, auth_info_dict)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('Set-Cookie', response.headers)
identity = self._tester._RunAsync(Identity.Query, self._client, auth_info_dict['identity'], None)
url = self._tester.GetUrl('/verify/viewfinder')
request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION,
'synchronous': True},
'identity': identity.key,
'access_token': identity.access_token,
'use_session_cookie': True}
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('expires', response.headers['Set-Cookie'])
cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
# Now log in and request a session cookie.
del auth_info_dict['name']
del auth_info_dict['given_name']
url = self._tester.GetUrl('/login/viewfinder')
request_dict = _CreateRegisterRequest(None, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertIn('expires', response.headers['Set-Cookie'])
request_dict['use_session_cookie'] = True
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('expires', response.headers['Set-Cookie'])
cookie = self._tester.GetCookieFromResponse(response)
cookie_user_dict = self._tester.DecodeUserCookie(cookie)
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
# Now use the session cookie to make a service request and verify it's preserved.
request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION, 'synchronous': True}}
headers = {'Content-Type': 'application/json',
'X-Xsrftoken': 'fake_xsrf',
'Cookie': '_xsrf=fake_xsrf;user=%s' % cookie}
response = self._RunAsync(self.http_client.fetch,
self._tester.GetUrl('/service/query_followed'),
method='POST',
body=json.dumps(request_dict),
headers=headers)
cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
def _CreateRegisterRequest(device_dict=None, auth_info_dict=None, synchronous=True,
version=message.MAX_SUPPORTED_MESSAGE_VERSION):
"""Returns a new AUTH_REQUEST dict that has been populated with information from the
specified dicts.
"""
request_dict = {'headers': {'version': version}}
util.SetIfNotNone(request_dict, 'device', device_dict)
util.SetIfNotNone(request_dict, 'auth_info', auth_info_dict)
if synchronous:
request_dict['headers']['synchronous'] = True
return request_dict
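# Illustrative note (not from the original source): given both dicts, the helper
# above builds a request shaped like the following (assuming util.SetIfNotNone
# only sets keys for non-None values):
#   _CreateRegisterRequest({'name': 'test phone'}, {'identity': 'Email:foo'})
#     => {'headers': {'version': MAX_SUPPORTED_MESSAGE_VERSION, 'synchronous': True},
#         'device': {'name': 'test phone'},
#         'auth_info': {'identity': 'Email:foo'}}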
def _AddMockJSONResponse(mock_client, url, response_dict):
"""Add a mapping entry to the mock client such that requests to
"url" will return an HTTP response containing the JSON-formatted
"response_dict".
"""
def _CreateResponse(request):
return httpclient.HTTPResponse(request, 200,
headers={'Content-Type': 'application/json'},
buffer=StringIO(json.dumps(response_dict)))
mock_client.map(url, _CreateResponse)
def _SendAuthRequest(tester, url, http_method, user_cookie=None, request_dict=None, allow_errors=None):
"""Sends request to auth service. If "request_dict" is defined, dumps it as a JSON body.
If "user_cookie" is defined, automatically adds a "Cookie" header. Raises an HTTPError if
an HTTP error is returned, unless the error code is part of the "allow_errors" set. Returns
the HTTP response object on success.
"""
headers = {'Content-Type': 'application/json',
'Content-Encoding': 'gzip'}
if user_cookie is not None:
headers['Cookie'] = 'user=%s' % user_cookie
# All requests are expected to have xsrf cookie/header.
headers['X-Xsrftoken'] = 'fake_xsrf'
headers['Cookie'] = headers['Cookie'] + ';_xsrf=fake_xsrf' if headers.has_key('Cookie') else '_xsrf=fake_xsrf'
with mock.patch.object(FetchContactsOperation, '_SKIP_UPDATE_FOR_TEST', True):
response = tester._RunAsync(tester.http_client.fetch, url, method=http_method,
body=None if request_dict is None else GzipEncode(json.dumps(request_dict)),
headers=headers, follow_redirects=False)
if response.code >= 400:
if allow_errors is None or response.code not in allow_errors:
response.rethrow()
return response
def _AuthFacebookOrGoogleUser(tester, action, user_dict, ident_dict, device_dict, user_cookie):
"""Registers a user, identity, and device using the auth web service. The interface to Facebook
or Google is mocked, with the contents of "user_dict" returned in lieu of what the real service
would return. If "device_dict" is None, then simulates the web experience; else simulates the
mobile device experience. If "user_cookie" is not None, then simulates case where calling user
is already logged in when registering the new user. Returns the HTTP response that was returned
by the auth service.
"""
if device_dict is None:
# Web client.
url = tester.GetUrl('/%s/%s' % (action, ident_dict['authority'].lower()))
response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
assert response.code == 302, response.code
# Invoke authentication again, this time sending the code.
url = tester.GetUrl('/%s/%s?code=code' % (action, ident_dict['authority'].lower()))
response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
assert response.code == 302, response.code
assert response.headers['location'].startswith('/view')
else:
if ident_dict['authority'] == 'Facebook':
url = tester.GetUrl('/%s/facebook?access_token=access_token' % action)
else:
url = tester.GetUrl('/%s/google?refresh_token=refresh_token' % action)
request_dict = _CreateRegisterRequest(device_dict)
response = _SendAuthRequest(tester, url, 'POST', user_cookie=user_cookie, request_dict=request_dict)
return response
def _ValidateAuthUser(tester, action, user_dict, ident_dict, device_dict, user_cookie, auth_response):
"""Validates an auth action that has taken place and resulted in the HTTP response given
by "auth_response".
"""
validator = tester.validator
# Validate the response from a GET (device_dict is None) or POST to auth service.
if device_dict is None:
# Get the id of the user that should have been created by the registration.
actual_identity = tester._RunAsync(Identity.Query, validator.client, ident_dict['key'], None)
actual_user_id = actual_identity.user_id
else:
# Extract the user_id and device_id from the JSON response.
response_dict = json.loads(auth_response.body)
actual_op_id = response_dict['headers']['op_id']
actual_user_id = response_dict['user_id']
actual_device_id = response_dict.get('device_id', None)
# Verify that the cookie in the response contains the correct information.
cookie_user_dict = tester.DecodeUserCookie(tester.GetCookieFromResponse(auth_response))
assert cookie_user_dict['user_id'] == actual_user_id, (cookie_user_dict, actual_user_id)
assert device_dict is None or 'device_id' not in device_dict or \
cookie_user_dict['device_id'] == device_dict['device_id'], \
(cookie_user_dict, device_dict)
actual_user = tester._RunAsync(User.Query, validator.client, actual_user_id, None)
if device_dict is None:
# If no mobile device was used, then web device id is expected.
actual_device_id = actual_user.webapp_dev_id
# Get notifications that were created. There could be up to 2: a register_user notification and
# a fetch_contacts notification (in link case).
notification_list = tester._RunAsync(Notification.RangeQuery,
tester.validator.client,
actual_user_id,
range_desc=None,
limit=3,
col_names=None,
scan_forward=False)
if device_dict is None:
actual_op_id = notification_list[1 if action == 'link' else 0].op_id
# Determine what the registered user's id should have been.
if user_cookie is None or action != 'link':
expected_user_id = None
else:
expected_user_id, device_id = tester.GetIdsFromCookie(user_cookie)
expected_identity = validator.GetModelObject(Identity, ident_dict['key'], must_exist=False)
if expected_identity is not None:
# Identity already existed, so expect registered user's id to equal the user id of that identity.
expected_user_id = expected_identity.user_id
# Verify that identity is linked to expected user.
assert expected_user_id is None or expected_user_id == actual_user_id, \
(expected_user_id, actual_user_id)
# Validate the device if it should have been created.
if device_dict is None:
expected_device_dict = None
else:
expected_device_dict = deepcopy(device_dict)
if 'device_id' not in device_dict:
expected_device_dict['device_id'] = actual_device_id
# Re-map picture element for Facebook authority (Facebook changed format in Oct 2012).
scratch_user_dict = deepcopy(user_dict)
if ident_dict['authority'] == 'Facebook':
if device_dict is None:
scratch_user_dict['session_expires'] = ['3600']
if 'picture' in scratch_user_dict:
scratch_user_dict['picture'] = scratch_user_dict['picture']['data']['url']
elif ident_dict['authority'] == 'Viewfinder' and action != 'register':
# Only use name in registration case.
scratch_user_dict.pop('name', None)
# Validate the Identity object.
expected_ident_dict = deepcopy(ident_dict)
expected_ident_dict.pop('json_attrs', None)
if ident_dict['authority'] == 'Viewfinder':
identity = tester._RunAsync(Identity.Query, tester.validator.client, ident_dict['key'], None)
expected_ident_dict['access_token'] = identity.access_token
expected_ident_dict['expires'] = identity.expires
# Validate the User object.
expected_user_dict = {}
before_user = validator.GetModelObject(User, actual_user_id, must_exist=False)
before_user_dict = {} if before_user is None else before_user._asdict()
for k, v in scratch_user_dict.items():
user_key = auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get(k, None)
if user_key is not None:
if before_user is None or getattr(before_user, user_key) is None:
expected_user_dict[auth.AuthHandler._AUTH_ATTRIBUTE_MAP[k]] = v
# Set facebook email if it has not yet been set.
if user_key == 'email' and ident_dict['authority'] == 'Facebook':
if before_user is None or getattr(before_user, 'facebook_email') is None:
expected_user_dict['facebook_email'] = v
expected_user_dict['user_id'] = actual_user_id
expected_user_dict['webapp_dev_id'] = actual_user.webapp_dev_id
op_dict = {'op_timestamp': util._TEST_TIME,
'op_id': notification_list[1 if action == 'link' else 0].op_id,
'user_id': actual_user_id,
'device_id': actual_device_id}
if expected_device_dict:
expected_device_dict.pop('device_uuid', None)
expected_device_dict.pop('test_udid', None)
is_prospective = before_user is None or not before_user.IsRegistered()
validator.ValidateUpdateUser('first register contact' if is_prospective else 'link contact',
op_dict,
expected_user_dict,
expected_ident_dict,
device_dict=expected_device_dict)
after_user_dict = validator.GetModelObject(User, actual_user_id)._asdict()
if expected_identity is not None:
expected_ident_dict['user_id'] = expected_identity.user_id
if action == 'link':
ignored_keys = ['user_id', 'webapp_dev_id']
if 'user_id' not in expected_ident_dict and all(k in ignored_keys for k in expected_user_dict.keys()):
# Only notify self if it hasn't been done through Friends.
validator.ValidateUserNotification('register friend self', actual_user_id, op_dict)
# Validate fetch_contacts notification.
op_dict['op_id'] = notification_list[0].op_id
invalidate = {'contacts': {'start_key': Contact.CreateSortKey(None, util._TEST_TIME)}}
validator.ValidateNotification('fetch_contacts', actual_user_id, op_dict, invalidate)
return actual_user, actual_device_id if device_dict is not None else None
|
spider/python/word.py | ferryhang/spider_job | 322 | 12773026 | import jieba  # jieba: Chinese word segmentation library
import jieba.analyse
import pymongo
import redis
import os
import re
import json
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
data = collection.find({})
text = ""
for item in data:
text += item['body']
pwd = os.path.split(os.path.realpath(__file__))[0]
stopWord = pwd+'/stop.txt'
jieba.analyse.set_stop_words(stopWord)
cut_text = list(jieba.cut(text))  # materialize the generator so it can be iterated again below
it_text = dict({})
for x in cut_text:
G = re.match('[a-zA-Z]+',x)
if G:
key = G.group()
keys = map(lambda x: x.lower(), it_text.keys())
if key.lower() in keys:
it_text[key.lower()] += 1
else:
it_text[key.lower()] = 1
with open("word.json","w+",encoding="utf-8") as file:
file.write(json.dumps(it_text))
result = "/".join(cut_text)  # join the segmented words with a separator to form a string, otherwise the word cloud cannot be drawn
data = jieba.analyse.extract_tags(result.replace('/',''), withWeight=False, allowPOS=())
#print(",".join(data))
|
migration/migrator/migrations/course/20190710182514_add_view_date_for_teams.py | elihschiff/Submitty | 411 | 12773100 | """Migration for a given Submitty course database."""
def up(config, database, semester, course):
if not database.table_has_column('teams', 'last_viewed_time'):
database.execute('ALTER TABLE teams ADD COLUMN last_viewed_time timestamp with time zone')
def down(config, database, semester, course):
pass
|
util/build_benchmarks_page.py | wenq1/duktape | 4,268 | 12773101 | #!/usr/bin/env python2
import os
import sys
import re
import json
def main():
# Adapt manually.
duk = '/usr/local/bin/duk'
lzstring = '/home/duktape/duktape/lz-string/libs/lz-string.js'
duktape_repo = '/home/duktape/duktape'
duktape_testrunner_repo = '/home/duktape/duktape-testrunner'
duktape_testclient_config = '/home/duktape/duktape-testclient-config.yaml'
benchmarks_template = '/home/duktape/duktape/website/benchmarks.html'
merge_count = 1000
# Get the hashes we're interested in, in increasing merge order.
# os.system('cd %s && git pull --rebase' % duktape_repo)
os.system('cd %s && git log -n %d --merges --oneline --decorate=no --pretty=format:%%H > /tmp/tmp-hashes.txt' % (duktape_repo, merge_count))
hashes = []
with open('/tmp/tmp-hashes.txt', 'rb') as f:
for line in f:
line = line.strip()
if line != '':
hashes.append(line)
hashes.reverse()
print('%d hashes found' % len(hashes))
# Get any release tags matching the hashes for annotations.
re_release_tag = re.compile('^v\d+\.\d+\.\d+$')
annotations = []
for x,h in enumerate(hashes):
os.system('cd %s && git tag -l --points-at %s > /tmp/tmp-taglog.txt' % (duktape_repo, h))
with open('/tmp/tmp-taglog.txt', 'rb') as f:
for line in f:
line = line.strip()
m = re_release_tag.match(line)
if m is None:
continue
annotations.append({ 'x': x, 'tag': line })
print(json.dumps(annotations, indent=4))
# Get test data for hashed, and pack it into a JSON object embedded
# into the page.
req = { 'repo_full': 'svaarala/duktape', 'sha_list': hashes }
with open('/tmp/tmp-request.json', 'wb') as f:
f.write(json.dumps(req))
os.system('cd %s && cd client-simple-node && nodejs client.js --request-uri /query-commit-simple --config %s --request-file /tmp/tmp-request.json --output-file /tmp/tmp-result.json' % (duktape_testrunner_repo, duktape_testclient_config))
with open('/tmp/tmp-result.json', 'rb') as f:
data = json.loads(f.read())
for commit in data:
for run in commit.get('runs', []):
# Censor some fields which take a lot of space
if run.has_key('output_uri'):
del run['output_uri']
if run.has_key('result') and run['result'].has_key('traceback'):
del run['result']['traceback']
doc = {
'commit_simples': data,
'annotations': annotations
}
with open('/tmp/tmp-graphdata.json', 'wb') as f:
f.write(json.dumps(doc))
# There's a lot of JSON data so use http://pieroxy.net/blog/pages/lz-string/index.html
# to compress it. 'duk' executable can be used to compress data.
with open('/tmp/tmp-script.js', 'wb') as f:
f.write('''
var input = new TextDecoder().decode(readFile('/tmp/tmp-graphdata.json'));
var compressed = LZString.compressToBase64(input);
writeFile('/tmp/tmp-graphdata-compressed.txt', compressed);
''')
os.system('%s %s /tmp/tmp-script.js' % (duk, lzstring))
with open('/tmp/tmp-graphdata-compressed.txt', 'rb') as f:
graphdata = f.read()
# Embed the compressed data into the benchmarks.html template.
with open(benchmarks_template, 'rb') as f:
page = f.read()
page = page.replace('<!-- @DATA@ -->', \
'var rawGraphDataCompressed = "' + graphdata + '";')
with open('/tmp/benchmarks.html', 'wb') as f:
f.write(page)
# Done!
print('done')
if __name__ == '__main__':
main()
|
oandapyV20-examples-master/src/streaming_trans.py | cdibble2011/OANDA | 127 | 12773115 | # -*- coding: utf-8 -*-
"""Simple demo of streaming transaction data."""
from oandapyV20 import API
from oandapyV20.exceptions import V20Error, StreamTerminated
from oandapyV20.endpoints.transactions import TransactionsStream
from exampleauth import exampleAuth
accountID, access_token = exampleAuth()
api = API(access_token=access_token, environment="practice")
s = TransactionsStream(accountID=accountID)
MAXTRANS = 10
print("read from stream until {} transactions received".format(MAXTRANS))
try:
n = 0
for R in api.request(s):
print(R)
n += 1
if n > MAXTRANS:
s.terminate("max transactions received")
except StreamTerminated as e:
print("{}".format(e))
except V20Error as e:
print("Error: {}".format(e))
|
office365/onedrive/driveitems/thumbnail.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12773144 | <reponame>rikeshtailor/Office365-REST-Python-Client
from office365.runtime.client_value import ClientValue
class Thumbnail(ClientValue):
"""
The thumbnail resource type represents a thumbnail for an image, video, document,
or any item that has a bitmap representation.
"""
pass
|
kivy/input/provider.py | sirpercival/kivy | 317 | 12773195 | '''
Motion Event Provider
=====================
Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent`
provider. The implementation must support the
:meth:`~MotionEventProvider.start`, :meth:`~MotionEventProvider.stop` and
:meth:`~MotionEventProvider.update` methods.
'''
__all__ = ('MotionEventProvider', )
class MotionEventProvider(object):
'''Base class for a provider.
'''
def __init__(self, device, args):
self.device = device
if self.__class__ == MotionEventProvider:
raise NotImplementedError('class MotionEventProvider is abstract')
def start(self):
'''Start the provider. This method is automatically called when the
application is started and if the configuration uses the current
provider.
'''
pass
def stop(self):
'''Stop the provider.
'''
pass
def update(self, dispatch_fn):
'''Update the provider and dispatch all the new touch events through the
`dispatch_fn` argument.
'''
pass
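# --- Illustrative sketch (not part of Kivy): a minimal concrete provider. ---
# "DemoProvider" and its device name are hypothetical; the sketch only shows
# how the start/stop/update hooks above are meant to be overridden.
if __name__ == '__main__':

    class DemoProvider(MotionEventProvider):
        '''Provider that produces no events; implements the required hooks.'''

        def start(self):
            print('provider for device %r started' % self.device)

        def stop(self):
            print('provider for device %r stopped' % self.device)

        def update(self, dispatch_fn):
            # A real provider would call dispatch_fn(event_type, motion_event)
            # for every new MotionEvent collected since the last update().
            pass

    p = DemoProvider('demo-device', [])
    p.start()
    p.update(lambda etype, event: None)
    p.stop()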
|
easy_maps/admin.py | cyber-barrista/django-easy-maps | 114 | 12773205 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Address
from .widgets import AddressWithMapWidget
class HasExceptionFilter(admin.SimpleListFilter):
title = _("exception")
parameter_name = "has_exception"
def lookups(self, request, model_admin):
return (
(1, _("Yes")),
(0, _("No")),
)
def queryset(self, request, queryset):
if self.value() is not None:
ids = Address.objects.values_list("pk", flat=True)
if self.value() == "1":
return queryset.filter(pk__in=ids)
elif self.value() == "0":
return queryset.exclude(pk__in=ids)
return queryset
class AddressAdmin(admin.ModelAdmin):
list_display = ["address", "computed_address", "latitude", "longitude", "has_exception"]
list_filter = [HasExceptionFilter]
search_fields = ["address"]
class form(forms.ModelForm):
class Meta:
widgets = {"address": AddressWithMapWidget({"class": "vTextField"})}
|
scripts/beam_decode.py | quangvy2703/Up-Down-Captioner | 232 | 12773210 | #!/usr/bin/env python
"""
Decode a language model using one GPU.
"""
import numpy as np
import argparse
import sys
import json
import caffe
from evaluate import CaptionScorer
from util import restore_weights
def translate(vocab, blob):
caption = "";
w = 0;
while True:
next_word = vocab[int(blob[w])]
if w == 0:
next_word = next_word.title()
if w > 0 and next_word != "." and next_word != ",":
caption += " ";
if next_word == "\"" or next_word[0] == '"':
caption += "\\"; # Escape
caption += next_word;
w += 1
if caption[-1] == '.' or w == len(blob):
break
return caption
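# Illustrative behaviour of translate() (vocab and blob below are made up):
#   vocab = ['a', 'cat', 'sat', '.']
#   translate(vocab, [1, 0, 2, 3])   # => "Cat a sat."
# The first word is title-cased, later words are space-separated except before
# '.' and ',', and decoding stops once a '.' has been emitted.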
def beam_decode(
model, # net proto definition
vocab_file, # model vocab text file
weights, # pretrained weights to use
gpu, # device id
outfile, # json output
):
vocab = []
with open(vocab_file) as f:
for word in f:
vocab.append(word.strip())
print 'Loaded {:,} words into caption vocab'.format(len(vocab))
caffe.init_log(0, 1)
caffe.log('Using device %s' % str(gpu))
caffe.set_device(int(gpu))
caffe.set_mode_gpu()
net = caffe.Net(model, weights, caffe.TEST)
print 'Loaded proto {} with weights {}'.format(model,weights)
net.layers[0].load_dataset()
id_to_caption = {}
iteration = 0
while True:
ending = False
out = net.forward()
image_ids = net.blobs['image_id'].data
captions = net.blobs['caption'].data
scores = net.blobs['log_prob'].data
batch_size = image_ids.shape[0]
if captions.shape[0] == batch_size:
# Decoding a compact net
beam_size = captions.shape[2]
for n in range(batch_size):
if iteration == 0:
print "\nhttp://mscoco.org/explore/?id=%d" % image_ids[n][0]
for b in range(beam_size):
cap = translate(vocab, captions[n][0][b])
score = scores[n][0][b]
if iteration == 0:
print '[%d] %.2f %s' % (b,score,cap)
else:
# Decoding an unrolled net
beam_size = captions.shape[0] / batch_size
if iteration == 0:
print "Beam size: %d" % beam_size
for n in range(batch_size):
image_id = int(image_ids[n][0])
if iteration == 0:
print "\nhttp://mscoco.org/explore/?id=%d" % image_id
for b in range(beam_size):
cap = translate(vocab, captions[n*beam_size+b])
score = scores[n*beam_size+b]
if b == 0:
if image_id in id_to_caption:
ending = True
else:
id_to_caption[image_id] = cap
if iteration == 0:
print '[%d] %.2f %s' % (b,score,cap)
iteration += 1
if iteration % 1000 == 0:
print 'Iteration: %d' % iteration
if ending:
break
output = []
for image_id in sorted(id_to_caption.keys()):
output.append({
'image_id': image_id,
'caption': id_to_caption[image_id]
})
with open(outfile, 'w') as f:
json.dump(output,f)
print 'Generated %d outputs, saving to %s' % (len(output),outfile)
s = CaptionScorer()
s.score(outfile)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="Net proto definition.")
parser.add_argument("--weights", help="Pretrained weights.")
parser.add_argument("--gpu", type=int, default=0, help="Device id.")
parser.add_argument("--vocab", required=True, help="Vocab file.")
parser.add_argument("--outfile", required=True, help="Output file path.")
args = parser.parse_args()
restore_weights(args.weights)
beam_decode(args.model, args.vocab, args.weights, args.gpu, args.outfile)
|
tests/dicts/test_benedict_casting.py | next-franciscoalgaba/python-benedict | 365 | 12773215 | # -*- coding: utf-8 -*-
from benedict import benedict
import unittest
class benedict_casting_test_case(unittest.TestCase):
def test__getitem__(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b['b.c']
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_cast_dict_to_benedict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bb = benedict(b)
bbd = bb.dict()
self.assertTrue(isinstance(bbd, dict))
self.assertFalse(isinstance(bbd, benedict))
self.assertEqual(d, bbd)
self.assertTrue(d is bbd)
def test_cast_benedict_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
# d1 = dict(**b)
# print(d1)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_cast_benedict_kwargs_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
d = dict(**b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bd = b.dict()
self.assertTrue(isinstance(bd, dict))
self.assertFalse(isinstance(bd, benedict))
self.assertTrue(d == bd)
self.assertTrue(d is bd)
def test_get(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get_dict('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_list_item(self):
d = {
'a': 1,
'b': {
'c': [
{ 'd': 2, },
{ 'e': 3, },
{ 'f': 4, },
]
},
}
b = benedict(d)
c = b.get_list_item('b.c', 1)
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'][1])
self.assertFalse(c is d['b']['c'][1])
def test_pop(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.pop('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
with self.assertRaises(KeyError):
d['b']['c']
|
tests/list_view.py | Aloxaf/moshmosh | 114 | 12773217 | <gh_stars>100-1000
from moshmosh.extensions.pattern_matching.runtime import ListView
v = ListView([1, 2, 3, 5], range(1, 4))
v.sort(reverse=True)
assert v == [5, 3, 2]
v.sort(reverse=False)
assert v == [2, 3, 5]
v.sort(reverse=False, key=lambda x: -x)
assert v == [5, 3, 2]
assert isinstance(v, list) |
recipes/Python/577233_Adding_directory_pythexecutable_system_PATH/recipe-577233.py | tdiprima/code | 2,023 | 12773248 | """
A small program to run after installing Python on Windows.
It adds the directory of the Python executable to the PATH env. variable;
with the optional parameter 'remove', it removes that entry instead.
You have to open a new command prompt to see the effects (echo %PATH%).
"""
import sys
import os
import time
import _winreg
import ctypes
def extend(pypath):
'''
extend(pypath) adds pypath to the PATH env. variable as defined in the
registry, and then notifies applications (e.g. the desktop) of this change.
Already-opened DOS command prompts are not updated. Newly opened ones will have
the new path (inherited from the updated Windows Explorer desktop).
'''
hKey = _winreg.OpenKey (_winreg.HKEY_LOCAL_MACHINE,
r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
0, _winreg.KEY_READ | _winreg.KEY_SET_VALUE)
value, typ = _winreg.QueryValueEx (hKey, "PATH")
vals = value.split(';')
assert isinstance(vals, list)
if len(sys.argv) > 1 and sys.argv[1] == 'remove':
try:
vals.remove(pypath)
except ValueError:
print 'path element', pypath, 'not found'
return
print 'removing from PATH:', pypath
else:
if pypath in vals:
print 'path element', pypath, 'already in PATH'
return
vals.append(pypath)
print 'adding to PATH:', pypath
_winreg.SetValueEx(hKey, "PATH", 0, typ, ';'.join(vals) )
_winreg.FlushKey(hKey)
# notify other programs
SendMessage = ctypes.windll.user32.SendMessageW
HWND_BROADCAST = 0xFFFF
WM_SETTINGCHANGE = 0x1A
SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, u'Environment')
def find_python():
'''
retrieves the commandline for .py extensions from the registry
'''
hKey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'Python.File\shell\open\command')
# get the default value
value, typ = _winreg.QueryValueEx (hKey, None)
program = value.split('"')[1]
if not program.lower().endswith(r'\python.exe'):
return None
return os.path.dirname(program)
pypath=find_python()
extend(pypath)
|
japonicus/interface.py | mczero80/japonicus | 229 | 12773252 | <reponame>mczero80/japonicus
#!/bin/python
import evaluation
def showTitleDisclaimer(backtestsettings, VERSION):
TITLE = """
██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗██╗ ██████╗██╗ ██╗███████╗
██║██╔══██╗██╔══██╗██╔═══██╗████╗ ██║██║██╔════╝██║ ██║██╔════╝
██║███████║██████╔╝██║ ██║██╔██╗ ██║██║██║ ██║ ██║███████╗
██ ██║██╔══██║██╔═══╝ ██║ ██║██║╚██╗██║██║██║ ██║ ██║╚════██║
╚█████╔╝██║ ██║██║ ╚██████╔╝██║ ╚████║██║╚██████╗╚██████╔╝███████║
╚════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝
"""
TITLE += "\t EVOLUTIONARY GENETIC ALGORITHMS"
try:
print(TITLE, end="")
except UnicodeEncodeError or SyntaxError:
print("\nJAPONICUS\n")
print('\t' * 4 + 'v%.2f' % VERSION)
print()
profitDisclaimer = "The profits reported here depend on the backtest interpreter function;"
interpreterFuncName = backtestsettings['interpreteBacktestProfit']
interpreterInfo = evaluation.gekko.backtest.getInterpreterBacktestInfo(
interpreterFuncName)
print("%s \n\t%s\n" % (profitDisclaimer, interpreterInfo))
|
experiments/segmentation/unet_mcdropout_pascal.py | ElementAI/baal | 575 | 12773260 | import argparse
from copy import deepcopy
from pprint import pprint
import torch.backends
from PIL import Image
from torch import optim
from torchvision.transforms import transforms
from tqdm import tqdm
from baal import get_heuristic, ActiveLearningLoop
from baal.bayesian.dropout import MCDropoutModule
from baal import ModelWrapper
from baal import ClassificationReport
from baal import PILToLongTensor
from utils import pascal_voc_ids, active_pascal, add_dropout, FocalLoss
try:
import segmentation_models_pytorch as smp
except ImportError:
raise Exception('This example requires `smp`.\n pip install segmentation_models_pytorch')
import torch
import torch.nn.functional as F
import numpy as np
def mean_regions(n, grid_size=16):
# Compute the mean uncertainty per regions.
# [batch_size, W, H]
n = torch.from_numpy(n[:, None, ...])
# [Batch_size, 1, grid, grid]
out = F.adaptive_avg_pool2d(n, grid_size)
return np.mean(out.view([-1, grid_size ** 2]).numpy(), -1)
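# Shape sketch (illustrative): for per-pixel uncertainties `n` of shape
# (batch, H, W), e.g. (4, 224, 224), adaptive_avg_pool2d reduces them to
# (4, 1, 16, 16); the 16 * 16 region means are then averaged, giving one
# scalar score per image:
#   scores = mean_regions(np.random.rand(4, 224, 224))   # -> shape (4,)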
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--al_step", default=200, type=int)
parser.add_argument("--batch_size", default=8, type=int)
parser.add_argument("--initial_pool", default=40, type=int)
parser.add_argument("--n_data_to_label", default=20, type=int)
parser.add_argument("--lr", default=0.001)
parser.add_argument("--heuristic", default="random", type=str)
parser.add_argument("--reduce", default="sum", type=str)
parser.add_argument("--data_path", default="/data", type=str)
parser.add_argument("--iterations", default=20, type=int)
parser.add_argument("--learning_epoch", default=50, type=int)
return parser.parse_args()
def get_datasets(initial_pool, path):
IM_SIZE = 224
# TODO add better data augmentation scheme.
transform = transforms.Compose(
[transforms.Resize(512), transforms.CenterCrop(IM_SIZE), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ])
test_transform = transforms.Compose(
[transforms.Resize(512), transforms.CenterCrop(IM_SIZE), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ])
target_transform = transforms.Compose(
[transforms.Resize(512, interpolation=Image.NEAREST), transforms.CenterCrop(IM_SIZE),
PILToLongTensor(pascal_voc_ids)])
active_set, test_set = active_pascal(path=path,
transform=transform,
test_transform=test_transform,
target_transform=target_transform)
active_set.label_randomly(initial_pool)
return active_set, test_set
def main():
args = parse_args()
batch_size = args.batch_size
use_cuda = torch.cuda.is_available()
hyperparams = vars(args)
pprint(hyperparams)
active_set, test_set = get_datasets(hyperparams['initial_pool'], hyperparams['data_path'])
# We will use the FocalLoss
criterion = FocalLoss(gamma=2, alpha=0.25)
# Our model is a simple Unet
model = smp.Unet(
encoder_name='resnext50_32x4d',
encoder_depth=5,
encoder_weights='imagenet',
decoder_use_batchnorm=False,
classes=len(pascal_voc_ids)
)
# Add a Dropout layer to use MC-Dropout
add_dropout(model, classes=len(pascal_voc_ids), activation=None)
# This will enable Dropout at test time.
model = MCDropoutModule(model)
# Put everything on GPU.
if use_cuda:
model.cuda()
# Make an optimizer
optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"], momentum=0.9, weight_decay=5e-4)
# Keep a copy of the original weights
initial_weights = deepcopy(model.state_dict())
# Add metrics
model = ModelWrapper(model, criterion)
model.add_metric('cls_report', lambda: ClassificationReport(len(pascal_voc_ids)))
# Which heuristic you want to use?
# We will use our custom reduction function.
heuristic = get_heuristic(hyperparams['heuristic'], reduction=mean_regions)
# The ALLoop is in charge of predicting the uncertainty and labelling the most uncertain items in the pool.
loop = ActiveLearningLoop(active_set,
model.predict_on_dataset_generator,
heuristic=heuristic,
ndata_to_label=hyperparams['n_data_to_label'],
# Instead of predicting on the entire pool, only a subset is used
max_sample=1000,
batch_size=batch_size,
iterations=hyperparams["iterations"],
use_cuda=use_cuda
)
acc = []
for epoch in tqdm(range(args.al_step)):
# Following Gal et al. 2016, we reset the weights.
model.load_state_dict(initial_weights)
# Train 50 epochs before sampling.
model.train_on_dataset(active_set, optimizer, batch_size, hyperparams['learning_epoch'],
use_cuda)
# Validation!
model.test_on_dataset(test_set, batch_size, use_cuda)
should_continue = loop.step()
metrics = model.metrics
val_loss = metrics['test_loss'].value
logs = {
"val": val_loss,
"epoch": epoch,
"train": metrics['train_loss'].value,
"labeled_data": active_set.labelled,
"Next Training set size": len(active_set),
'cls_report': metrics['test_cls_report'].value,
}
pprint(logs)
acc.append(logs)
if not should_continue:
break
if __name__ == "__main__":
main()
|
backend/kale/tests/unit_tests/test_kfputils.py | brness/kale | 502 | 12773277 | <reponame>brness/kale
# Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from testfixtures import mock
from kale.common import kfputils
@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_not_exists(podutils, workflowutils, tmpdir):
"""Test the uimetadata file is created when it does not exists."""
podutils.get_pod_name.return_value = 'test_pod'
podutils.get_namespace.return_value = 'test_ns'
workflowutils.get_workflow_name.return_value = 'test_wk'
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
# update tmp file
kfputils.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [{
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_from_empty(podutils, workflowutils, tmpdir):
"""Test that the uimetadata file is updated inplace correctly."""
podutils.get_pod_name.return_value = 'test_pod'
podutils.get_namespace.return_value = 'test_ns'
workflowutils.get_workflow_name.return_value = 'test_wk'
# create base tmp file
base = {"outputs": []}
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
json.dump(base, open(filepath, 'w'))
# update tmp file
kfputils.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [{
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_from_not_empty(podutils, workflowutils, tmpdir):
"""Test that the uimetadata file is updated inplace correctly."""
podutils.get_pod_name.return_value = 'test_pod'
podutils.get_namespace.return_value = 'test_ns'
workflowutils.get_workflow_name.return_value = 'test_wk'
# create base tmp file
markdown = {
'type': 'markdown',
'storage': 'inline',
'source': '#Some markdown'
}
base = {"outputs": [markdown]}
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
json.dump(base, open(filepath, 'w'))
# update tmp file
kfputils.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [markdown, {
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
|
classes/logger.py | tmcdonagh/Autorippr | 162 | 12773290 | # -*- coding: utf-8 -*-
"""
Simple logging class
Released under the MIT license
Copyright (c) 2012, <NAME>
@category misc
@version $Id: 1.7.0, 2016-08-22 14:53:29 ACST $;
@author <NAME>
@license http://opensource.org/licenses/MIT
"""
import logging
import os
import sys
class Logger(object):
def __init__(self, name, debug, silent):
self.silent = silent
frmt = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
"%Y-%m-%d %H:%M:%S"
)
if debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
self.createhandlers(frmt, name, loglevel)
def __del__(self):
if not self.silent:
self.log.removeHandler(self.sh)
self.log.removeHandler(self.fh)
self.log = None
def createhandlers(self, frmt, name, loglevel):
self.log = logging.getLogger(name)
self.log.setLevel(loglevel)
if not self.silent:
self.sh = logging.StreamHandler(sys.stdout)
self.sh.setLevel(loglevel)
self.sh.setFormatter(frmt)
self.log.addHandler(self.sh)
DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.fh = logging.FileHandler('%s/autorippr.log' % DIR)
self.fh.setLevel(loglevel)
self.fh.setFormatter(frmt)
self.log.addHandler(self.fh)
def debug(self, msg):
self.log.debug(msg)
def info(self, msg):
self.log.info(msg)
def warn(self, msg):
self.log.warn(msg)
def error(self, msg):
self.log.error(msg)
def critical(self, msg):
self.log.critical(msg)
|
rpcpy/exceptions.py | william-wambua/rpc.py | 152 | 12773313 | <gh_stars>100-1000
class SerializerNotFound(Exception):
"""
Serializer not found
"""
|
src/utils/pkldf2jsonl.py | fatterbetter/CodeSearchNet | 1,681 | 12773349 | <filename>src/utils/pkldf2jsonl.py
import pandas as pd
from .general_utils import chunkify
from dpu_utils.utils import RichPath
from multiprocessing import Pool, cpu_count
def df_to_jsonl(df: pd.DataFrame, RichPath_obj: RichPath, i: int, basefilename='codedata') -> str:
dest_filename = f'{basefilename}_{str(i).zfill(5)}.jsonl.gz'
RichPath_obj.join(dest_filename).save_as_compressed_file(df.to_dict(orient='records'))
return str(RichPath_obj.join(dest_filename))
def chunked_save_df_to_jsonl(df: pd.DataFrame,
output_folder: RichPath,
num_chunks: int=None,
parallel: bool=True) -> None:
"Chunk DataFrame (n chunks = num cores) and save as jsonl files."
df.reset_index(drop=True, inplace=True)
# parallel saving to jsonl files on azure
n = cpu_count() if num_chunks is None else num_chunks
dfs = chunkify(df, n)
args = zip(dfs, [output_folder]*len(dfs), range(len(dfs)))
if not parallel:
for arg in args:
dest_filename = df_to_jsonl(*arg)
print(f'Wrote chunk to {dest_filename}')
else:
with Pool(cpu_count()) as pool:
pool.starmap(df_to_jsonl, args)
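# Illustrative usage sketch (the output path and DataFrame name are placeholders):
#     out_dir = RichPath.create('/tmp/processed_chunks')
#     chunked_save_df_to_jsonl(code_df, out_dir)  # writes codedata_00000.jsonl.gz etc., one chunk per core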
|
terrascript/data/davidji99/herokux.py | mjuenema/python-terrascript | 507 | 12773401 | # terrascript/data/davidji99/herokux.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:42 UTC)
import terrascript
class herokux_addons(terrascript.Data):
pass
class herokux_kafka_mtls_iprules(terrascript.Data):
pass
class herokux_postgres_mtls_certificate(terrascript.Data):
pass
class herokux_registry_image(terrascript.Data):
pass
__all__ = [
"herokux_addons",
"herokux_kafka_mtls_iprules",
"herokux_postgres_mtls_certificate",
"herokux_registry_image",
]
|
artemis/utils/basic.py | StanfordGeometryLab/artemis | 254 | 12773408 | """
Various simple (basic) functions in the "utilities".
The MIT License (MIT)
Originally created at 8/31/20, for Python 3.x
Copyright (c) 2021 <NAME> (<EMAIL>) & Stanford Geometric Computing Lab
"""
import torch
import multiprocessing as mp
import dask.dataframe as dd
from torch import nn
from sklearn.model_selection import train_test_split
def iterate_in_chunks(l, n):
"""Yield successive 'n'-sized chunks from iterable 'l'.
    Note: the last chunk will be smaller than n if n doesn't divide len(l) evenly.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
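# Illustrative example of the generator above:
#     list(iterate_in_chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]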
def df_parallel_column_apply(df, func, column_name):
n_partitions = mp.cpu_count() * 4
d_data = dd.from_pandas(df, npartitions=n_partitions)
res =\
d_data.map_partitions(lambda df: df.apply((lambda row: func(row[column_name])), axis=1))\
.compute(scheduler='processes')
return res
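# Illustrative usage sketch ('utterance' and tokenize are placeholder names):
#     df['tokens'] = df_parallel_column_apply(df, tokenize, 'utterance')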
def cross_entropy(pred, soft_targets):
""" pred: unscaled logits
soft_targets: target-distributions (i.e., sum to 1)
"""
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1))
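# Illustrative check (hand-computed): for pred = [[2.0, 0.0]] the log-softmax is
# roughly [-0.127, -2.127], so with soft_targets = [[0.5, 0.5]] the loss is
# 0.5 * 0.127 + 0.5 * 2.127, i.e. about 1.127:
#     cross_entropy(torch.tensor([[2.0, 0.0]]), torch.tensor([[0.5, 0.5]]))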
def make_train_test_val_splits(datataset_df, loads, random_seed, unique_id_column=None):
""" Split the data into train/val/test.
:param datataset_df: pandas Dataframe containing the dataset (e.g., ArtEmis)
:param loads: list with the three floats summing to one for train/val/test
:param random_seed: int
    :return: a copy of the datataset_df with an added column ("split") indicating the split of each row
"""
if sum(loads) != 1:
raise ValueError()
train_size, val_size, test_size = loads
print("Using a {},{},{} for train/val/test purposes".format(train_size, val_size, test_size))
df = datataset_df
## unique id
if unique_id_column is None:
unique_id = df.art_style + df.painting # default for ArtEmis
else:
unique_id = df[unique_id_column]
unique_ids = unique_id.unique()
unique_ids.sort()
train, rest = train_test_split(unique_ids, test_size=val_size+test_size, random_state=random_seed)
train = set(train)
if val_size != 0:
val, test = train_test_split(rest, test_size=round(test_size*len(unique_ids)), random_state=random_seed)
else:
test = rest
test = set(test)
assert len(test.intersection(train)) == 0
def mark_example(x):
if x in train:
return 'train'
elif x in test:
return 'test'
else:
return 'val'
df = df.assign(split=unique_id.apply(mark_example))
return df |
src/sage/groups/libgap_group.py | fchapoton/sage | 1,742 | 12773422 | <gh_stars>1000+
"""
Generic LibGAP-based Group
This is useful if you need to use a GAP group implementation in Sage
that does not have a dedicated Sage interface.
If you want to implement your own group class, you should not derive
from this but directly from
:class:`~sage.groups.libgap_wrapper.ParentLibGAP`.
EXAMPLES::
sage: F.<a,b> = FreeGroup()
sage: G_gap = libgap.Group([ (a*b^2).gap() ])
sage: from sage.groups.libgap_group import GroupLibGAP
sage: G = GroupLibGAP(G_gap); G
Group([ a*b^2 ])
sage: type(G)
<class 'sage.groups.libgap_group.GroupLibGAP_with_category'>
sage: G.gens()
(a*b^2,)
"""
##############################################################################
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
##############################################################################
from sage.groups.group import Group
from sage.groups.libgap_wrapper import ParentLibGAP, ElementLibGAP
from sage.groups.libgap_mixin import GroupMixinLibGAP
class GroupLibGAP(GroupMixinLibGAP, Group, ParentLibGAP):
Element = ElementLibGAP
def __init__(self, *args, **kwds):
"""
Group interface for LibGAP-based groups.
INPUT:
Same as :class:`~sage.groups.libgap_wrapper.ParentLibGAP`.
TESTS::
sage: F.<a,b> = FreeGroup()
sage: G_gap = libgap.Group([ (a*b^2).gap() ])
sage: from sage.groups.libgap_group import GroupLibGAP
sage: G = GroupLibGAP(G_gap); G
Group([ a*b^2 ])
sage: g = G.gen(0); g
a*b^2
sage: TestSuite(G).run(skip=['_test_pickling', '_test_elements'])
sage: TestSuite(g).run(skip=['_test_pickling'])
"""
ParentLibGAP.__init__(self, *args, **kwds)
Group.__init__(self)
|
djangoerp/core/signals.py | xarala221/django-erp | 345 | 12773426 | #!/usr/bin/env python
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.conf import settings
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .utils.models import get_model
from .cache import LoggedInUserCache
from .models import Permission, ObjectPermission, Group
## HANDLERS ##
def _update_author_permissions(sender, instance, raw, created, **kwargs):
"""Updates the permissions assigned to the author of the given object.
"""
author = LoggedInUserCache().user
if author and author.is_authenticated:
content_type = ContentType.objects.get_for_model(sender)
app_label = content_type.app_label
model_name = content_type.model
if created:
can_view_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("view_%s" % model_name, app_label, model_name, instance.pk)
can_change_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("change_%s" % model_name, app_label, model_name, instance.pk)
can_delete_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("delete_%s" % model_name, app_label, model_name, instance.pk)
can_view_this_object.users.add(author)
can_change_this_object.users.add(author)
can_delete_this_object.users.add(author)
def manage_author_permissions(cls, enabled=True):
"""Adds permissions assigned to the author of the given object.
Connects the post_save signal of the given model class to the handler which
adds default permissions to the current user. i.e.:
>> manage_author_permissions(Project)
It will add default view, change and delete permissions for each Project's
instances created by the current user.
To disconnect:
>> manage_author_permissions(Project, False)
"""
cls = get_model(cls)
dispatch_uid = "update_%s_permissions" % cls.__name__.lower()
if enabled:
post_save.connect(_update_author_permissions, cls, dispatch_uid=dispatch_uid)
else:
post_save.disconnect(_update_author_permissions, cls, dispatch_uid=dispatch_uid)
def user_post_save(sender, instance, created, *args, **kwargs):
"""Add view/delete/change object permissions to users (on themselves).
It also adds new user instances to "users" group.
"""
auth_app, sep, user_model_name = settings.AUTH_USER_MODEL.rpartition('.')
user_model_name = user_model_name.lower()
# All new users have full control over themselves.
can_view_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("view_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_change_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("change_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_delete_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("delete_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_view_this_user.users.add(instance)
can_change_this_user.users.add(instance)
can_delete_this_user.users.add(instance)
# All new users are members of "users" group.
if created:
users_group, is_new = Group.objects.get_or_create(name='users')
instance.groups.add(users_group)
def add_view_permission(sender, instance, **kwargs):
"""Adds a view permission related to each new ContentType instance.
"""
if isinstance(instance, ContentType):
codename = "view_%s" % instance.model
Permission.objects.get_or_create(content_type=instance, codename=codename, name="Can view %s" % instance.name)
## CONNECTIONS ##
post_save.connect(user_post_save, get_user_model())
post_save.connect(add_view_permission, ContentType)
|
tests/unit/test_dist.py | olegklimov/DeepSpeed | 6,728 | 12773461 | import torch
import torch.distributed as dist
from common import distributed_test
import pytest
@distributed_test(world_size=3)
def test_init():
assert dist.is_initialized()
assert dist.get_world_size() == 3
assert dist.get_rank() < 3
# Demonstration of pytest's parameterization
@pytest.mark.parametrize('number,color', [(1138, 'purple')])
def test_dist_args(number, color):
"""Outer test function with inputs from pytest.mark.parametrize(). Uses a distributed
helper function.
"""
@distributed_test(world_size=2)
def _test_dist_args_helper(x, color='red'):
assert dist.get_world_size() == 2
assert x == 1138
assert color == 'purple'
"""Ensure that we can parse args to distributed_test decorated functions. """
_test_dist_args_helper(number, color=color)
@distributed_test(world_size=[1, 2, 4])
def test_dist_allreduce():
x = torch.ones(1, 3).cuda() * (dist.get_rank() + 1)
sum_of_ranks = (dist.get_world_size() * (dist.get_world_size() + 1)) // 2
result = torch.ones(1, 3).cuda() * sum_of_ranks
dist.all_reduce(x)
assert torch.all(x == result)
|
tests/unit/test_app.py | davidjsherman/repo2docker | 1,047 | 12773472 | import errno
import pytest
from tempfile import TemporaryDirectory
from unittest.mock import patch
import docker
import escapism
from repo2docker.app import Repo2Docker
from repo2docker.__main__ import make_r2d
from repo2docker.utils import chdir
def test_find_image():
images = [{"RepoTags": ["some-org/some-repo:latest"]}]
with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = "some-org/some-repo"
assert r2d.find_image()
instance.images.assert_called_with()
def test_dont_find_image():
images = [{"RepoTags": ["some-org/some-image-name:latest"]}]
with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = "some-org/some-other-image-name"
assert not r2d.find_image()
instance.images.assert_called_with()
def test_image_name_remains_unchanged():
# if we specify an image name, it should remain unmodified
with TemporaryDirectory() as src:
app = Repo2Docker()
argv = ["--image-name", "a-special-name", "--no-build", src]
app = make_r2d(argv)
app.start()
assert app.output_image_spec == "a-special-name"
def test_image_name_contains_sha1(repo_with_content):
upstream, sha1 = repo_with_content
app = Repo2Docker()
# force selection of the git content provider by prefixing path with
# file://. This is important as the Local content provider does not
# store the SHA1 in the repo spec
argv = ["--no-build", "file://" + upstream]
app = make_r2d(argv)
app.start()
assert app.output_image_spec.endswith(sha1[:7])
def test_local_dir_image_name(repo_with_content):
upstream, sha1 = repo_with_content
app = Repo2Docker()
argv = ["--no-build", upstream]
app = make_r2d(argv)
app.start()
assert app.output_image_spec.startswith(
"r2d" + escapism.escape(upstream, escape_char="-").lower()
)
def test_build_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_build_kwargs = {"somekey": "somevalue"}
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
args, kwargs = builds.call_args
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_run_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_run_kwargs = {"somekey": "somevalue"}
with patch.object(docker.DockerClient, "containers") as containers:
app.start_container()
containers.run.assert_called_once()
args, kwargs = containers.run.call_args
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_root_not_allowed():
with TemporaryDirectory() as src, patch("os.geteuid") as geteuid:
geteuid.return_value = 0
argv = [src]
with pytest.raises(SystemExit) as exc:
app = make_r2d(argv)
assert exc.code == 1
with pytest.raises(ValueError):
app = Repo2Docker(repo=src, run=False)
app.build()
app = Repo2Docker(repo=src, user_id=1000, user_name="jovyan", run=False)
app.initialize()
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
def test_dryrun_works_without_docker(tmpdir, capsys):
with chdir(tmpdir):
with patch.object(docker, "APIClient") as client:
client.side_effect = docker.errors.DockerException("Error: no Docker")
app = Repo2Docker(dry_run=True)
app.build()
captured = capsys.readouterr()
assert "Error: no Docker" not in captured.err
def test_error_log_without_docker(tmpdir, capsys):
with chdir(tmpdir):
with patch.object(docker, "APIClient") as client:
client.side_effect = docker.errors.DockerException("Error: no Docker")
app = Repo2Docker()
with pytest.raises(SystemExit):
app.build()
captured = capsys.readouterr()
assert "Error: no Docker" in captured.err
|
marketplaces/apps/market_community/urls.py | diassor/CollectorCity-Market-Place | 135 | 12773488 | <gh_stars>100-1000
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url('^$', 'market_community.views.overview', name='market_community'),
url(r'^overview/$', 'market_community.views.overview', name='community_overview'),
url(r'^forums/$', 'market_community.views.forums', name='community_forums'),
url(r'^blogs/$', 'market_community.views.blogs', name='community_blogs'),
url(r'^faq/$', 'market_community.views.faq', name='community_faq'),
url(r'^profiles/$', 'market_community.views.profiles', name='community_profiles'),
url(r'^profiles/(?P<letter>[\w]+)/$', 'market_community.views.profiles_list', name='community_profiles_list'),
)
|
mods/Commands.py | waffle620/fagyhal | 494 | 12773532 | import asyncio
import discord
import sys
from discord.ext import commands
from utils import checks
from mods.cog import Cog
class Commands(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
@commands.group(pass_context=True, aliases=['setprefix', 'changeprefix'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def prefix(self, ctx, *, txt:str=None):
"""Change the Bots Prefix for the Server"""
if txt is None:
sql = "SELECT prefix FROM `prefix` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
sql_channel = "SELECT prefix FROM `prefix_channel` WHERE server={0} AND channel={1}"
sql_channel = sql_channel.format(ctx.message.server.id, ctx.message.channel.id)
result = self.cursor.execute(sql).fetchall()
result2 = self.cursor.execute(sql_channel).fetchall()
if len(result) == 0:
server_prefix = '.'
else:
server_prefix = result[0]['prefix']
if len(result2) == 0:
channel_prefix = None
else:
channel_prefix = result2[0]['prefix']
msg = "Server Prefix: `{0}`\n".format(server_prefix)
if channel_prefix != None:
msg += "**Current** Channel Prefix: `{0}`".format(channel_prefix)
await self.bot.say(msg)
return
sql = "INSERT INTO `prefix` (`server`, `prefix`, `id`) VALUES (%s, %s, %s)"
update_sql = "UPDATE `prefix` SET prefix={0} WHERE server={1}"
update_sql = update_sql.format(self.escape(txt), ctx.message.server.id)
check = "SELECT server FROM `prefix` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
self.cursor.execute(sql, (ctx.message.server.id, txt, ctx.message.author.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for the server\n".format(txt))
else:
self.cursor.execute(update_sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for the server".format(txt))
@prefix.command(pass_context=True, name='channel', no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def _prefix_channel(self, ctx, *, txt:str):
"""Change the Bots Prefix for the current Channel"""
channel = ctx.message.channel
for c in ctx.message.channel_mentions:
channel = c
txt = txt.replace(channel.mention, '').replace('#'+channel.name, '')
sql = "INSERT INTO `prefix_channel` (`server`, `prefix`, `channel`, `id`) VALUES (%s, %s, %s, %s)"
update_sql = "UPDATE `prefix_channel` SET prefix={0} WHERE server={1} AND channel={2}"
update_sql = update_sql.format(self.escape(txt), ctx.message.server.id, channel.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
check = check.format(ctx.message.server.id, channel.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
self.cursor.execute(sql, (ctx.message.server.id, txt, channel.id, ctx.message.author.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
else:
self.cursor.execute(update_sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
@prefix.command(pass_context=True, name='reset', no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def _prefix_reset(self, ctx, what:str=None, channel:discord.Channel=None):
"""Reset All Custom Set Prefixes For the Bot"""
if what is None or what == "server":
sql = "DELETE FROM `prefix` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
check = "SELECT * FROM `prefix` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Current server does **not** have a custom prefix set!")
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: **Reset server prefix**\nThis does not reset channel prefixes, run \"all\" after reset to reset all prefixes *or* \"channels\" to reset all custom channel prefixes.")
elif what == "channel":
if channel is None:
channel = ctx.message.channel
sql = "DELETE FROM `prefix_channel` WHERE server={0} AND channel={1}"
sql = sql.format(ctx.message.server.id, channel.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
check = check.format(ctx.message.server.id, channel.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: {0} does **not** have a custom prefix Set!\nMention the channel after \"reset channel\" for a specific channel.".format(channel.mention))
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: Reset {0}'s prefix!\nThis does **not** reset all custom channel prefixes, \"reset channels\" to do so.".format(channel.mention))
return
elif what == "channels":
sql = "DELETE FROM `prefix_channel` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
                await self.bot.say(":no_entry: Server does **not** have a custom prefix set for any channel!\nMention the channel after \"reset channel\" for a specific channel.")
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: Reset all channels custom prefixes!")
return
elif what == "all" or what == "everything":
sql = "DELETE FROM `prefix_channel` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
sql2 = "DELETE FROM `prefix` WHERE server={0}"
sql2 = sql2.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.execute(sql2)
self.cursor.commit()
await self.bot.say(":warning: Reset all custom server prefix settings!")
return
else:
await self.bot.say(":no_entry: Invalid Option\nOptions: `server, channel, channels, all/everything`")
good_commands = ['command', 'blacklist', 'help', 'invite']
async def command_toggle(self, t:str, ctx, cmd:str, user=None, msg=True):
try:
if cmd in self.good_commands:
await self.bot.send_message(ctx.message.channel, ':no_entry: You cannot disable command: `{0}`!'.format(self.good_commands[self.good_commands.index(cmd)]))
return
if t == 'server':
sql = "SELECT * FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
sql = sql.format(ctx.message.server.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`) VALUES (%s, %s, %s)'
self.cursor.execute(sql, (cmd, "server", ctx.message.server.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}`.'.format(cmd))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
sql = sql.format(ctx.message.server.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}`.'.format(cmd))
elif t == 'channel':
channel = user
sql = "SELECT * FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `channel`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "channel", ctx.message.server.id, channel.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
elif t == 'user':
sql = "SELECT * FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `user`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "user", ctx.message.server.id, user.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for user `{1}`.'.format(cmd, user))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for user `{1}`.'.format(cmd, user))
elif t == 'role':
role = user
sql = "SELECT * FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `role`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "role", ctx.message.server.id, role.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for role {1}.'.format(cmd, role.mention))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for role {1}.'.format(cmd, role.mention))
elif t == 'global':
sql = "SELECT * FROM `command_blacklist` WHERE type='global' AND command={0}"
sql = sql.format(self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`) VALUES (%s, %s)'
self.cursor.execute(sql, (cmd, "global"))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':globe_with_meridians: Disabled command `{0}` globally.'.format(cmd))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='global' AND command={0}"
sql = sql.format(self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` globally.'.format(cmd))
else:
return
except Exception as e:
await self.bot.send_message(ctx.message.channel, str(e))
async def module_command_toggle(self, module, t:str, ctx):
try:
count = 0
disabled = []
for command in self.bot.commands:
if self.bot.commands[command].module == module and command not in disabled:
count += 1
cmd = str(self.bot.commands[command].name)
await self.command_toggle(t, ctx, cmd, msg=False)
await asyncio.sleep(0.21)
disabled.append(command)
return count
except Exception as e:
await self.bot.send_message(ctx.message.channel, str(e))
async def get_modules(self):
modules = []
for module in sys.modules:
if module.startswith('mods.'):
if module == 'mods.Repl' or module == 'mods.Stats' or module == 'mods.Commands':
continue
mod = module.replace('mods.', '')
modules.append(mod)
return modules
@commands.group(pass_context=True, invoke_without_command=True, aliases=['commands', 'cmd'], no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command(self, ctx, cmd:str):
"""Toggle a command for the server"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('server', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='toggle', aliases=['enable', 'disable'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def cmd_toggle(self, ctx, cmd:str):
"""Server wide Command Toggle"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('server', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='user', pass_context=True, aliases=['member'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_user(self, ctx, cmd:str, user:discord.User=None):
"""Toggle Command for a user"""
if user is None:
user = ctx.message.author
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('user', ctx, cmd, user)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='role', pass_context=True, aliases=['rank'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_role(self, ctx, cmd:str, role:discord.Role):
"""Toggle Command for a role"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('role', ctx, cmd, role)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='channel', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_channel(self, ctx, cmd:str, chan:discord.Channel=None):
"""Toggle Command for a channel"""
if chan is None:
chan = ctx.message.channel
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('channel', ctx, cmd, chan)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='global', pass_context=True, invoke_without_command=True)
@checks.is_owner()
async def command_toggle_global(self, ctx, cmd:str):
"""Toggle command globally"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('global', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.group(name='module', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_module(self, ctx, module:str, chan:discord.Channel=None):
"""Toggle a bot command module"""
try:
mod = sys.modules['mods.{0}'.format(module)]
except KeyError:
modules = await self.get_modules()
await self.bot.say(':no_entry: Invalid Module\n**Modules**\n`{0}`'.format(', '.join(modules)))
return
if chan:
count = await self.module_command_toggle(mod, 'channel', ctx)
else:
count = await self.module_command_toggle(mod, 'server', ctx)
await self.bot.say(':white_check_mark: Disabled **{0}** commands in module `{1}`.'.format(count, module))
@command_toggle_module.command(name='list', pass_context=True, invoke_without_command=True)
async def command_toggle_module_list(self, ctx):
modules = await self.get_modules()
await self.bot.say(':information_source: **Modules**\n`{0}`'.format(', '.join(modules)))
@command.command(name='all', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_all(self, ctx):
sql = 'SELECT COUNT(*) FROM `command_blacklist` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
count = str(self.cursor.execute(sql).fetchall()[0]['COUNT(*)'])
sql = 'DELETE FROM `command_blacklist` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(':white_check_mark: Enabled **{0}** server command(s).'.format(count))
@command.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def command_list(self, ctx):
sql = 'SELECT * FROM `command_blacklist` WHERE server={0} OR type="global"'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: Server does **not** have any commands blacklisted.')
return
msg = ''
for s in result:
if s['type'] == 'global':
                msg += ':globe_with_meridians: Command Disabled Globally: `{0}`\n'.format(s['command'])
elif s['type'] == 'server':
msg += ':desktop: Command Disabled on Server: `{0}`\n'.format(s['command'])
elif s['type'] == 'channel':
msg += ':arrow_right: Command Disabled in <#{0}>: `{1}`\n'.format(s['channel'] ,s['command'])
elif s['type'] == 'role':
msg += ':eight_spoked_asterisk: Command Disabled for <@&{0}>: `{1}`\n'.format(s['role'], s['command'])
elif s['type'] == 'user':
user = discord.utils.get(self.bot.get_all_members(), id=str(s['user']))
if user is None:
user = '<@{0}> (Not Found)'.format(s['user'])
msg += ':bust_in_silhouette: Command Disabled for **{0}**: `{1}`\n'.format(user, s['command'])
await self.bot.say(':white_check_mark: **Commands Disabled**\n'+msg)
def setup(bot):
bot.add_cog(Commands(bot)) |
capstone/capdb/migrations/0114_auto_20210420_2105.py | rachelaus/capstone | 134 | 12773553 | <filename>capstone/capdb/migrations/0114_auto_20210420_2105.py<gh_stars>100-1000
# Generated by Django 3.2 on 2021-04-20 21:05
import capdb.storages
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('capdb', '0113_auto_20210414_1532'),
]
operations = [
migrations.AlterField(
model_name='caseanalysis',
name='value',
field=models.JSONField(),
),
migrations.AlterField(
model_name='casebodycache',
name='json',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='caseinitialmetadata',
name='metadata',
field=models.JSONField(),
),
migrations.AlterField(
model_name='casemetadata',
name='attorneys',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='docket_numbers',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='judges',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='no_index_elided',
field=models.JSONField(blank=True, help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}', null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='no_index_redacted',
field=models.JSONField(blank=True, help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}', null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='opinions',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='parties',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='casestructure',
name='opinions',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='alto_xml_changed',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='alto_xml_rollback',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='case_xml_changed',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='case_xml_rollback',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='volume_xml_changed',
field=models.JSONField(),
),
migrations.AlterField(
model_name='datamigration',
name='volume_xml_rollback',
field=models.JSONField(),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='attorneys',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='docket_numbers',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='judges',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='no_index_elided',
field=models.JSONField(blank=True, help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}', null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='no_index_redacted',
field=models.JSONField(blank=True, help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}', null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='opinions',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='parties',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalpagestructure',
name='blocks',
field=models.JSONField(),
),
migrations.AlterField(
model_name='historicalpagestructure',
name='duplicates',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalpagestructure',
name='extra_redacted_ids',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalpagestructure',
name='font_names',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalpagestructure',
name='spaces',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalvolumemetadata',
name='bibliographic_review',
field=models.CharField(blank=True, choices=[('No', 'No'), ('Complete', 'Complete'), ('Yes', 'Yes')], max_length=8, null=True),
),
migrations.AlterField(
model_name='historicalvolumemetadata',
name='ingest_errors',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalvolumemetadata',
name='task_statuses',
field=models.JSONField(default=dict, help_text='Date and results of tasks run for this volume'),
),
migrations.AlterField(
model_name='historicalvolumemetadata',
name='xml_metadata',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='pagestructure',
name='blocks',
field=models.JSONField(),
),
migrations.AlterField(
model_name='pagestructure',
name='duplicates',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='pagestructure',
name='extra_redacted_ids',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='pagestructure',
name='font_names',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='pagestructure',
name='spaces',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='volumemetadata',
name='bibliographic_review',
field=models.CharField(blank=True, choices=[('No', 'No'), ('Complete', 'Complete'), ('Yes', 'Yes')], max_length=8, null=True),
),
migrations.AlterField(
model_name='volumemetadata',
name='ingest_errors',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='volumemetadata',
name='pdf_file',
field=models.FileField(blank=True, help_text='Exported volume PDF', max_length=1000, storage=capdb.storages.DownloadOverlayStorage(base_url='http://case.test:8000/download/', location='/Users/jcushman/Documents/capstone/capstone/test_data/downloads'), upload_to=''),
),
migrations.AlterField(
model_name='volumemetadata',
name='task_statuses',
field=models.JSONField(default=dict, help_text='Date and results of tasks run for this volume'),
),
migrations.AlterField(
model_name='volumemetadata',
name='xml_metadata',
field=models.JSONField(blank=True, null=True),
),
]
|
test/components/stream_test.py | sytelus/longview | 3,453 | 12773572 | from tensorwatch.stream import Stream
s1 = Stream(stream_name='s1', console_debug=True)
s2 = Stream(stream_name='s2', console_debug=True)
s3 = Stream(stream_name='s3', console_debug=True)
s1.subscribe(s2)
s2.subscribe(s3)
s3.write('S3 wrote this')
s2.write('S2 wrote this')
s1.write('S1 wrote this')
|
report_builder_demo/demo_second_app/admin.py | nazmizorlu/django-report-builder | 560 | 12773634 | from django.contrib import admin
from .models import Bar
@admin.register(Bar)
class BarAdmin(admin.ModelAdmin):
pass
|
alephnull/live/broker.py | flatM/AlephNull | 234 | 12773639 | __author__ = 'oglebrandon'
from logbook import Logger
from ib.ext.Contract import Contract
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.Order import Order as IBOrder
from alephnull.finance.blotter import Blotter
from alephnull.utils.protocol_utils import Enum
from alephnull.finance.slippage import Transaction
import alephnull.protocol as zp
# Medici fork of IbPy
# https://github.com/CarterBain/Medici
from ib.client.IBrokers import IBClient
import datetime as dt
import pytz
log = Logger('Blotter')
ORDER_STATUS = Enum(
'OPEN',
'FILLED',
'CANCELLED'
)
def round_for_minimum_price_variation(x):
#Todo: modify to round to minimum tick
return x
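# Illustrative sketch of the TODO above (0.25 is an assumed tick size, e.g. ES futures;
# a real implementation would look the tick up per contract):
#     def round_for_minimum_price_variation(x, tick=0.25):
#         return round(x / tick) * tick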
class LiveBlotter(Blotter):
id_map = {}
def __init__(self):
super(LiveBlotter, self).__init__()
def order(self, sid, amount, limit_price, stop_price, order_id=None):
id = super(LiveBlotter, self).order(sid, amount, limit_price, stop_price, order_id=None)
order_obj = self.orders[id]
ib_order = IBOrder()
ib_order.m_transmit = True
ib_order.m_orderRef = order_obj.id
ib_order.m_totalQuantity = order_obj.amount
ib_order.m_action = ['BUY' if ib_order.m_totalQuantity > 0 else 'SELL'][0]
ib_order.m_tif = 'DAY'
#Todo: make the FA params configurable
ib_order.m_faGroup = 'ALL'
ib_order.m_faMethod = 'AvailableEquity'
# infer order type
if order_obj.stop and not order_obj.limit:
ib_order.m_orderType = 'STP'
ib_order.m_auxPrice = float(order_obj.stop)
elif order_obj.limit and not order_obj.stop:
ib_order.m_orderType = 'LMT'
ib_order.m_lmtPrice = float(order_obj.limit)
elif order_obj.stop and order_obj.limit:
ib_order.m_orderType = 'STPLMT'
ib_order.m_auxPrice = float(order_obj.stop)
ib_order.m_lmtPrice = float(order_obj.limit)
else:
ib_order.m_orderType = 'MKT'
contract = Contract()
contract.m_symbol = order_obj.sid
contract.m_currency = 'USD'
if hasattr(order_obj, 'contract'):
# This is a futures contract
contract.m_secType = 'FUT'
contract.m_exchange = 'GLOBEX'
contract.m_expiry = order_obj.contract
else:
# This is a stock
contract.m_secType = 'STK'
contract.m_exchange = 'SMART'
ib_id = self.place_order(contract, ib_order)
self.id_map[order_obj.id] = ib_id
return order_obj.id
def cancel(self, order_id):
ib_id = self.id_map[order_id]
self.cancel_order(ib_id)
        super(LiveBlotter, self).cancel(order_id)
def process_trade(self, trade_event):
# checks if event is trade
if trade_event.type != zp.DATASOURCE_TYPE.TRADE:
return
# checks if is future contract
if hasattr(trade_event, 'contract'):
            sid = (trade_event.sid, trade_event.contract)
else:
sid = trade_event.sid
if sid in self.open_orders:
orders = self.open_orders[sid]
# sort orders by datetime, and filter out future dates
# lambda x: sort([order.dt for order in orders])
else:
return
for order, txn in self.get_transactions(trade_event, orders):
# check that not commission
order.filled += txn.amount
if order.amount - order.filled == 0:
order.status = ORDER_STATUS.FILLED
order.dt = txn.dt
            print(txn.__dict__)
yield txn, order
self.open_orders[sid] = \
[order for order
in self.open_orders[sid]
if order.open]
class LiveExecution(IBClient):
"""Client connection to the Interactive Brokers API
inherits from IBClient in the Medici fork of IbPy
"""
def __init__(self, call_msg):
super(LiveExecution, self).__init__(call_msg=call_msg)
self._blotter = LiveBlotter()
self._blotter.place_order = self.place_order
self._blotter.get_transactions = self.get_transactions
self._blotter.cancel_order = self.cancel_order
super(LiveExecution, self).__track_orders__()
@property
def blotter(self):
return self._blotter
def __ib_to_aleph_sym_map__(self, contract):
decade = dt.date.today().strftime('%y')[0]
sym = contract.m_symbol
exp = contract.m_localSymbol.split(sym)[1]
exp = exp[0] + decade[0] + exp[1]
return (sym, exp)
def total_cash(self):
cash = 0
for acct in self.account.child_accounts:
try:
cash += float([x.value for x in self.account_details(acct)
if x.key == 'TotalCashValue'][0])
except:
return self.total_cash()
return cash
def ib_portfolio(self):
portfolio_store = zp.Portfolio()
positions_store = zp.Positions()
for acct in self.account.child_accounts:
positions = self.portfolio(acct)
for pos in positions:
# Skip empty requests
if hasattr(pos, 'contract'):
contract = pos.contract
# determine position sid
if contract.m_secType == 'STK':
sid = contract.m_localSymbol
if contract.m_secType == 'FUT':
sid = self.__ib_to_aleph_sym_map__(contract)
# if sid not in positions create a new position object
if sid not in positions_store:
if type(sid) is tuple:
positions_store[sid] = zp.Position(sid[0], contract=sid[1])
else:
positions_store[sid] = zp.Position(sid)
positions_store[sid].amount = pos.position_size
positions_store[sid].last_sale_price = pos.market_price
positions_store[sid].cost_basis = pos.avg_cost
else:
current_size = positions_store[sid].amount
# adjust cost basis:
# this should never result in a different value unless
# IB doesn't enforce best execution
positions_store[sid].amount += pos.position_size
if positions_store[sid].amount != 0:
mkt_value = positions_store[sid].cost_basis * current_size
added_value = pos.avg_cost * pos.position_size
positions_store[sid].cost_basis = (mkt_value + added_value) / \
positions_store[sid].amount
portfolio_store.positions_value += pos.market_value
portfolio_store.pnl = pos.realized_pnl + pos.unrealized_pnl
portfolio_store.positions = positions_store
return portfolio_store
def get_transactions(self, event, orders):
import time
time.sleep(1)
efilter = ExecutionFilter()
efilter.m_symbol = event.sid
for order in orders:
# Todo: I need to refactor how executions are summoned, this is currently a huge bottleneck
# cycle through all executions matching the event sid
for execution in self.executions(efilter):
prior_execution = None
# further filter out any executions not matching the order.id
if execution.m_orderRef == order.id:
# prevent processing of duplicate executions
if execution != prior_execution:
order_status_vals = (0, 0)
# cycle through the order status messages to get transaction details
for status in self.order_status(execution.m_orderId):
# filter out duplicate transaction messages
if (status['remaining'], status['filled']) != order_status_vals:
# get execution date
date = dt.datetime.strptime(execution.m_time,
'%Y%m%d %H:%M:%S').replace(tzinfo=pytz.utc)
amount = status['filled'] - order_status_vals[1]
txn = {'sid': event.sid,
'amount': int(amount),
'dt': date,
'price': status['lastFillPrice'],
'order_id': order.id}
transaction = Transaction(**txn)
order_status_vals = (status['remaining'], status['filled'])
#TODO: pretty sure there is still transactions are being duplicated still
if order.status == ORDER_STATUS.OPEN:
yield order, transaction
prior_execution = execution
|
omnidet/train_semantic.py | AtlasGooo2/WoodScape | 348 | 12773651 | <filename>omnidet/train_semantic.py
"""
Semantic segmentation training for OmniDet.
# author: <NAME> <<EMAIL>>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from data_loader.woodscape_loader import WoodScapeRawDataset
from losses.semantic_loss import CrossEntropyLoss2d, FocalLoss
from models.resnet import ResnetEncoder
from models.semantic_decoder import SemanticDecoder
from utils import TrainUtils, semantic_color_encoding, IoU
class SemanticInit(TrainUtils):
def __init__(self, args):
super().__init__(args)
semantic_class_weights = dict(
woodscape_enet=([3.25, 2.33, 20.42, 30.59, 38.4, 45.73, 10.76, 34.16, 44.3, 49.19]),
woodscape_mfb=(0.04, 0.03, 0.43, 0.99, 2.02, 4.97, 0.17, 1.01, 3.32, 20.35))
print(f"=> Setting Class weights based on: {args.semantic_class_weighting} \n"
f"=> {semantic_class_weights[args.semantic_class_weighting]}")
semantic_class_weights = torch.tensor(semantic_class_weights[args.semantic_class_weighting]).to(args.device)
# Setup Metrics
self.metric = IoU(args.semantic_num_classes, args.dataset, ignore_index=None)
if args.semantic_loss == "cross_entropy":
self.semantic_criterion = CrossEntropyLoss2d(weight=semantic_class_weights)
elif args.semantic_loss == "focal_loss":
self.semantic_criterion = FocalLoss(weight=semantic_class_weights, gamma=2, size_average=True)
self.best_semantic_iou = 0.0
self.alpha = 0.5 # to blend semantic predictions with color image
self.color_encoding = semantic_color_encoding(args)
class SemanticModel(SemanticInit):
def __init__(self, args):
super().__init__(args)
# --- Init model ---
self.models["encoder"] = ResnetEncoder(num_layers=self.args.network_layers, pretrained=True).to(self.device)
self.models["semantic"] = SemanticDecoder(self.models["encoder"].num_ch_enc,
n_classes=args.semantic_num_classes).to(self.device)
self.parameters_to_train += list(self.models["encoder"].parameters())
self.parameters_to_train += list(self.models["semantic"].parameters())
if args.use_multiple_gpu:
self.models["encoder"] = torch.nn.DataParallel(self.models["encoder"])
self.models["semantic"] = torch.nn.DataParallel(self.models["semantic"])
print(f"=> Training on the {self.args.dataset.upper()} dataset \n"
f"=> Training model named: {self.args.model_name} \n"
f"=> Models and tensorboard events files are saved to: {self.args.output_directory} \n"
f"=> Training is using the cuda device id: {self.args.cuda_visible_devices} \n"
f"=> Loading {self.args.dataset} training and validation dataset")
# --- Load Data ---
train_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
path_file=args.train_file,
is_train=True,
config=args)
self.train_loader = DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False)
val_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
path_file=args.val_file,
is_train=False,
config=args)
self.val_loader = DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True)
print(f"=> Total number of training examples: {len(train_dataset)} \n"
f"=> Total number of validation examples: {len(val_dataset)}")
self.num_total_steps = len(train_dataset) // args.batch_size * args.epochs
self.configure_optimizers()
if args.pretrained_weights:
self.load_model()
self.save_args()
if 'cuda' in self.device:
torch.cuda.synchronize()
def semantic_train(self):
for self.epoch in range(self.args.epochs):
# switch to train mode
self.set_train()
data_loading_time = 0
gpu_time = 0
before_op_time = time.time()
for batch_idx, inputs in enumerate(self.train_loader):
current_time = time.time()
data_loading_time += (current_time - before_op_time)
before_op_time = current_time
# -- PUSH INPUTS DICT TO DEVICE --
self.inputs_to_device(inputs)
features = self.models["encoder"](inputs["color_aug", 0, 0])
outputs = self.models["semantic"](features)
losses = dict()
losses["semantic_loss"] = self.semantic_criterion(outputs["semantic", 0],
inputs["semantic_labels", 0, 0])
# -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --
self.optimizer.zero_grad()
losses["semantic_loss"].backward()
self.optimizer.step()
duration = time.time() - before_op_time
gpu_time += duration
if batch_idx % self.args.log_frequency == 0:
self.log_time(batch_idx, duration, losses["semantic_loss"].cpu().data, data_loading_time, gpu_time)
self.semantic_statistics("train", inputs, outputs, losses)
data_loading_time = 0
gpu_time = 0
self.step += 1
before_op_time = time.time()
# Validate on each step, save model on improvements
val_metrics = self.semantic_val()
print(self.epoch, "IoU:", val_metrics["mean_iou"])
if val_metrics["mean_iou"] >= self.best_semantic_iou:
print(f"=> Saving model weights with mean_iou of {val_metrics['mean_iou']:.3f} "
f"at step {self.step} on {self.epoch} epoch.")
self.best_semantic_iou = val_metrics["mean_iou"]
self.save_model()
self.lr_scheduler.step(val_metrics["mean_iou"])
print("Training complete!")
@torch.no_grad()
def semantic_val(self):
"""Validate the semantic model"""
self.set_eval()
losses = dict()
for inputs in self.val_loader:
self.inputs_to_device(inputs)
features = self.models["encoder"](inputs["color", 0, 0])
outputs = self.models["semantic"](features)
losses["semantic_loss"] = self.semantic_criterion(outputs["semantic", 0], inputs["semantic_labels", 0, 0])
_, predictions = torch.max(outputs["semantic", 0].data, 1)
self.metric.add(predictions, inputs["semantic_labels", 0, 0])
outputs["class_iou"], outputs["mean_iou"] = self.metric.value()
# Compute stats for the tensorboard
self.semantic_statistics("val", inputs, outputs, losses)
self.metric.reset()
del inputs, losses
self.set_train()
return outputs
def semantic_statistics(self, mode, inputs, outputs, losses) -> None:
writer = self.writers[mode]
for loss, value in losses.items():
writer.add_scalar(f"{loss}", value.mean(), self.step)
if mode == "val":
writer.add_scalar(f"mean_iou", outputs["mean_iou"], self.step)
for k, v in outputs["class_iou"].items():
writer.add_scalar(f"class_iou/{k}", v, self.step)
writer.add_scalar("learning_rate", self.optimizer.param_groups[0]['lr'], self.step)
for j in range(min(4, self.args.batch_size)): # write maximum of four images
if self.args.train == "semantic":
writer.add_image(f"color/{j}", inputs[("color", 0, 0)][j], self.step)
# Predictions is one-hot encoded with "num_classes" channels.
# Convert it to a single int using the indices where the maximum (1) occurs
_, predictions = torch.max(outputs["semantic", 0][j].data, 0)
predictions_gray = predictions.byte().squeeze().cpu().detach().numpy()
color_semantic = np.array(self.trans_pil(inputs[("color", 0, 0)].cpu()[j].data))
not_background = predictions_gray != 0
color_semantic[not_background, ...] = (color_semantic[not_background, ...] * (1 - self.alpha) +
self.color_encoding[predictions_gray[not_background]] * self.alpha)
writer.add_image(f"semantic_pred_0/{j}", color_semantic.transpose(2, 0, 1), self.step)
labels = inputs["semantic_labels", 0, 0][j].data
labels_gray = labels.byte().squeeze().cpu().detach().numpy()
labels_rgb = np.array(self.trans_pil(inputs[("color", 0, 0)].cpu()[j].data))
not_background = labels_gray != 0
labels_rgb[not_background, ...] = (labels_rgb[not_background, ...] * (1 - self.alpha) +
self.color_encoding[labels_gray[not_background]] * self.alpha)
writer.add_image(f"semantic_labels_0/{j}", labels_rgb.transpose(2, 0, 1), self.step)
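# Illustrative entry point sketch (`args` is the parsed config built elsewhere in the repo):
#     model = SemanticModel(args)
#     model.semantic_train()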
|
marlo/crowdai_helpers.py | spMohanty/marlo | 214 | 12773693 | #!/usr/bin/env python
import os
import crowdai_api
########################################################################
# Instatiate Event Notifier
########################################################################
crowdai_events = crowdai_api.events.CrowdAIEvents()
class CrowdAIMarloEvents:
REQUEST_ENV_JOIN_TOKENS="marlo.events.REQUEST_JOIN_TOKENS"
END_OF_GRADING="marlo.events.END_OF_GRADING"
GAME_INIT="marlo.events.GAME_INIT"
ENV_RESET="marlo.events.ENV_RESET"
ENV_ACTION="marlo.events.ENV_ACTION"
STEP_REWARD="marlo.events.STEP_REWARD"
EPISODE_PENDING="marlo.events.EPISODE_PENDING"
EPISODE_INITIATED="marlo.events.EPISODE_INITIATED"
EPISODE_RUNNING="marlo.events.EPISODE_RUNNING"
EPISODE_DONE="marlo.events.EPISODE_DONE" #Episode Complete
EPISODE_ERROR="marlo.events.EPISODE_ERROR"
EVALUATION_PENDING="marlo.events.EVALUATION_PENDING"
EVALUATION_RUNNING="marlo.events.EVALUATION_RUNNING"
EVALUATION_ERROR="marlo.events.EVALUATION_ERROR"
EVALUATION_COMPLETE="marlo.events.EVALUATION_COMPLETE"
def is_grading():
"""Returns if the code is being executed inside the crowdAI evaluation
system.
:returns: bool
"""
return os.getenv("CROWDAI_IS_GRADING", False)
def evaluator_join_token(params={}):
"""Returns evaluator join tokens from the crowdAI evaluation system
:param params: a dictionary containing game params. Note that only a certain
subset of params will be considered by the grader. TODO: Add list
:type params: dict
:returns: a list of strings representing join tokens for all the agents
in a game; or marks the end of the evaluation
"""
crowdai_events = crowdai_api.CrowdAIEvents()
# Request a list of JOIN_TOKENS
response = crowdai_events.register_event(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="",
payload={
"event_type": CrowdAIMarloEvents.REQUEST_ENV_JOIN_TOKENS,
"params": params
},
blocking=True
)
if not response:
register_end_of_grading(crowdai_events)
return response
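# Illustrative usage sketch (the keys accepted in `params` are grader-defined, so an
# empty dict is shown):
#     if is_grading():
#         join_tokens = evaluator_join_token(params={})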
def register_end_of_grading(crowdai_events):
"""Marks the end of an evaluation, and waits for the rest of the
evaluation system to complete the post processing.
:param crowdai_events: a crowdai events object
:type crowdai_events: crowdai_api.CrowdAIEvents
"""
crowdai_events.register_event(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="",
payload={
"event_type": CrowdAIMarloEvents.END_OF_GRADING
},
blocking=True
)
class CrowdAiNotifier():
@staticmethod
def _send_notification(event_type, message, payload={}, blocking=False):
crowdai_events = crowdai_api.events.CrowdAIEvents()
default_payload = {"challenge_id": "MarLo"}
default_payload.update(payload)
# Send the merged payload so the default challenge_id is not silently dropped.
crowdai_events.register_event(event_type, message, default_payload, blocking)
@staticmethod
def _game_init():
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="Game Initialized",
payload={
"event_type" : CrowdAIMarloEvents.GAME_INIT
},
blocking=False)
@staticmethod
def _env_reset():
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="Environment Reset",
payload={
"event_type" : CrowdAIMarloEvents.ENV_RESET
},
blocking=False)
@staticmethod
def _env_action(action):
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="",
payload={
"event_type" : CrowdAIMarloEvents.ENV_ACTION,
"action": action
},
blocking=False)
@staticmethod
def _step_reward(reward):
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="",
payload={
"event_type" : CrowdAIMarloEvents.STEP_REWARD,
"r":reward
},
blocking=False)
@staticmethod
def _episode_done():
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_INFO,
message="",
payload={
"event_type" : CrowdAIMarloEvents.EPISODE_DONE,
},
blocking=False)
@staticmethod
def _env_error(error_message):
CrowdAiNotifier._send_notification(
event_type=crowdai_events.CROWDAI_EVENT_ERROR,
message="execution_error",
payload={
"event_type" : CrowdAIMarloEvents.EPISODE_ERROR,
"error_message":error_message
},
blocking=False)
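# Illustrative usage sketch (a minimal sketch, assuming a grading run; the action
# and reward values below are placeholders and the env interaction is elided).
# It only shows the typical call order a MarLo training loop would follow.
if __name__ == "__main__":
    if is_grading():
        join_tokens = evaluator_join_token(params={})
        # join_tokens would be used to create the actual envs (elided here)
        CrowdAiNotifier._game_init()
        # per episode
        CrowdAiNotifier._env_reset()
        # per step: forward the real action and reward from the env
        CrowdAiNotifier._env_action(0)
        CrowdAiNotifier._step_reward(0.0)
        CrowdAiNotifier._episode_done()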
|
tfHub_sentence_similarity/tfHub_sentence_similarity.py | jae-yong-2/awesomeScripts | 245 | 12773723 | import re
import string
import tensorflow_hub as hub
from scipy.spatial.distance import cdist
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
class SimilarityModel():
def __init__(self):
print("Loading model from tf hub...")
self.model = hub.load(module_url)
print("module %s loaded" % module_url)
def process_text(self, text):
'''Clean text by removing unnecessary characters and altering the format of words.'''
re_print = re.compile('[^%s]' % re.escape(string.printable))
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[$-()\"#/@;:<>{}`+=~|.!?,'*-^]", "", text)
text = text.split()
text = [re_print.sub('', w) for w in text]
return ' '.join(text)
def similarity(self, sentence1, sentence2):
processed_sent1 = self.process_text(sentence1)
processed_sent2 = self.process_text(sentence2)
sent_vector1 = self.model([processed_sent1])
sent_vector2 = self.model([processed_sent2])
similarities = cdist(sent_vector1, sent_vector2, metric='cosine')
return similarities
if __name__ == "__main__":
sim_model = SimilarityModel()
sentence1 = "<NAME>"
sentence2 = "I want money"
distance = sim_model.similarity(sentence1, sentence2)
print("Similarity score is: ", 1 - distance[0][0])
|
examples/pytorch/dimenet/modules/embedding_block.py | ketyi/dgl | 9,516 | 12773725 | import numpy as np
import torch
import torch.nn as nn
from modules.envelope import Envelope
from modules.initializers import GlorotOrthogonal
class EmbeddingBlock(nn.Module):
def __init__(self,
emb_size,
num_radial,
bessel_funcs,
cutoff,
envelope_exponent,
num_atom_types=95,
activation=None):
super(EmbeddingBlock, self).__init__()
self.bessel_funcs = bessel_funcs
self.cutoff = cutoff
self.activation = activation
self.envelope = Envelope(envelope_exponent)
self.embedding = nn.Embedding(num_atom_types, emb_size)
self.dense_rbf = nn.Linear(num_radial, emb_size)
self.dense = nn.Linear(emb_size * 3, emb_size)
self.reset_params()
def reset_params(self):
nn.init.uniform_(self.embedding.weight, a=-np.sqrt(3), b=np.sqrt(3))
GlorotOrthogonal(self.dense_rbf.weight)
GlorotOrthogonal(self.dense.weight)
def edge_init(self, edges):
""" msg emb init """
# m init
rbf = self.dense_rbf(edges.data['rbf'])
if self.activation is not None:
rbf = self.activation(rbf)
m = torch.cat([edges.src['h'], edges.dst['h'], rbf], dim=-1)
m = self.dense(m)
if self.activation is not None:
m = self.activation(m)
# rbf_env init
d_scaled = edges.data['d'] / self.cutoff
rbf_env = [f(d_scaled) for f in self.bessel_funcs]
rbf_env = torch.stack(rbf_env, dim=1)
d_cutoff = self.envelope(d_scaled)
rbf_env = d_cutoff[:, None] * rbf_env
return {'m': m, 'rbf_env': rbf_env}
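    # Illustrative expectations (comments only): `g` is assumed to be a DGL graph
    # whose nodes carry atomic numbers in g.ndata['Z'] and whose edges carry radial
    # basis features in g.edata['rbf'] and distances in g.edata['d']; forward()
    # then attaches g.edata['m'] and g.edata['rbf_env'] via edge_init.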
def forward(self, g):
g.ndata['h'] = self.embedding(g.ndata['Z'])
g.apply_edges(self.edge_init)
return g |
malspider_django/dashboard/management/commands/del_alerts.py | andrewhenke/malspider | 453 | 12773741 | #
# Copyright (c) 2016-present, Cisco Systems, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
from django.core.management.base import BaseCommand, CommandError
from dashboard.models import Alert
from django.db.models import Q
class Command(BaseCommand):
help = 'Removes ALL alerts.'
def handle(self, *args, **options):
Alert.objects.all().delete()
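        # Usage (illustrative): invoke via Django's manage.py, e.g.
        #   python manage.py del_alerts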
|
alipay/aop/api/response/AlipayMarketingRecruitPlanQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12773752 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RecruitEnrollRule import RecruitEnrollRule
class AlipayMarketingRecruitPlanQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingRecruitPlanQueryResponse, self).__init__()
self._description = None
self._enroll_end_time = None
self._enroll_rules = None
self._enroll_start_time = None
self._logo = None
self._plan_id = None
self._plan_name = None
self._status = None
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def enroll_end_time(self):
return self._enroll_end_time
@enroll_end_time.setter
def enroll_end_time(self, value):
self._enroll_end_time = value
@property
def enroll_rules(self):
return self._enroll_rules
@enroll_rules.setter
def enroll_rules(self, value):
if isinstance(value, list):
self._enroll_rules = list()
for i in value:
if isinstance(i, RecruitEnrollRule):
self._enroll_rules.append(i)
else:
self._enroll_rules.append(RecruitEnrollRule.from_alipay_dict(i))
@property
def enroll_start_time(self):
return self._enroll_start_time
@enroll_start_time.setter
def enroll_start_time(self, value):
self._enroll_start_time = value
@property
def logo(self):
return self._logo
@logo.setter
def logo(self, value):
self._logo = value
@property
def plan_id(self):
return self._plan_id
@plan_id.setter
def plan_id(self, value):
self._plan_id = value
@property
def plan_name(self):
return self._plan_name
@plan_name.setter
def plan_name(self, value):
self._plan_name = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingRecruitPlanQueryResponse, self).parse_response_content(response_content)
if 'description' in response:
self.description = response['description']
if 'enroll_end_time' in response:
self.enroll_end_time = response['enroll_end_time']
if 'enroll_rules' in response:
self.enroll_rules = response['enroll_rules']
if 'enroll_start_time' in response:
self.enroll_start_time = response['enroll_start_time']
if 'logo' in response:
self.logo = response['logo']
if 'plan_id' in response:
self.plan_id = response['plan_id']
if 'plan_name' in response:
self.plan_name = response['plan_name']
if 'status' in response:
self.status = response['status']
|
examples/colors.py | edouard-lopez/colorful | 517 | 12773862 | # -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import sys
import colorful
def show():
"""
Show the modifiers and colors
"""
# modifiers
sys.stdout.write(colorful.bold('bold') + ' ')
sys.stdout.write(colorful.dimmed('dimmed') + ' ')
sys.stdout.write(colorful.italic('italic') + ' ')
sys.stdout.write(colorful.underlined('underlined') + ' ')
sys.stdout.write(colorful.inversed('inversed') + ' ')
sys.stdout.write(colorful.concealed('concealed') + ' ')
sys.stdout.write(colorful.struckthrough('struckthrough') + '\n')
# foreground colors
sys.stdout.write(colorful.red('red') + ' ')
sys.stdout.write(colorful.green('green') + ' ')
sys.stdout.write(colorful.yellow('yellow') + ' ')
sys.stdout.write(colorful.blue('blue') + ' ')
sys.stdout.write(colorful.magenta('magenta') + ' ')
sys.stdout.write(colorful.cyan('cyan') + ' ')
sys.stdout.write(colorful.white('white') + '\n')
# background colors
sys.stdout.write(colorful.on_red('red') + ' ')
sys.stdout.write(colorful.on_green('green') + ' ')
sys.stdout.write(colorful.on_yellow('yellow') + ' ')
sys.stdout.write(colorful.on_blue('blue') + ' ')
sys.stdout.write(colorful.on_magenta('magenta') + ' ')
sys.stdout.write(colorful.on_cyan('cyan') + ' ')
sys.stdout.write(colorful.on_white('white') + '\n')
if __name__ == '__main__':
show()
|
tests/python/test_jit_transform.py | ishine/aps | 117 | 12773903 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2021 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import pytest
import torch as th
from aps.io import read_audio
from aps.transform.utils import forward_stft, export_jit
from aps.transform import AsrTransform
egs1_wav = read_audio("tests/data/transform/egs1.wav", sr=16000)
@pytest.mark.parametrize("wav", [egs1_wav])
@pytest.mark.parametrize("feats", ["fbank-log-cmvn", "perturb-mfcc-aug-delta"])
def test_asr_transform_jit(wav, feats):
wav = th.from_numpy(wav[None, ...])
packed = forward_stft(wav,
400,
160,
mode="librosa",
window="hamm",
pre_emphasis=0.96,
center=False,
return_polar=False)
trans = AsrTransform(feats=feats,
stft_mode="librosa",
window="hamm",
frame_len=400,
frame_hop=160,
use_power=True,
pre_emphasis=0.96,
center=False,
aug_prob=0.5,
aug_mask_zero=False)
trans.eval()
scripted_trans = th.jit.script(export_jit(trans.transform))
ref_out = trans(wav, None)[0]
jit_out = scripted_trans(packed)
th.testing.assert_allclose(ref_out, jit_out)
if __name__ == "__main__":
test_asr_transform_jit(egs1_wav, "fbank-log-cmvn")
|
src/transformers/models/roformer/tokenization_utils.py | liminghao1630/transformers | 50,404 | 12773926 | <filename>src/transformers/models/roformer/tokenization_utils.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from typing import List
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
def __init__(self, vocab) -> None:
self.vocab = vocab
self.normalizers = normalizers.BertNormalizer(
clean_text=False,
handle_chinese_chars=True,
strip_accents=False,
lowercase=False,
)
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer. "
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
# Slicing normalized_string here is slow (~6s), but test_alignement_methods passes.
for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
if token in self.vocab:
splits.append(normalized_string[start:end])
else:
token_list = self.normalizers.normalize_str(token).split()
for token in token_list:
if token:
end = start + len(token)
splits.append(normalized_string[start:end])
start = end
# The commented-out version below is fast (~300ms), but test_alignement_methods can't pass:
# for token in self.jieba.cut(str(normalized_string), False):
# if token in self.vocab:
# splits.append(NormalizedString(token))
# else:
# token_list = self.normalizers.normalize_str(token).split()
# for token in token_list:
# if token:
# splits.append(NormalizedString(token))
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
pretok.split(self.jieba_split)
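# Illustrative hook-up sketch (in comments; `vocab` is assumed to be the
# tokenizer's token-to-id mapping). The fast RoFormer tokenizer plugs this
# pre-tokenizer into a `tokenizers` pipeline roughly as follows:
#
#   from tokenizers.pre_tokenizers import PreTokenizer
#   custom_pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
#   backend_tokenizer.pre_tokenizer = custom_pre_tokenizer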
|
zfs/posix/__init__.py | mcclung/zfsp | 600 | 12773930 | <gh_stars>100-1000
import logging
from .. import ondisk
from .. import datasets
from zfs.posix.attributes import POSIXAttrs_for
logger = logging.getLogger(__name__)
class PosixObject(object):
def __init__(self, dnode: ondisk.DNode, dataset: datasets.Dataset) -> None:
self.attrs = POSIXAttrs_for(dataset)(dnode.bonus)
self.dataset = dataset
self.dnode = dnode
from .posix_file import File
from .directory import Directory
|
ml_collections/config_dict/tests/field_reference_test.py | wyddmw/ViT-pytorch-1 | 311 | 12773966 | # Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FieldReference."""
import operator
from absl.testing import absltest
from absl.testing import parameterized
import ml_collections
from ml_collections.config_dict import config_dict
class FieldReferenceTest(parameterized.TestCase):
def _test_binary_operator(self,
initial_value,
other_value,
op,
true_value,
new_initial_value,
new_true_value,
assert_fn=None):
"""Helper for testing binary operators.
Generally speaking this checks that:
1. `op(initial_value, other_value) COMP true_value`
2. `op(new_initial_value, other_value) COMP new_true_value`
where `COMP` is the comparison function defined by `assert_fn`.
Args:
initial_value: Initial value for the `FieldReference`, this is the first
argument for the binary operator.
other_value: The second argument for the binary operator.
op: The binary operator.
true_value: The expected output of the binary operator.
new_initial_value: The value that the `FieldReference` is changed to.
new_true_value: The expected output of the binary operator after the
`FieldReference` has changed.
assert_fn: Function used to check the output values.
"""
if assert_fn is None:
assert_fn = self.assertEqual
ref = ml_collections.FieldReference(initial_value)
new_ref = op(ref, other_value)
assert_fn(new_ref.get(), true_value)
config = ml_collections.ConfigDict()
config.a = initial_value
config.b = other_value
config.result = op(config.get_ref('a'), config.b)
assert_fn(config.result, true_value)
config.a = new_initial_value
assert_fn(config.result, new_true_value)
def _test_unary_operator(self,
initial_value,
op,
true_value,
new_initial_value,
new_true_value,
assert_fn=None):
"""Helper for testing unary operators.
Generally speaking this checks that:
1. `op(initial_value) COMP true_value`
2. `op(new_initial_value) COMP new_true_value`
where `COMP` is the comparison function defined by `assert_fn`.
Args:
initial_value: Initial value for the `FieldReference`, this is the first
argument for the unary operator.
op: The unary operator.
true_value: The expected output of the unary operator.
new_initial_value: The value that the `FieldReference` is changed to.
new_true_value: The expected output of the unary operator after the
`FieldReference` has changed.
assert_fn: Function used to check the output values.
"""
if assert_fn is None:
assert_fn = self.assertEqual
ref = ml_collections.FieldReference(initial_value)
new_ref = op(ref)
assert_fn(new_ref.get(), true_value)
config = ml_collections.ConfigDict()
config.a = initial_value
config.result = op(config.get_ref('a'))
assert_fn(config.result, true_value)
config.a = new_initial_value
assert_fn(config.result, new_true_value)
def testBasic(self):
ref = ml_collections.FieldReference(1)
self.assertEqual(ref.get(), 1)
def testGetRef(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.c, 21.0)
def testFunction(self):
def fn(x):
return x + 5
config = ml_collections.ConfigDict()
config.a = 1
config.b = fn(config.get_ref('a'))
config.c = fn(config.get_ref('b'))
self.assertEqual(config.b, 6)
self.assertEqual(config.c, 11)
config.a = 2
self.assertEqual(config.b, 7)
self.assertEqual(config.c, 12)
def testCycles(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.b, 11.0)
self.assertEqual(config.c, 21.0)
# Introduce a cycle
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = config.get_ref('c') - 1.0
# Introduce a cycle on second operand
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = ml_collections.FieldReference(5.0) + config.get_ref('c')
# We can create multiple FieldReferences that all point to the same object
l = [0]
config = ml_collections.ConfigDict()
config.a = l
config.b = l
config.c = config.get_ref('a') + ['c']
config.d = config.get_ref('b') + ['d']
self.assertEqual(config.c, [0, 'c'])
self.assertEqual(config.d, [0, 'd'])
# Make sure nothing was mutated
self.assertEqual(l, [0])
self.assertEqual(config.c, [0, 'c'])
config.a = [1]
config.b = [2]
self.assertEqual(l, [0])
self.assertEqual(config.c, [1, 'c'])
self.assertEqual(config.d, [2, 'd'])
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 3,
'new_initial_value': 10,
'new_true_value': 12
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 4.5,
'new_initial_value': 3.7,
'new_true_value': 6.2
}, {
'initial_value': 'hello, ',
'other_value': 'world!',
'true_value': 'hello, world!',
'new_initial_value': 'foo, ',
'new_true_value': 'foo, world!'
}, {
'initial_value': ['hello'],
'other_value': ['world'],
'true_value': ['hello', 'world'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'world']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 15.0,
'new_initial_value': 12,
'new_true_value': 17.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 19.0
}, {
'initial_value': 5.0,
'other_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': 8.0,
'new_true_value': None
}, {
'initial_value': config_dict.placeholder(str),
'other_value': 'tail',
'true_value': None,
'new_initial_value': 'head',
'new_true_value': 'headtail'
})
def testAdd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.add,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 5,
'other_value': 3,
'true_value': 2,
'new_initial_value': -1,
'new_true_value': -4
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': -0.5,
'new_initial_value': 12.3,
'new_true_value': 9.8
}, {
'initial_value': set(['hello', 123, 4.5]),
'other_value': set([123]),
'true_value': set(['hello', 4.5]),
'new_initial_value': set([123]),
'new_true_value': set([])
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 5.0,
'new_initial_value': 12,
'new_true_value': 7.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 5.0
})
def testSub(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.sub,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 2,
'new_initial_value': 3,
'new_true_value': 6
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 5.0,
'new_initial_value': 3.5,
'new_true_value': 8.75
}, {
'initial_value': ['hello'],
'other_value': 3,
'true_value': ['hello', 'hello', 'hello'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'foo', 'foo']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 50.0,
'new_initial_value': 1,
'new_true_value': 5.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 84.0
})
def testMul(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.mul,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1.5,
'new_initial_value': 10,
'new_true_value': 5.0
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 0.8,
'new_initial_value': 6.3,
'new_true_value': 2.52
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 2.0,
'new_initial_value': 13,
'new_true_value': 2.6
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 17.5,
'new_true_value': 2.5
})
def testTrueDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.truediv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 7,
'new_true_value': 3
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 2,
'new_initial_value': 28,
'new_true_value': 5
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 3
})
def testFloorDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.floordiv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 9,
'new_initial_value': 10,
'new_true_value': 100
}, {
'initial_value': 2.7,
'other_value': 3.2,
'true_value': 24.0084457245,
'new_initial_value': 6.5,
'new_true_value': 399.321543621
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 1e5,
'new_initial_value': 2,
'new_true_value': 32
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 3.0,
'true_value': None,
'new_initial_value': 7.0,
'new_true_value': 343.0
})
def testPow(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.pow,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 10,
'new_true_value': 0
}, {
'initial_value': 5.3,
'other_value': 3.2,
'true_value': 2.0999999999999996,
'new_initial_value': 77,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 0,
'new_initial_value': 32,
'new_true_value': 2
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 4
})
def testMod(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.mod,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': True,
'other_value': True,
'true_value': True,
'new_initial_value': False,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(False),
'other_value': ml_collections.FieldReference(False),
'true_value': False,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': False,
'new_true_value': False
})
def testAnd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.and_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': False,
'true_value': False,
'new_initial_value': True,
'new_true_value': True
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': True,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': False,
'true_value': None,
'new_initial_value': True,
'new_true_value': True
})
def testOr(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.or_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': True,
'true_value': True,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': False,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': True,
'new_true_value': False
})
def testXor(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.xor,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': -3,
'new_initial_value': -22,
'new_true_value': 22
}, {
'initial_value': 15.3,
'true_value': -15.3,
'new_initial_value': -0.2,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(7),
'true_value': ml_collections.FieldReference(-7),
'new_initial_value': 123,
'new_true_value': -123
}, {
'initial_value': config_dict.placeholder(int),
'true_value': None,
'new_initial_value': -6,
'new_true_value': 6
})
def testNeg(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.neg, true_value,
new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': config_dict.create(attribute=2),
'true_value': 2,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
{
'initial_value': config_dict.create(attribute={'a': 1}),
'true_value': config_dict.create(a=1),
'new_initial_value': config_dict.create(attribute={'b': 1}),
'new_true_value': config_dict.create(b=1),
},
{
'initial_value':
ml_collections.FieldReference(config_dict.create(attribute=2)),
'true_value':
ml_collections.FieldReference(2),
'new_initial_value':
config_dict.create(attribute=3),
'new_true_value':
3,
},
{
'initial_value': config_dict.placeholder(config_dict.ConfigDict),
'true_value': None,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
)
def testAttr(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, lambda x: x.attr('attribute'),
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': 3,
'new_initial_value': -101,
'new_true_value': 101
}, {
'initial_value': -15.3,
'true_value': 15.3,
'new_initial_value': 7.3,
'new_true_value': 7.3
}, {
'initial_value': ml_collections.FieldReference(-7),
'true_value': ml_collections.FieldReference(7),
'new_initial_value': 3,
'new_true_value': 3
}, {
'initial_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': -6.25,
'new_true_value': 6.25
})
def testAbs(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.abs, true_value,
new_initial_value, new_true_value)
def testToInt(self):
self._test_unary_operator(25.3, lambda ref: ref.to_int(), 25, 27.9, 27)
ref = ml_collections.FieldReference(64.7)
ref = ref.to_int()
self.assertEqual(ref.get(), 64)
self.assertEqual(ref._field_type, int)
def testToFloat(self):
self._test_unary_operator(12, lambda ref: ref.to_float(), 12.0, 0, 0.0)
ref = ml_collections.FieldReference(647)
ref = ref.to_float()
self.assertEqual(ref.get(), 647.0)
self.assertEqual(ref._field_type, float)
def testToString(self):
self._test_unary_operator(12, lambda ref: ref.to_str(), '12', 0, '0')
ref = ml_collections.FieldReference(647)
ref = ref.to_str()
self.assertEqual(ref.get(), '647')
self.assertEqual(ref._field_type, str)
def testSetValue(self):
ref = ml_collections.FieldReference(1.0)
other = ml_collections.FieldReference(3)
ref_plus_other = ref + other
self.assertEqual(ref_plus_other.get(), 4.0)
ref.set(2.5)
self.assertEqual(ref_plus_other.get(), 5.5)
other.set(110)
self.assertEqual(ref_plus_other.get(), 112.5)
# Type checking
with self.assertRaises(TypeError):
other.set('this is a string')
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference('this is a string'))
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference(None, field_type=str))
def testSetResult(self):
ref = ml_collections.FieldReference(1.0)
result = ref + 1.0
second_result = result + 1.0
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 2.0)
self.assertEqual(second_result.get(), 3.0)
ref.set(2.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 3.0)
self.assertEqual(second_result.get(), 4.0)
result.set(4.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
# All references are broken at this point.
ref.set(1.0)
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
def testTypeChecking(self):
ref = ml_collections.FieldReference(1)
string_ref = ml_collections.FieldReference('a')
x = ref + string_ref
with self.assertRaises(TypeError):
x.get()
def testNoType(self):
self.assertRaisesRegex(TypeError, 'field_type should be a type.*',
ml_collections.FieldReference, None, 0)
def testEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertEqual(ref1, 1)
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, 2)
self.assertNotEqual(ref1, ref3)
# ConfigDict inside FieldReference
ref1 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref2 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref3 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 2}))
self.assertEqual(ref1, ml_collections.ConfigDict({'a': 1}))
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, ml_collections.ConfigDict({'a': 2}))
self.assertNotEqual(ref1, ref3)
def testLessEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertLessEqual(ref1, 1)
self.assertLessEqual(ref1, 2)
self.assertLessEqual(0, ref1)
self.assertLessEqual(1, ref1)
self.assertGreater(ref1, 0)
self.assertLessEqual(ref1, ref1)
self.assertLessEqual(ref1, ref2)
self.assertLessEqual(ref1, ref3)
self.assertGreater(ref3, ref1)
def testControlFlowError(self):
ref1 = ml_collections.FieldReference(True)
ref2 = ml_collections.FieldReference(False)
with self.assertRaises(NotImplementedError):
if ref1:
pass
with self.assertRaises(NotImplementedError):
_ = ref1 and ref2
with self.assertRaises(NotImplementedError):
_ = ref1 or ref2
with self.assertRaises(NotImplementedError):
_ = not ref1
if __name__ == '__main__':
absltest.main()
|
OpenGL/lazywrapper.py | t20100/pyopengl | 210 | 12773977 | """Simplistic wrapper decorator for Python-coded wrappers"""
from OpenGL.latebind import Curry
from OpenGL import MODULE_ANNOTATIONS
class _LazyWrapper( Curry ):
"""Marker to tell us that an object is a lazy wrapper"""
def lazy( baseFunction ):
"""Produce a lazy-binding decorator that uses baseFunction
Allows simple implementation of wrappers where the
whole wrapper can be summed up as: do one thing,
then call the base function with the cleaned-up result.
Passes baseFunction in as the first argument of the
wrapped function, all other parameters are passed
unchanged. The wrapper class created has __nonzero__
and similar common wrapper entry points defined.
"""
def wrap( wrapper ):
"""Wrap wrapper with baseFunction"""
def __bool__( self ):
return bool( baseFunction )
def __repr__( self ):
return '%s( %r )'%(
'OpenGL.lazywrapper.lazy',
baseFunction.__name__,
)
_with_wrapper = type( wrapper.__name__, (_LazyWrapper,), {
'__repr__': __repr__,
'__doc__': wrapper.__doc__,
'__nonzero__': __bool__,
'__bool__': __bool__,
'wrappedOperation': baseFunction,
'restype': getattr(wrapper, 'restype',getattr(baseFunction,'restype',None)),
} )
with_wrapper = _with_wrapper(wrapper,baseFunction)
with_wrapper.__name__ = wrapper.__name__
if hasattr( baseFunction, '__module__' ):
with_wrapper.__module__ = baseFunction.__module__
return with_wrapper
return wrap
if __name__ == "__main__":
from OpenGL.raw import GLU
func = GLU.gluNurbsCallbackData
output = []
def testwrap( base ):
"Testing"
output.append( base )
testlazy = lazy( func )( testwrap )
testlazy( )
assert testlazy.__doc__ == "Testing"
assert testlazy.__class__.__name__ == 'testwrap'
assert testlazy.__name__ == 'testwrap'
assert testlazy.baseFunction is func
assert testlazy.wrapperFunction is testwrap
assert output
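    # Decorator-syntax sketch (illustrative; the wrapper body is hypothetical):
    #
    #   @lazy(func)
    #   def gluNurbsCallbackData(baseOperation, nurb, userData):
    #       "Do some (hypothetical) argument cleanup, then delegate"
    #       return baseOperation(nurb, userData)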
|
Tests/EndToEndTests/CNTKv2Python/Examples/ConvNet_CIFAR10_DataAug_test.py | shyamalschandra/CNTK | 17,702 | 12773997 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import os
import sys
from cntk.ops.tests.ops_test_utils import cntk_device
from cntk.cntk_py import DeviceKind_GPU
from cntk.device import try_set_default_device
import pytest
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(abs_path)
sys.path.append(os.path.join(abs_path, "..", "..", "..", "..", "Examples", "Image", "Classification", "ConvNet", "Python"))
from prepare_test_data import prepare_CIFAR10_data
from ConvNet_CIFAR10_DataAug import *
TOLERANCE_ABSOLUTE = 1e-1
def test_cifar_convnet_error(device_id):
if cntk_device(device_id).type() != DeviceKind_GPU:
pytest.skip('test only runs on GPU')
try_set_default_device(cntk_device(device_id))
base_path = prepare_CIFAR10_data()
# change dir to locate data.zip correctly
os.chdir(base_path)
from _cntk_py import set_fixed_random_seed, force_deterministic_algorithms
set_fixed_random_seed(1)
force_deterministic_algorithms()
reader_train = create_reader(os.path.join(base_path, 'train_map.txt'), os.path.join(base_path, 'CIFAR-10_mean.xml'), False)
model = create_convnet_cifar10_model(num_classes=10)
model.update_signature((num_channels, image_height, image_width))
criterion = create_criterion_function(model, normalize=lambda x: x / 256)
train_loss, metric = train_model(reader_train, model, criterion, epoch_size=128, max_epochs=5)
expected_loss_metric = (2.2963, 0.9062)
assert np.allclose((train_loss, metric), expected_loss_metric, atol=TOLERANCE_ABSOLUTE)
if __name__=='__main__':
test_cifar_convnet_error(0)
|
config/common/all_params.py | leozz37/makani | 1,178 | 12774012 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All parameters."""
from makani.config import mconfig
@mconfig.Config(deps={
'control': 'common.control.control_params',
'monitor': 'common.monitor.monitor_params',
'sim': 'common.sim.sim_params',
'system': mconfig.WING_MODEL + '.system_params'
})
def MakeParams(params):
return {
'control': params['control'],
'monitor': params['monitor'],
'sim': params['sim'],
'system': params['system']
}
|
train/new_train.py | zeroAska/TFSegmentation | 633 | 12774016 | <filename>train/new_train.py
"""
New trainer faster than ever
"""
from metrics.metrics import Metrics
from utils.reporter import Reporter
from utils.misc import timeit
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import matplotlib
import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class NewTrain(object):
def __init__(self, args, sess, model):
print("\nTraining is initializing itself\n")
self.args = args
self.sess = sess
self.model = model
# shortcut for model params
self.params = self.model.params
# To initialize all variables
self.init = None
self.init_model()
# Create a saver object
self.saver = tf.train.Saver(max_to_keep=self.args.max_to_keep,
keep_checkpoint_every_n_hours=10,
save_relative_paths=True)
self.saver_best = tf.train.Saver(max_to_keep=1,
save_relative_paths=True)
# Load from latest checkpoint if found
self.load_model()
##################################################################################
# Init summaries
# Summary variables
self.scalar_summary_tags = ['mean_iou_on_val',
'train-loss-per-epoch', 'val-loss-per-epoch',
'train-acc-per-epoch', 'val-acc-per-epoch']
self.images_summary_tags = [
('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]),
('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])]
self.summary_tags = []
self.summary_placeholders = {}
self.summary_ops = {}
# init summaries and it's operators
self.init_summaries()
# Create summary writer
self.summary_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph)
##################################################################################
if self.args.mode == 'train':
self.num_iterations_training_per_epoch = self.args.tfrecord_train_len // self.args.batch_size
self.num_iterations_validation_per_epoch = self.args.tfrecord_val_len // self.args.batch_size
else:
self.test_data = None
self.test_data_len = None
self.num_iterations_testing_per_epoch = None
self.load_test_data()
##################################################################################
# Init metrics class
self.metrics = Metrics(self.args.num_classes)
# Init reporter class
if self.args.mode == 'train' or 'overfit':
self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args)
elif self.args.mode == 'test':
self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
##################################################################################
@timeit
def load_test_data(self):
print("Loading Testing data..")
self.test_data = {'X': np.load(self.args.data_dir + "X_val.npy"),
'Y': np.load(self.args.data_dir + "Y_val.npy")}
self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size
print("Test-shape-x -- " + str(self.test_data['X'].shape))
print("Test-shape-y -- " + str(self.test_data['Y'].shape))
self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size
print("Test data is loaded")
@timeit
def init_model(self):
print("Initializing the variables of the model")
self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init)
print("Initialization finished")
def save_model(self):
"""
Save Model Checkpoint
:return:
"""
print("saving a checkpoint")
self.saver.save(self.sess, self.args.checkpoint_dir, self.model.global_step_tensor)
print("Saved a checkpoint")
def save_best_model(self):
"""
Save BEST Model Checkpoint
:return:
"""
print("saving a checkpoint for the best model")
self.saver_best.save(self.sess, self.args.checkpoint_best_dir, self.model.global_step_tensor)
print("Saved a checkpoint for the best model")
def load_best_model(self):
"""
Load the best model checkpoint
:return:
"""
print("loading a checkpoint for BEST ONE")
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_best_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
self.saver_best.restore(self.sess, latest_checkpoint)
else:
print("ERROR NO best checkpoint found")
exit(-1)
print("BEST MODEL LOADED..")
def init_summaries(self):
"""
Create the summary part of the graph
:return:
"""
with tf.variable_scope('train-summary-per-epoch'):
for tag in self.scalar_summary_tags:
self.summary_tags += [tag]
self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag)
self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag])
for tag, shape in self.images_summary_tags:
self.summary_tags += [tag]
self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag)
self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10)
def add_summary(self, step, summaries_dict=None, summaries_merged=None):
"""
Add the summaries to tensorboard
:param step:
:param summaries_dict:
:param summaries_merged:
:return:
"""
if summaries_dict is not None:
summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()],
{self.summary_placeholders[tag]: value for tag, value in
summaries_dict.items()})
for summary in summary_list:
self.summary_writer.add_summary(summary, step)
if summaries_merged is not None:
self.summary_writer.add_summary(summaries_merged, step)
@timeit
def load_model(self):
"""
Load the latest checkpoint
:return:
"""
try:
# This is for loading the pretrained weights if they can't be loaded during initialization.
self.model.encoder.load_pretrained_weights(self.sess)
except AttributeError:
pass
print("Searching for a checkpoint")
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
self.saver.restore(self.sess, latest_checkpoint)
print("Model loaded from the latest checkpoint\n")
else:
print("\n.. No ckpt, SO First time to train :D ..\n")
def train(self):
print("Training mode will begin NOW ..")
tf.train.start_queue_runners(sess=self.sess)
curr_lr = self.model.args.learning_rate
for cur_epoch in range(self.model.global_epoch_tensor.eval(self.sess) + 1, self.args.num_epochs + 1, 1):
# init tqdm and get the epoch value
tt = tqdm(range(self.num_iterations_training_per_epoch), total=self.num_iterations_training_per_epoch,
desc="epoch-" + str(cur_epoch) + "-")
# init acc and loss lists
loss_list = []
acc_list = []
# loop by the number of iterations
for cur_iteration in tt:
# get the cur_it for the summary
cur_it = self.model.global_step_tensor.eval(self.sess)
# Feed this variables to the network
feed_dict = {
self.model.handle: self.model.training_handle,
self.model.is_training: True,
self.model.curr_learning_rate: curr_lr
}
# Run the feed forward but the last iteration finalize what you want to do
if cur_iteration < self.num_iterations_training_per_epoch - 1:
# run the feed_forward
_, loss, acc, summaries_merged = self.sess.run(
[self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries],
feed_dict=feed_dict)
# log loss and acc
loss_list += [loss]
acc_list += [acc]
# summarize
self.add_summary(cur_it, summaries_merged=summaries_merged)
else:
# run the feed_forward
_, loss, acc, summaries_merged, segmented_imgs = self.sess.run(
[self.model.train_op, self.model.loss, self.model.accuracy,
self.model.merged_summaries, self.model.segmented_summary],
feed_dict=feed_dict)
# log loss and acc
loss_list += [loss]
acc_list += [acc]
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
# summarize
summaries_dict = dict()
summaries_dict['train-loss-per-epoch'] = total_loss
summaries_dict['train-acc-per-epoch'] = total_acc
summaries_dict['train_prediction_sample'] = segmented_imgs
self.add_summary(cur_it, summaries_dict=summaries_dict, summaries_merged=summaries_merged)
# report
self.reporter.report_experiment_statistics('train-acc', 'epoch-' + str(cur_epoch), str(total_acc))
self.reporter.report_experiment_statistics('train-loss', 'epoch-' + str(cur_epoch), str(total_loss))
self.reporter.finalize()
# Update the Global step
self.model.global_step_assign_op.eval(session=self.sess,
feed_dict={self.model.global_step_input: cur_it + 1})
# Update the Cur Epoch tensor
# it is the last thing because if it is interrupted it repeat this
self.model.global_epoch_assign_op.eval(session=self.sess,
feed_dict={self.model.global_epoch_input: cur_epoch + 1})
# print in console
tt.close()
print("epoch-" + str(cur_epoch) + "-" + "loss:" + str(total_loss) + "-" + " acc:" + str(total_acc)[
:6])
# Break the loop to finalize this epoch
break
# Update the Global step
self.model.global_step_assign_op.eval(session=self.sess,
feed_dict={self.model.global_step_input: cur_it + 1})
# Save the current checkpoint
if cur_epoch % self.args.save_every == 0:
self.save_model()
# Test the model on validation
if cur_epoch % self.args.test_every == 0:
self.test_per_epoch(step=self.model.global_step_tensor.eval(self.sess),
epoch=self.model.global_epoch_tensor.eval(self.sess))
if cur_epoch % self.args.learning_decay_every == 0:
curr_lr = curr_lr * self.args.learning_decay
print('Current learning rate is ', curr_lr)
print("Training Finished")
def test_per_epoch(self, step, epoch):
print("Validation at step:" + str(step) + " at epoch:" + str(epoch) + " ..")
# init tqdm and get the epoch value
tt = tqdm(range(self.num_iterations_validation_per_epoch), total=self.num_iterations_validation_per_epoch,
desc="Val-epoch-" + str(epoch) + "-")
# init acc and loss lists
loss_list = []
acc_list = []
inf_list = []
# reset metrics
self.metrics.reset()
# get the maximum iou to compare with and save the best model
max_iou = self.model.best_iou_tensor.eval(self.sess)
# init dataset to validation
self.sess.run(self.model.validation_iterator.initializer)
# loop by the number of iterations
for cur_iteration in tt:
# Feed this variables to the network
feed_dict = {
self.model.handle: self.model.validation_handle,
self.model.is_training: False
}
# Run the feed forward but the last iteration finalize what you want to do
if cur_iteration < self.num_iterations_validation_per_epoch - 1:
start = time.time()
# run the feed_forward
next_img, out_argmax, loss, acc = self.sess.run(
[self.model.next_img, self.model.out_argmax, self.model.loss, self.model.accuracy],
feed_dict=feed_dict)
end = time.time()
# log loss and acc
loss_list += [loss]
acc_list += [acc]
inf_list += [end - start]
# log metrics
self.metrics.update_metrics_batch(out_argmax, next_img[1])
else:
start = time.time()
# run the feed_forward
next_img, out_argmax, loss, acc, segmented_imgs = self.sess.run(
[self.model.next_img, self.model.out_argmax, self.model.loss, self.model.accuracy,
self.model.segmented_summary],
feed_dict=feed_dict)
end = time.time()
# log loss and acc
loss_list += [loss]
acc_list += [acc]
inf_list += [end - start]
# log metrics
self.metrics.update_metrics_batch(out_argmax, next_img[1])
# mean over batches
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
mean_iou = self.metrics.compute_final_metrics(self.num_iterations_validation_per_epoch)
mean_iou_arr = self.metrics.iou
mean_inference = str(np.mean(inf_list)) + '-seconds'
# summarize
summaries_dict = dict()
summaries_dict['val-loss-per-epoch'] = total_loss
summaries_dict['val-acc-per-epoch'] = total_acc
summaries_dict['mean_iou_on_val'] = mean_iou
summaries_dict['val_prediction_sample'] = segmented_imgs
self.add_summary(step, summaries_dict=summaries_dict)
self.summary_writer.flush()
# report
self.reporter.report_experiment_statistics('validation-acc', 'epoch-' + str(epoch), str(total_acc))
self.reporter.report_experiment_statistics('validation-loss', 'epoch-' + str(epoch), str(total_loss))
self.reporter.report_experiment_statistics('avg_inference_time_on_validation', 'epoch-' + str(epoch),
str(mean_inference))
self.reporter.report_experiment_validation_iou('epoch-' + str(epoch), str(mean_iou), mean_iou_arr)
self.reporter.finalize()
# print in console
tt.close()
print("Val-epoch-" + str(epoch) + "-" + "loss:" + str(total_loss) + "-" +
"acc:" + str(total_acc)[:6] + "-mean_iou:" + str(mean_iou))
print("Last_max_iou: " + str(max_iou))
if mean_iou > max_iou:
print("This validation got a new best iou. so we will save this one")
# save the best model
self.save_best_model()
# Set the new maximum
self.model.best_iou_assign_op.eval(session=self.sess,
feed_dict={self.model.best_iou_input: mean_iou})
else:
print("hmm not the best validation epoch :/..")
# Break the loop to finalize this epoch
break
def test(self):
print("Testing mode will begin NOW..")
# load the best model checkpoint to test on it
self.load_best_model()
# init tqdm and get the epoch value
tt = tqdm(range(self.test_data_len))
naming = np.load(self.args.data_dir + 'names_train.npy')
# init acc and loss lists
loss_list = []
acc_list = []
img_list = []
# idx of image
idx = 0
# reset metrics
self.metrics.reset()
# loop by the number of iterations
for cur_iteration in tt:
# load mini_batches
x_batch = self.test_data['X'][idx:idx + 1]
y_batch = self.test_data['Y'][idx:idx + 1]
# update idx of mini_batch
idx += 1
# Feed this variables to the network
feed_dict = {self.model.x_pl: x_batch,
self.model.y_pl: y_batch,
self.model.is_training: False
}
# run the feed_forward
out_argmax, loss, acc, summaries_merged, segmented_imgs = self.sess.run(
[self.model.out_argmax, self.model.loss, self.model.accuracy,
self.model.merged_summaries, self.model.segmented_summary],
feed_dict=feed_dict)
np.save(self.args.out_dir + 'npy/' + str(cur_iteration) + '.npy', out_argmax[0])
plt.imsave(self.args.out_dir + 'imgs/' + 'test_' + str(cur_iteration) + '.png', segmented_imgs[0])
# log loss and acc
loss_list += [loss]
acc_list += [acc]
# log metrics
self.metrics.update_metrics(out_argmax[0], y_batch[0], 0, 0)
# mean over batches
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
mean_iou = self.metrics.compute_final_metrics(self.test_data_len)
# print in console
tt.close()
print("Here the statistics")
print("Total_loss: " + str(total_loss))
print("Total_acc: " + str(total_acc)[:6])
print("mean_iou: " + str(mean_iou))
print("Plotting imgs")
def finalize(self):
self.reporter.finalize()
self.summary_writer.close()
self.save_model()
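# Illustrative driver sketch (in comments; `args`, `model` and the session are
# assumed to come from the project's own config/model builders):
#
#   sess = tf.Session()
#   trainer = NewTrain(args, sess, model)
#   if args.mode == 'train':
#       trainer.train()
#   else:
#       trainer.test()
#   trainer.finalize()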
|
algnuth/polynom.py | louisabraham/algnuth | 290 | 12774025 | """
Modular arithmetic (ModInt) and polynomial arithmetic over int, float and Z/pZ (Polynomial)
"""
from collections import defaultdict
import numpy as np
class ModInt:
"""
Integers of Z/pZ
"""
def __init__(self, a, n):
self.v = a % n
self.n = n
def __eq__(a, b):
if isinstance(b, ModInt):
return not bool(a - b)
else:
return NotImplemented
def __hash__(self):
return hash((self.v, self.n))
def __bool__(self):
return bool(self.v)
def __add__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return ModInt(a.v + b.v, a.n)
def __radd__(a, b):
assert isinstance(b, int)
return ModInt(a.v + b, a.n)
def __neg__(a): return ModInt(-a.v, a.n)
def __sub__(a, b): return ModInt(a.v - b.v, a.n)
def __mul__(a, b):
if isinstance(b, int):
return ModInt(b * a.v, a.n)
elif isinstance(b, ModInt):
assert a.n == b.n
return ModInt(a.v * b.v, a.n)
return NotImplemented
def __rmul__(a, b):
return a * b
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def inv(self):
if self.v == 0:
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
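    # Worked example (illustrative): extended_euclid(240, 46) returns (x, y)
    # with 240 * x + 46 * y == gcd(240, 46) == 2, e.g. x = -9, y = 47.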
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
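# Quick illustration: in Z/7Z, ModInt(3, 7) * ModInt(5, 7) == ModInt(1, 7), so
# ModInt(3, 7).inv() == ModInt(5, 7), and ModInt(3, 7) ** 6 == ModInt(1, 7)
# (Fermat's little theorem).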
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial euclidian division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M
zero and one are elements of the same field
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a line is null, we found an element of the kernel
return V[l]
return None
def computeQ(P):
        # only for Z/pZ[X] square-free polynomials, for p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
        # We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
p = P[0].n
S = Polynomial.gcd(P, P.prime())
if S.deg == P.deg:
# P' = 0 so P = R^p
R = Polynomial(P.C[::p])
return defaultdict(int,
{D: p * v
for D, v in Polynomial.factor_unit(R).items()})
else:
factors = defaultdict(int)
if S.deg:
for D, v in S.factor_unit().items():
factors[D] += v
P //= S
# P is now square-free
# We look for Q in Ker(F-Id) \ {1}
Q = Polynomial.computeQ(P)
if Q is None:
# P is irreducible
factors[P] += 1
else:
# P is the product of the gcd(P, Q-i)
# that are factored recursively
for i in range(p):
D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
if D.deg:
for DD, v in D.factor_unit().items():
factors[DD] += v
return factors
def factor(P):
"""
Factorization of P
only in Z/pZ
"""
cd = P[-1]
if P.deg == 0:
return (cd, defaultdict(int))
P = P * (1 / cd)
return (cd, P.factor_unit())
@staticmethod
def ppfactors(fz):
c, Ds = fz
a = str(c) if not Ds or c * c != c else ''
l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D))
+ (v > 1) * ('^%s' % v)
for D, v in sorted(Ds.items(),
key=lambda e: (e[0].deg, e[1]))]
return '⋅'.join(i for i in l if i)
def reduceP(P, p):
return Polynomial([ModInt(c, p) for c in P])
@staticmethod
def sign_changes(l):
return sum(a * b < 0 for a, b in zip(l, l[1:]))
def isreal(P):
return not any(isinstance(c, ModInt) for c in P)
def isinteger(P):
return all(isinstance(c, int) for c in P)
def sturm(P):
"""
Number of distinct real roots
by Sturm's theorem.
Only works on int or float coefficients
"""
inf = float('inf')
assert P.isreal()
A = P
B = A.prime()
l1 = [A(-inf)]
l2 = [A(inf)]
while B:
l1.append(B(-inf))
l2.append(B(inf))
B, A = -A % B, B
return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2)
@property
def r1(P):
"""
Number of real roots with multiplicity
"""
assert P.isreal()
ans = 0
s = P.sturm()
while s:
ans += s
P = P.gcd(P.prime())
s = P.sturm()
return ans
@property
def r2(P):
ans = P.deg - P.r1
assert ans % 2 == 0
return ans // 2
def sylvester(P, Q):
"""
Sylvester's matrix
"""
assert P.isreal()
assert Q.isreal()
p = P.deg
q = Q.deg
P = np.array(P)
Q = np.array(Q)
m = np.zeros((p + q, p + q))
for i in range(q):
m[i][i:i + p + 1] = P
for i in range(p):
m[q + i][i:i + q + 1] = Q
return m
def resultant(P, Q):
"""
Resultant of two real polynomials
"""
return np.linalg.det(P.sylvester(Q))
@property
def disc(P):
"""
Discriminant of a real polynomial
"""
ans = P.resultant(P.prime()) / P[-1]
if P.isinteger():
ans = int(ans.round())
if P.deg % 4 in [0, 1]:
return ans
else:
return -ans
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.C)
@staticmethod
def _formatmonomial(c, d):
assert c
a = b = ''
if c * c != c or not d:
a = str(c) + (d != 0) * '⋅'
if d > 1:
b = 'X^' + str(d)
elif d == 1:
b = 'X'
return a + b
def __str__(self):
if not self.C:
return "0"
ans = '+'.join(self._formatmonomial(c, d)
for (d, c) in reversed(list(enumerate(self))) if c)
return ans.replace("+-", "-").replace('-1⋅', '-')
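# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal, hedged demo of the classes above: a modular inverse via ModInt and
# Berlekamp factorization of a small polynomial over Z/5Z. The values are made
# up, and the exact ordering of the printed factors depends on ppfactors().
if __name__ == '__main__':
    p = 5
    print(ModInt(3, p).inv())  # prints 2, since 3 * 2 = 6 = 1 (mod 5)
    # X^2 + 1 factors over Z/5Z as (X + 2)(X + 3)
    P = Polynomial([ModInt(1, p), ModInt(0, p), ModInt(1, p)])
    print(Polynomial.ppfactors(P.factor()))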
|
official/projects/edgetpu/vision/configs/mobilenet_edgetpu_config.py | 62theories/tf-flask | 82,518 | 12774029 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# type: ignore
"""Configuration definitions for MobilenetEdgeTPU losses, learning rates, optimizers, and training."""
import dataclasses
import os
from typing import Any, Mapping, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.beta.configs import common
from official.vision.beta.configs import image_classification as base_config
@dataclasses.dataclass
class MobilenetEdgeTPUModelConfig(base_config.ImageClassificationModel):
"""Configuration for the MobilenetEdgeTPU model.
Attributes:
name: The name of the model. Defaults to 'MobilenetEdgeTPU'.
model_params: A dictionary that represents the parameters of the
      MobilenetEdgeTPU model. These will be passed in to the "from_name" function.
"""
model_params: Mapping[str, Any] = dataclasses.field(
default_factory=lambda: { # pylint: disable=g-long-lambda
'model_name': 'mobilenet_edgetpu_v2_xs',
'model_weights_path': '',
'checkpoint_format': 'tf_checkpoint',
'overrides': {
'batch_norm': 'tpu',
'num_classes': 1001,
'rescale_input': False,
'dtype': 'bfloat16'
}
})
@dataclasses.dataclass
class MobilenetEdgeTPUTaskConfig(base_config.ImageClassificationTask):
"""Task defination for MobileNetEdgeTPU.
Attributes:
model: A `ModelConfig` instance.
saved_model_path: Instead of initializing a model from the model config,
the model can be loaded from a file path.
"""
model: MobilenetEdgeTPUModelConfig = MobilenetEdgeTPUModelConfig()
saved_model_path: Optional[str] = None
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
def mobilenet_edgetpu_base_experiment_config(
model_name: str) -> cfg.ExperimentConfig:
"""Image classification on imagenet with mobilenet_edgetpu.
Experiment config common across all mobilenet_edgetpu variants.
Args:
model_name: Name of the mobilenet_edgetpu model variant
Returns:
ExperimentConfig
"""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
mobilenet_edgetpu_config = MobilenetEdgeTPUModelConfig(
num_classes=1001, input_size=[224, 224, 3])
mobilenet_edgetpu_config.model_params.model_name = model_name
config = cfg.ExperimentConfig(
task=MobilenetEdgeTPUTaskConfig(
model=mobilenet_edgetpu_config,
losses=base_config.Losses(label_smoothing=0.1),
train_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='bfloat16',
aug_type=common.Augmentation(type='autoaug')),
validation_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
dtype='bfloat16',
drop_remainder=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch * 5,
max_to_keep=10,
train_steps=550 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'rmsprop',
'rmsprop': {
'rho': 0.9,
'momentum': 0.9,
'epsilon': 0.001,
}
},
'ema': {
'average_decay': 0.99,
'trainable_weights_only': False,
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate':
0.008 * (train_batch_size // 128),
'decay_steps':
int(2.4 * steps_per_epoch),
'decay_rate':
0.97,
'staircase':
True
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
},
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
# Registration for MobileNet-EdgeTPU-Search models.
# When this config is used, users need to specify the saved model path via
# --params_override=task.saved_model_path='your/saved_model/path/'.
@exp_factory.register_config_factory('mobilenet_edgetpu_search')
def mobilenet_edgetpu_search() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_search')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_tiny')
def mobilenet_edgetpu_v2_tiny() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_tiny')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_xs')
def mobilenet_edgetpu_v2_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_xs')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_s')
def mobilenet_edgetpu_v2_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_s')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_m')
def mobilenet_edgetpu_v2_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_m')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_l')
def mobilenet_edgetpu_v2_l() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_l')
# Registration for MobileNet-EdgeTPU-V1 models.
@exp_factory.register_config_factory('mobilenet_edgetpu')
def mobilenet_edgetpu() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu')
# Registration for MobileNet-EdgeTPU-V1 models.
# We use 'depth_multiplier' to scale the models.
# E.g. dm1p25 implies depth multiplier of 1.25x
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p25')
def mobilenet_edgetpu_dm1p25() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p25')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p5')
def mobilenet_edgetpu_dm1p5() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p5')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p75')
def mobilenet_edgetpu_dm1p75() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p75')
# Registration for AutoSeg-EdgeTPU backbones
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_xs')
def autoseg_edgetpu_backbone_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_xs')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_s')
def autoseg_edgetpu_backbone_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_s')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_m')
def autoseg_edgetpu_backbone_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_m')
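# Hypothetical usage sketch (not part of the original config module): building
# one of the registered experiment configs directly and reading back a couple of
# its fields. Which fields to print is an arbitrary choice for illustration.
if __name__ == '__main__':
  config = mobilenet_edgetpu_v2_s()
  print(config.task.model.model_params.model_name)  # mobilenet_edgetpu_v2_s
  print(config.trainer.train_steps)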
|
jug/tests/jugfiles/custom_hash_function.py | dombrno/jug | 309 | 12774039 | <gh_stars>100-1000
from jug import TaskGenerator
from jug.utils import CustomHash
hash_called = 0
def bad_hash(x):
global hash_called
hash_called += 1
return ('%s' % x).encode('utf-8')
@TaskGenerator
def double(x):
return 2*x
one = CustomHash(1, bad_hash)
two = double(one)
|
refinery/bnpy/bnpy-dev/bnpy/init/__init__.py | csa0001/Refinery | 103 | 12774062 | """
The :mod:`init` module gathers initialization procedures for model parameters
"""
import FromScratchGauss, FromScratchMult
import FromScratchBernRel
import FromSaved, FromTruth
__all__ = ['FromScratchGauss', 'FromSaved', 'FromTruth', 'FromScratchMult', 'FromScratchBernRel']
|
Codes/Python32/Lib/test/test_timeout.py | eyantra/FireBird_Swiss_Knife | 319 | 12774083 | """Unit tests for socket timeout feature."""
import unittest
from test import support
# This requires the 'network' resource as given on the regrtest command line.
skip_expected = not support.is_resource_enabled('network')
import time
import errno
import socket
class CreationTestCase(unittest.TestCase):
"""Test case for socket.gettimeout() and socket.settimeout()"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def testObjectCreation(self):
# Test Socket creation
self.assertEqual(self.sock.gettimeout(), None,
"timeout not disabled by default")
def testFloatReturnValue(self):
# Test return value of gettimeout()
self.sock.settimeout(7.345)
self.assertEqual(self.sock.gettimeout(), 7.345)
self.sock.settimeout(3)
self.assertEqual(self.sock.gettimeout(), 3)
self.sock.settimeout(None)
self.assertEqual(self.sock.gettimeout(), None)
def testReturnType(self):
# Test return type of gettimeout()
self.sock.settimeout(1)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
self.sock.settimeout(3.9)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
def testTypeCheck(self):
# Test type checking by settimeout()
self.sock.settimeout(0)
self.sock.settimeout(0)
self.sock.settimeout(0.0)
self.sock.settimeout(None)
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, ())
self.assertRaises(TypeError, self.sock.settimeout, [])
self.assertRaises(TypeError, self.sock.settimeout, {})
self.assertRaises(TypeError, self.sock.settimeout, 0j)
def testRangeCheck(self):
# Test range checking by settimeout()
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1.0)
def testTimeoutThenBlocking(self):
# Test settimeout() followed by setblocking()
self.sock.settimeout(10)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), None)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.settimeout(10)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), None)
def testBlockingThenTimeout(self):
# Test setblocking() followed by settimeout()
self.sock.setblocking(0)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
self.sock.setblocking(1)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
class TimeoutTestCase(unittest.TestCase):
# There are a number of tests here trying to make sure that an operation
# doesn't take too much longer than expected. But competing machine
# activity makes it inevitable that such tests will fail at times.
# When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
# and Win98SE. Boosting it to 2.0 helped a lot, but isn't a real
# solution.
fuzz = 2.0
localhost = '127.0.0.1'
def setUp(self):
raise NotImplementedError()
tearDown = setUp
def _sock_operation(self, count, timeout, method, *args):
"""
Test the specified socket method.
The method is run at most `count` times and must raise a socket.timeout
within `timeout` + self.fuzz seconds.
"""
self.sock.settimeout(timeout)
method = getattr(self.sock, method)
for i in range(count):
t1 = time.time()
try:
method(*args)
except socket.timeout as e:
delta = time.time() - t1
break
else:
self.fail('socket.timeout was not raised')
            # These checks should account for timing imprecision
self.assertLess(delta, timeout + self.fuzz)
self.assertGreater(delta, timeout - 1.0)
class TCPTimeoutTestCase(TimeoutTestCase):
"""TCP test case for socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addr_remote = ('www.python.org.', 80)
def tearDown(self):
self.sock.close()
def testConnectTimeout(self):
# Choose a private address that is unlikely to exist to prevent
# failures due to the connect succeeding before the timeout.
# Use a dotted IP address to avoid including the DNS lookup time
# with the connect time. This avoids failing the assertion that
# the timeout occurred fast enough.
addr = ('10.0.0.0', 12345)
with support.transient_internet(addr[0]):
self._sock_operation(1, 0.001, 'connect', addr)
def testRecvTimeout(self):
# Test recv() timeout
with support.transient_internet(self.addr_remote[0]):
self.sock.connect(self.addr_remote)
self._sock_operation(1, 1.5, 'recv', 1024)
def testAcceptTimeout(self):
# Test accept() timeout
support.bind_port(self.sock, self.localhost)
self.sock.listen(5)
self._sock_operation(1, 1.5, 'accept')
def testSend(self):
# Test send() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'send', b"X" * 200000)
def testSendto(self):
# Test sendto() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# The address argument is ignored since we already connected.
self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
serv.getsockname())
def testSendall(self):
# Test sendall() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
class UDPTimeoutTestCase(TimeoutTestCase):
"""UDP test case for socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.sock.close()
def testRecvfromTimeout(self):
# Test recvfrom() timeout
# Prevent "Address already in use" socket exceptions
support.bind_port(self.sock, self.localhost)
self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
support.requires('network')
support.run_unittest(
CreationTestCase,
TCPTimeoutTestCase,
UDPTimeoutTestCase,
)
if __name__ == "__main__":
test_main()
|
cx_Freeze/samples/advanced/advanced_1.py | lexa/cx_Freeze | 358 | 12774086 | <filename>cx_Freeze/samples/advanced/advanced_1.py
#!/usr/bin/env python
print("Hello from cx_Freeze Advanced #1\n")
module = __import__("testfreeze_1")
|
cea/plots/colors.py | architecture-building-systems/cea-toolbox | 121 | 12774087 | """
This is the official list of CEA colors to use in plots
"""
import os
import pandas as pd
import yaml
import warnings
import functools
from typing import List, Callable
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
COLORS_TO_RGB = {"red": "rgb(240,75,91)",
"red_light": "rgb(246,148,143)",
"red_lighter": "rgb(252,217,210)",
"blue": "rgb(63,192,194)",
"blue_light": "rgb(171,221,222)",
"blue_lighter": "rgb(225,242,242)",
"yellow": "rgb(255,209,29)",
"yellow_light": "rgb(255,225,133)",
"yellow_lighter": "rgb(255,243,211)",
"brown": "rgb(174,148,72)",
"brown_light": "rgb(201,183,135)",
"brown_lighter": "rgb(233,225,207)",
"purple": "rgb(171,95,127)",
"purple_light": "rgb(198,149,167)",
"purple_lighter": "rgb(231,214,219)",
"green": "rgb(126,199,143)",
"green_light": "rgb(178,219,183)",
"green_lighter": "rgb(227,241,228)",
"grey": "rgb(68,76,83)",
"grey_light": "rgb(126,127,132)",
"black": "rgb(35,31,32)",
"white": "rgb(255,255,255)",
"orange": "rgb(245,131,69)",
"orange_light": "rgb(248,159,109)",
"orange_lighter": "rgb(254,220,198)"}
def color_to_rgb(color):
try:
return COLORS_TO_RGB[color]
except KeyError:
import re
if re.match("rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)", color):
# already an rgb formatted color
return color
return COLORS_TO_RGB["black"] |
ioflo/aid/aggregating.py | BradyHammond/ioflo | 128 | 12774089 | import math
import statistics
def fuzzyAnd(m):
"""
fuzzy anding
m = list of membership values to be anded
returns smallest value in the list
"""
return min(m)
FuzzyAnd = fuzzyAnd
def fuzzyOr(m):
"""
fuzzy oring
m = list of membership values to be ored
returns largest value in the list
"""
return max(m)
FuzzyOr = fuzzyOr
def fuzzyNot(x):
"""
fuzzy not
    x = single membership value to be negated
returns the inverse membership value
"""
return 1 - x
def compensatoryAnd(m, g=0.5):
"""
anding function
m = list of membership values for x derived from n membership functions
g = gamma value 0=product 1=algebraic sum
returns compensatory AND value of x
"""
g = float(g)
product1 = 1
product2 = 1
for mem in m:
product1 *= mem
product2 *= (1 - mem)
return math.pow(product1, 1 - g) * math.pow((1 - product2), g)
CompensatoryAnd = compensatoryAnd
def gowa(w, wm, l=1.0):
"""
Generalized Ordered Weighted Averaging Operator
More info can be found here:
https://pdfs.semanticscholar.org/2810/c971af0d01d085c799fb2295dc5668d055c8.pdf
l = -1 = Ordered Weighted Harmonic Averaging Operator
l = -.000000000001 = Ordered Weighted Geometric Averaging Operator
l = 1 = Ordered Weighted Arithmetic Averaging Operator
l = 2 = Ordered Weighted Quadratic Averaging Operator
w = list of weights
wm = list of importance weighted membership values
l = lambda real number specifying type of owa to use
returns ordered weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
if l == 0:
raise ZeroDivisionError("Param l cannot be 0. Use -.000000000001 for owg.")
wm.sort(reverse=True)
s = 0
for i in range(len(w)):
s += w[i] * math.pow(wm[i], l)
return math.pow(s, 1/l)
Gowa = gowa
def owa(w, wm):
"""
Ordered Weighted Arithmetic Averaging Operator
    w = [1,0,0,0] = OR (all weight on the largest value, i.e. fuzzy or/max)
    w = [0,0,0,1] = AND (all weight on the smallest value, i.e. fuzzy and/min)
w = [1/n,1/n,1/n,1/n] = Arithmetic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered arithmetic weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
wm.sort(reverse=True)
s = 0
for i in range(len(w)):
s += w[i] * wm[i]
return s
Owa = owa
def owg(w, wm):
"""
Ordered Weighted Geometric Averaging Operator
More info can be found here:
ftp://decsai.ugr.es/pub/arai/tech_rep/decision/libroOWG.pdf
    w = [1,0,0,0] = OR (all weight on the largest value, i.e. fuzzy or/max)
    w = [0,0,0,1] = AND (all weight on the smallest value, i.e. fuzzy and/min)
w = [1/n,1/n,1/n,1/n] = Geometric Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered geometric weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
wm.sort(reverse=True)
s = 1
for i in range(len(w)):
s *= math.pow(wm[i], w[i])
return s
Owg = owg
def owh(w, wm):
"""
Ordered Weighted Harmonic Averaging Operator
    w = [1,0,0,0] = OR (all weight on the largest value, i.e. fuzzy or/max)
    w = [0,0,0,1] = AND (all weight on the smallest value, i.e. fuzzy and/min)
w = [1/n,1/n,1/n,1/n] = Harmonic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered harmonic weighted average
"""
return gowa(w, wm, -1)
Owh = owh
def owq(w, wm):
"""
Ordered Weighted Quadratic Averaging Operator
    w = [1,0,0,0] = OR (all weight on the largest value, i.e. fuzzy or/max)
    w = [0,0,0,1] = AND (all weight on the smallest value, i.e. fuzzy and/min)
w = [1/n,1/n,1/n,1/n] = Quadratic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered quadratic weighted average
"""
return gowa(w, wm, 2)
Owq = owq
def median(wm):
"""
Median Operator
wm = list of importance weighted membership values
returns the middle value in the set
"""
return statistics.median(wm)
Median = median
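# Hypothetical usage sketch (not part of the original module): shows how the
# weight vector steers owa() between OR-like, AND-like and plain averaging
# behaviour. The membership values are made up for illustration only.
if __name__ == '__main__':
    print(owa([1, 0, 0], [0.2, 0.9, 0.5]))              # 0.9 -> max, like fuzzyOr
    print(owa([0, 0, 1], [0.2, 0.9, 0.5]))              # 0.2 -> min, like fuzzyAnd
    print(owa([1 / 3, 1 / 3, 1 / 3], [0.2, 0.9, 0.5]))  # ~0.533 -> arithmetic mean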
|
segmentation/datasets.py | dataflowr/evaluating_bdl | 110 | 12774106 | # code-checked
# server-checked
import cv2
import numpy as np
import os
import os.path as osp
import random
import torch
from torch.utils import data
import pickle
def generate_scale_label(image, label):
f_scale = 0.5 + random.randint(0, 16)/10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
return image, label
def id2trainId(label, id_to_trainid):
label_copy = label.copy()
for k, v in id_to_trainid.items():
label_copy[label == k] = v
return label_copy
################################################################################
# Cityscapes
################################################################################
class DatasetCityscapesAugmentation(data.Dataset):
def __init__(self, root, list_path, max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesAugmentation - num unique examples: %d" % len(self.img_ids))
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetCityscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEval(data.Dataset):
def __init__(self, root, list_path, ignore_label=255):
self.root = root
self.list_path = list_path
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEvalSeq(data.Dataset):
def __init__(self, data_path, sequence="00"):
self.data_path = data_path
self.img_dir = self.data_path + "/leftImg8bit/demoVideo/stuttgart_" + sequence + "/"
self.examples = []
file_names = os.listdir(self.img_dir)
for file_name in file_names:
img_id = file_name.split("_leftImg8bit.png")[0]
img_path = self.img_dir + file_name
example = {}
example["img_path"] = img_path
example["img_id"] = img_id
self.examples.append(example)
self.num_examples = len(self.examples)
print ("DatasetCityscapesEvalSeq - num examples: %d" % self.num_examples)
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
datafiles = self.examples[index]
image = cv2.imread(datafiles["img_path"], cv2.IMREAD_COLOR)
size = image.shape
name = datafiles["img_id"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), np.array(size), name
################################################################################
# Synscapes
################################################################################
class DatasetSynscapesAugmentation(data.Dataset):
def __init__(self, root, root_meta, type="train", max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.root_meta = root_meta
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesAugmentation - num unique examples: %d" % len(self.img_ids))
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetSynscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetSynscapesEval(data.Dataset):
def __init__(self, root, root_meta, type="val", ignore_label=255):
self.root = root
self.root_meta = root_meta
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
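# Hypothetical usage sketch (not part of the original module): wiring one of the
# dataset classes above into a torch DataLoader. The root directory and list
# file paths are placeholders and must point at a real Cityscapes layout.
if __name__ == '__main__':
    eval_set = DatasetCityscapesEval(root="/data/cityscapes",
                                     list_path="./list/cityscapes/val.lst")
    loader = data.DataLoader(eval_set, batch_size=1, shuffle=False, num_workers=2)
    for image, label, size, name in loader:
        print(name, image.shape, label.shape)
        break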
|
ISMLnextGen/retryTest.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 12774128 | <reponame>Ravenclaw-OIer/ISML_auto_voter<filename>ISMLnextGen/retryTest.py
#coding:utf-8
import logging,traceback
from functools import wraps
log = logging.getLogger(__name__)
acceptStatus=(503, 'other accepted status codes')
class RetryExhaustedError(Exception):
pass
#def __init__(self, funcname,args,kwargs):
# print('Exception from {}: {} {}'.format(funcname,args,kwargs))
import aiohttp,asyncio
loop = asyncio.get_event_loop()
def retry(*exceptions, retries=3, cooldown=1, verbose=True):
"""Decorate an async function to execute it a few times before giving up.
Hopes that problem is resolved by another side shortly.
Args:
exceptions (Tuple[Exception]) : The exceptions expected during function execution
retries (int): Number of retries of function execution.
cooldown (int): Seconds to wait before retry.
verbose (bool): Specifies if we should log about not successful attempts.
"""
def wrap(func):
@wraps(func)
async def inner(*args, **kwargs):
retries_count = 0
while True:
try:
result = await func(*args, **kwargs)
                except exceptions as err:  # the exceptions tuple is passed in from retry()
retries_count += 1
message = "Exception:{} during\n{} execution. " \
"{} of {} retries attempted"\
.format(err, func, retries_count, retries)
if retries_count >= retries:
#verbose and log.exception(message)
verbose and print(message)
#raise RetryExhaustedError(
# func.__qualname__, args, kwargs) from err
#raise RetryExhaustedError
return err
else:
#verbose and log.warning(message)
verbose and print(message)
await asyncio.sleep(cooldown)
else:
return result
return inner
return wrap
# Example is taken from http://aiohttp.readthedocs.io/en/stable/#getting-started
async def fetch(session, url):
async with session.get(url) as response:
#return await response.text()
text=await response.text()
if (response.status<400 or response.status in acceptStatus):
return text
else:
return response.raise_for_status()
# Client code, provided for reference
@retry(aiohttp.ClientError,asyncio.TimeoutError)
#@retry(aiohttp.WSServerHandshakeError,aiohttp.ContentTypeError)
async def main():
async with aiohttp.ClientSession() as session:
html = await fetch(session, 'http://localhost:55556')
print(html)
if __name__=='__main__':
loop.run_until_complete(main())
|
python/src/main/python/drivers/run-browser-android.py | KishkinJ10/graphicsfuzz | 519 | 12774133 | <reponame>KishkinJ10/graphicsfuzz<filename>python/src/main/python/drivers/run-browser-android.py
#!/usr/bin/env python3
# Copyright 2018 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex
import subprocess
import os
import argparse
def clear_logcat():
clear_cmd = "adb logcat -c"
subprocess.run(shlex.split(clear_cmd))
# start_logcat() returns the logcat stdout
def start_logcat():
clear_logcat()
logcat_cmd = "adb logcat -b system 'ActivityManager:I' '*:S'"
logcat_subprocess_arg = shlex.split(logcat_cmd)
# The universal_newlines flag enable to get stdout as text and not byte stream
logcat_subprocess = subprocess.Popen(logcat_subprocess_arg, stdout=subprocess.PIPE, universal_newlines=True)
return logcat_subprocess.stdout
def start_worker(server, worker, newtab=False):
# Note the escaped ampersand in the command
worker_cmd = "adb shell am start -n org.mozilla.firefox/org.mozilla.gecko.LauncherActivity"
if newtab:
worker_cmd += " -a android.intent.action.VIEW -d 'http://" + server + "/static/runner.html?context=webgl2\&worker=" + worker + "'"
    # shlex.split() does not preserve the escaping around the URL, so
    # resort to shell=True
subprocess.run(worker_cmd, shell=True)
################################################################################
# Main
parser = argparse.ArgumentParser()
parser.add_argument(
'server',
help='Server URL, e.g. localhost:8080')
parser.add_argument(
'worker',
help='Worker name to identify to the server')
args = parser.parse_args()
logcat = start_logcat()
start_worker(args.server, args.worker, newtab=True)
while True:
line = logcat.readline()
if (" Process org.mozilla.firefox " in line) and (" has died" in line):
print("Detected a crash: " + line, end='')
print('Restart worker...')
start_worker(args.server, args.worker)
|
src/lib/Encryption.py | gamesguru/vault | 147 | 12774140 | import base64
import string
from random import randint, choice
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random as CryptoRandom
class Encryption():
def __init__(self, key):
self.key = key # Key in bytes
self.salted_key = None # Placeholder for optional salted key
def digest_key(self):
"""
Use SHA-256 over our key to get a proper-sized AES key
"""
# Add optional salt to key
key = self.key
if self.salted_key:
key = self.salted_key
return SHA256.new(key).digest()
def get_aes(self, IV):
"""
AES instance
"""
return AES.new(self.digest_key(), AES.MODE_CBC, IV)
def gen_salt(self, set_=True):
"""
Generate a random salt
"""
min_char = 8
max_char = 12
allchar = string.ascii_letters + string.punctuation + string.digits
salt = "".join(choice(allchar)
for x in range(randint(min_char, max_char))).encode()
# Set the salt in the same instance if required
if set_:
self.set_salt(salt)
return salt
def set_salt(self, salt=None):
"""
Add a salt to the secret key for this specific encryption or decryption
"""
if salt:
self.salted_key = salt + self.key
else:
self.salted_key = None
def encrypt(self, secret):
"""
Encrypt a secret
"""
# generate IV
IV = CryptoRandom.new().read(AES.block_size)
# Retrieve AES instance
aes = self.get_aes(IV)
# calculate needed padding
padding = AES.block_size - len(secret) % AES.block_size
# Python 2.x: secret += chr(padding) * padding
secret += bytes([padding]) * padding
# store the IV at the beginning and encrypt
data = IV + aes.encrypt(secret)
# Reset salted key
self.set_salt()
# Return base 64 encoded bytes
return base64.b64encode(data)
def decrypt(self, enc_secret):
"""
Decrypt a secret
"""
# Decode base 64
enc_secret = base64.b64decode(enc_secret)
# extract the IV from the beginning
IV = enc_secret[:AES.block_size]
# Retrieve AES instance
aes = self.get_aes(IV)
# Decrypt
data = aes.decrypt(enc_secret[AES.block_size:])
# pick the padding value from the end; Python 2.x: ord(data[-1])
padding = data[-1]
# Python 2.x: chr(padding) * padding
if data[-padding:] != bytes([padding]) * padding:
raise ValueError("Invalid padding...")
# Reset salted key
self.set_salt()
# Remove the padding and return the bytes
return data[:-padding]
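# Hypothetical usage sketch (not part of the original module): a salted
# encrypt/decrypt round trip. The key, salt handling and secret are illustrative;
# note that encrypt() and decrypt() reset the salt, so it must be set again
# before decrypting.
if __name__ == '__main__':
    enc = Encryption(b'correct horse battery staple')
    salt = enc.gen_salt()                 # also sets the salt for the next call
    token = enc.encrypt(b'my secret value')
    enc.set_salt(salt)                    # same salt must be re-set to decrypt
    assert enc.decrypt(token) == b'my secret value'
    print('round trip ok')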
|
deepconcolic/training.py | nberth/DeepConcolic | 102 | 12774155 | from __future__ import absolute_import, division, print_function, unicode_literals
# NB: see head of `datasets.py'
from training_utils import *
from utils_io import os, tempdir
from datasets import image_kinds
print ("Using TensorFlow version:", tf.__version__)
def train_n_save_classifier (model, class_names, input_kind,
train_data, test_data = None,
optimizer = 'adam',
kind = 'sparse_categorical',
outdir = tempdir,
early_stopping = True,
validate_on_test_data = False,
cm_plot_args = {},
**kwds):
x_train, y_train = train_data
path = os.path.join (outdir, model.name)
log_dir = path + '_logs'
  fw_train, fw_confusion_matrix = \
tf.summary.create_file_writer (os.path.join (log_dir, 'train')), \
tf.summary.create_file_writer (os.path.join (log_dir, 'confusion_matrix'))
# Very basic & dumb test for detecting images...
if input_kind in image_kinds:
log_25_img_dataset_grid (fw_train, class_names, 'Training data (some)', train_data)
model.summary ()
loss, metric = (tf.losses.SparseCategoricalCrossentropy (from_logits=True),
tf.metrics.SparseCategoricalAccuracy ()) # if kind = 'sparse_categorical' else ?
model.compile (optimizer = optimizer,
loss = loss,
metrics = [metric])
callbacks = [
tf.keras.callbacks.ModelCheckpoint (
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath = path + "_{epoch}",
save_best_only = True, # Only save a model if `val_loss` has improved.
monitor = "val_loss",
verbose = 1,
),
tf.keras.callbacks.TensorBoard (
log_dir = log_dir,
histogram_freq = 1, # How often to log histogram visualizations
embeddings_freq = 1, # How often to log embedding visualizations
update_freq = "epoch", # How often to write logs (default: once per epoch)
),
] + ([
# https://www.tensorflow.org/guide/keras/train_and_evaluate#checkpointing_models
tf.keras.callbacks.EarlyStopping (
# Stop training when `val_loss` is no longer improving
monitor = "val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta = 1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience = 3,
verbose = 1,
),
] if early_stopping else []) + ([
log_confusion_matrix_callback (
      fw_confusion_matrix,
model, class_names, test_data,
**cm_plot_args),
] if test_data is not None else [])
valargs = dict (validation_data = test_data) \
if validate_on_test_data and test_data is not None \
else {}
model.fit (x_train, y_train,
callbacks = callbacks,
**{'epochs': 20, # some defaults:
'shuffle': True,
'batch_size': 64,
'validation_split': 0.2,
**valargs,
**kwds})
if test_data is not None:
x_test, y_test = test_data
print ('Performing final validation on given test data:')
# Just check and show accuracy on "official" test data:
_, test_accuracy = model.evaluate (x_test, y_test, verbose = 1)
print ('Validation accuracy on given test data:', test_accuracy)
print ('Saving model in', path + '.h5')
model.save (path + '.h5')
# ---
def classifier (load_data, make_model, model_name = None,
load_data_args = {}, make_model_args = {}, **kwds):
train_data, test_data, input_shape, input_kind, class_names = load_data (**load_data_args)
train_n_save_classifier (make_model (input_shape, name = model_name, **make_model_args),
class_names, input_kind, train_data, test_data, **kwds)
# ---
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Reshape, Dense
def make_dense (input_shape, n_neurons = (100,), n_classes = 5,
input_reshape = False, **kwds):
"""Builds a very basic DNN.
n_neurons: gives the number of neurons for each layer, as a list or
tuple
n_classes: number of output neurons (= |classes|)
input_reshape: whether to include a dummy reshape input layer
(useful to access input features as activations, for DeepConcolic's
internal statistical analysis and layerwise abstractions).
"""
assert len (n_neurons) > 0
layer_args = [dict (activation = 'relu') for _ in n_neurons]
layer_args[0]['input_shape'] = input_shape
layer_args[-1]['activation'] = 'softmax'
layers = (Reshape (input_shape = input_shape, target_shape = input_shape),) if input_reshape else ()
layers += tuple (Dense (n, **args) for n, args in zip (n_neurons, layer_args))
return Sequential (layers, **kwds)
# ---
def make_dense_classifier (load_data, prefix, n_features, n_classes, n_neurons, **kwds):
"""A wrapper for training DNNs built using {make_dense}."""
model_name = (f'{prefix}{n_features}_{n_classes}_dense'
f'_{"_".join (str (c) for c in n_neurons)}')
model_args = dict (n_classes = n_classes, n_neurons = n_neurons)
classifier (load_data, make_dense, epochs = 50,
model_name = model_name, make_model_args = model_args,
**kwds)
# ---
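# Hypothetical usage sketch (not part of the original module): build the small
# dense classifier defined by make_dense above and print its summary. The input
# shape, layer widths, class count and model name are arbitrary placeholders.
if __name__ == '__main__':
  demo = make_dense ((4,), n_neurons = (16, 8, 3), n_classes = 3,
                     input_reshape = True, name = 'demo_dense')
  demo.summary ()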
|
src/main/python/aut/udfs.py | ruebot/aut | 113 | 12774161 | <reponame>ruebot/aut<filename>src/main/python/aut/udfs.py
from pyspark import SparkContext
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.functions import col
def compute_image_size(col):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeImageSize().apply
)
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def compute_md5(col):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeMD5().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def compute_sha1(col):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeSHA1().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def detect_language(col):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.detectLanguage().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def detect_mime_type_tika(col):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()
._jvm.io.archivesunleashed.udfs.package.detectMimeTypeTika()
.apply
)
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def extract_boilerplate(col):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()
._jvm.io.archivesunleashed.udfs.package.extractBoilerpipeText()
.apply
)
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def extract_date(col, dates):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDate().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def extract_domain(col):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDomain().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def extract_image_links(col, image_links):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()
._jvm.io.archivesunleashed.udfs.package.extractImageLinks()
.apply
)
return Column(udf(_to_seq(sc, [col, image_links], _to_java_column)))
def extract_links(col, links):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractLinks().apply
return Column(udf(_to_seq(sc, [col, links], _to_java_column)))
def get_extension_mime(col, mime):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.getExtensionMime().apply
)
return Column(udf(_to_seq(sc, [col, mime], _to_java_column)))
def remove_http_header(col):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTTPHeader().apply
)
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def remove_html(col):
sc = SparkContext.getOrCreate()
udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTML().apply
return Column(udf(_to_seq(sc, [col], _to_java_column)))
def remove_prefix_www(col):
sc = SparkContext.getOrCreate()
udf = (
sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removePrefixWWW().apply
)
return Column(udf(_to_seq(sc, [col], _to_java_column)))
|
python_raster_functions/ObjectDetector.py | ArcGIS/raster-deep-learning | 154 | 12774167 | <reponame>ArcGIS/raster-deep-learning<gh_stars>100-1000
'''
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import importlib
import json
import os
import sys
sys.path.append(os.path.dirname(__file__))
from fields import fields
from features import features
import numpy as np
import prf_utils
class GeometryType:
Point = 1
Multipoint = 2
Polyline = 3
Polygon = 4
class ObjectDetector:
def __init__(self):
self.name = 'Object Detector'
        self.description = 'This python raster function applies a deep learning model to detect objects in imagery'
def initialize(self, **kwargs):
if 'model' not in kwargs:
return
model = kwargs['model']
model_as_file = True
try:
with open(model, 'r') as f:
self.json_info = json.load(f)
except FileNotFoundError:
try:
self.json_info = json.loads(model)
model_as_file = False
except json.decoder.JSONDecodeError:
raise Exception("Invalid model argument")
if 'device' in kwargs:
device = kwargs['device']
if device < -1:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
device = prf_utils.get_available_device()
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
sys.path.append(os.path.dirname(__file__))
framework = self.json_info['Framework']
if 'ModelConfiguration' in self.json_info:
if isinstance(self.json_info['ModelConfiguration'], str):
ChildModelDetector = getattr(importlib.import_module(
'{}.{}'.format(framework, self.json_info['ModelConfiguration'])), 'ChildObjectDetector')
else:
ChildModelDetector = getattr(importlib.import_module(
'{}.{}'.format(framework, self.json_info['ModelConfiguration']['Name'])), 'ChildObjectDetector')
else:
raise Exception("Invalid model configuration")
self.child_object_detector = ChildModelDetector()
self.child_object_detector.initialize(model, model_as_file)
def getParameterInfo(self):
required_parameters = [
{
'name': 'raster',
'dataType': 'raster',
'required': True,
'displayName': 'Raster',
'description': 'Input Raster'
},
{
'name': 'model',
'dataType': 'string',
'required': True,
'displayName': 'Input Model Definition (EMD) File',
'description': 'Input model definition (EMD) JSON file'
},
{
'name': 'device',
'dataType': 'numeric',
'required': False,
'displayName': 'Device ID',
'description': 'Device ID'
},
{
'name': 'padding',
'dataType': 'numeric',
'value': 0,
'required': False,
'displayName': 'Padding',
'description': 'Padding'
},
{
'name': 'score_threshold',
'dataType': 'numeric',
'value': 0.6,
'required': False,
'displayName': 'Confidence Score Threshold [0.0, 1.0]',
'description': 'Confidence score threshold value [0.0, 1.0]'
},
]
if 'BatchSize' not in self.json_info:
required_parameters.append(
{
'name': 'batch_size',
'dataType': 'numeric',
'required': False,
'value': 1,
'displayName': 'Batch Size',
'description': 'Batch Size'
},
)
return self.child_object_detector.getParameterInfo(required_parameters)
def getConfiguration(self, **scalars):
configuration = self.child_object_detector.getConfiguration(**scalars)
if 'DataRange' in self.json_info:
configuration['dataRange'] = tuple(self.json_info['DataRange'])
configuration['inheritProperties'] = 2|4|8
configuration['inputMask'] = True
return configuration
def getFields(self):
return json.dumps(fields)
def getGeometryType(self):
return GeometryType.Polygon
def vectorize(self, **pixelBlocks):
# set pixel values in invalid areas to 0
raster_mask = pixelBlocks['raster_mask']
raster_pixels = pixelBlocks['raster_pixels']
raster_pixels[np.where(raster_mask == 0)] = 0
pixelBlocks['raster_pixels'] = raster_pixels
polygon_list, scores, classes = self.child_object_detector.vectorize(**pixelBlocks)
# bounding_boxes = bounding_boxes.tolist()
scores = scores.tolist()
classes = classes.tolist()
features['features'] = []
for i in range(len(polygon_list)):
rings = [[]]
for j in range(polygon_list[i].shape[0]):
rings[0].append(
[
polygon_list[i][j][1],
polygon_list[i][j][0]
]
)
features['features'].append({
'attributes': {
'OID': i + 1,
'Class': self.json_info['Classes'][classes[i] - 1]['Name'],
'Confidence': scores[i]
},
'geometry': {
'rings': rings
}
})
return {'output_vectors': json.dumps(features)}
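# Hypothetical usage sketch (not part of the original module): the minimal shape
# of the model definition (EMD) dictionary that initialize() above reads. All
# values here are placeholders; real EMD files carry additional keys (the code
# above also looks for 'BatchSize' and 'DataRange').
if __name__ == '__main__':
    example_emd = {
        'Framework': 'TensorFlow',
        'ModelConfiguration': 'ObjectDetectionAPI',
        'Classes': [{'Name': 'Tree'}],
    }
    print(json.dumps(example_emd, indent=2))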
|
test/praatio_test_case.py | timmahrt/praatIO | 208 | 12774177 | <filename>test/praatio_test_case.py
import unittest
import os
class PraatioTestCase(unittest.TestCase):
def __init__(self, *args, **kargs):
super(PraatioTestCase, self).__init__(*args, **kargs)
root = os.path.dirname(os.path.realpath(__file__))
self.dataRoot = os.path.join(root, "files")
self.outputRoot = os.path.join(self.dataRoot, "test_output")
def setUp(self):
if not os.path.exists(self.outputRoot):
os.mkdir(self.outputRoot)
def assertAllAlmostEqual(self, listA, listB):
for valA, valB in zip(listA, listB):
self.assertAlmostEqual(valA, valB)
|
waliki/git/urls.py | luzik/waliki | 324 | 12774189 | <filename>waliki/git/urls.py
import django
try:
from django.conf.urls import patterns, url # django 1.8, 1.9
except ImportError:
from django.conf.urls import url
from waliki.settings import WALIKI_SLUG_PATTERN
from waliki.git.views import whatchanged, WhatchangedFeed, webhook_pull, history, version, diff
_pattern_list = [
url(r'^_whatchanged/(?P<pag>\d+)$', whatchanged, name='waliki_whatchanged'), # noqa
url(r'^_whatchanged$', whatchanged, {'pag': '1'}, name='waliki_whatchanged'), # noqa
url(r'^_whatchanged/rss$', WhatchangedFeed(), name='waliki_whatchanged_rss'),
url(r'^_hooks/pull/(?P<remote>[a-zA-Z0-9]+)$',
webhook_pull, name='waliki_webhook_pull'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + ')/history/(?P<pag>\d+)$',
history, name='waliki_history'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + ')/history/$',
history, {'pag': '1'}, name='waliki_history'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + ')/version/(?P<version>[0-9a-f\^]{4,40})/raw$', version, {'raw': True},
name='waliki_version_raw'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + \
')/version/(?P<version>[0-9a-f\^]{4,40})$', version, name='waliki_version'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + ')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})/raw$',
diff, {'raw': True}, name='waliki_diff_raw'),
url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + \
')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})$', diff, name='waliki_diff'),
]
if django.VERSION[:2] >= (1, 10):
urlpatterns = _pattern_list
else:
urlpatterns = patterns('waliki.git.views',
*_pattern_list
)
|
geocoder/here_reverse.py | termim/geocoder | 1,506 | 12774226 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.location import Location
from geocoder.here import HereResult, HereQuery
class HereReverseResult(HereResult):
@property
def ok(self):
return bool(self.address)
class HereReverse(HereQuery):
"""
HERE Geocoding REST API
=======================
    Send a request to the reverse geocode endpoint to find the
    address closest to a given latitude/longitude location
    (passed as the 'prox' parameter).
API Reference
-------------
https://developer.here.com/rest-apis/documentation/geocoder
"""
provider = 'here'
method = 'reverse'
_RESULT_CLASS = HereReverseResult
_URL = 'http://reverse.geocoder.cit.api.here.com/6.2/reversegeocode.json'
def _build_params(self, location, provider_key, **kwargs):
params = super(HereReverse, self)._build_params(location, provider_key, **kwargs)
del params['searchtext']
location = str(Location(location))
params.update({
'prox': location,
'mode': 'retrieveAddresses',
'gen': 8,
})
return params
if __name__ == '__main__':
g = HereReverse([45.4049053, -75.7077965])
g.debug()
|
Chapter04/4_1_download_data.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 125 | 12774228 | #!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 4
# This program requires Python 3.5.2 or any later version
# It may run on any other version with/without modifications.
#
# Follow the comments inline to make it run on Python 2.7.x.
import argparse
import urllib.request
# Comment out the above line and uncomment the below for Python 2.7.x.
#import urllib2
REMOTE_SERVER_HOST = 'http://www.cnn.com'
class HTTPClient:
def __init__(self, host):
self.host = host
def fetch(self):
response = urllib.request.urlopen(self.host)
# Comment out the above line and uncomment the below for Python 2.7.x.
#response = urllib2.urlopen(self.host)
data = response.read()
text = data.decode('utf-8')
return text
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='HTTP Client Example')
parser.add_argument('--host', action="store", dest="host", default=REMOTE_SERVER_HOST)
given_args = parser.parse_args()
host = given_args.host
client = HTTPClient(host)
print (client.fetch())
|
client/verta/verta/_cli/deployment/predict.py | stefan-petrov-toptal/modeldb | 835 | 12774233 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import click
import json
from .deployment import deployment
from ... import Client
@deployment.group()
def predict():
    """Make a prediction against a deployment-related entity.
    For example, to make a prediction against an endpoint, run
`verta deployment predict endpoint "<endpoint path>" --data "<input data>"`
"""
pass
@predict.command(name="endpoint")
@click.argument("path", nargs=1, required=True)
@click.option("--data", "-d", required=True, help="Input for prediction. Must be a valid JSON string.")
@click.option("--workspace", "-w", help="Workspace to use.")
def predict_endpoint(path, data, workspace):
    """Make a prediction via a deployed endpoint.
"""
client = Client()
try:
endpoint = client.get_endpoint(path=path, workspace=workspace)
except ValueError:
raise click.BadParameter("endpoint with path {} not found".format(path))
deployed_model = endpoint.get_deployed_model()
result = deployed_model.predict(json.loads(data))
click.echo(json.dumps(result))
|
changes/verification.py | michaeljoseph/changes | 135 | 12774242 | import logging
from plumbum import CommandNotFound, local
from changes import shell
log = logging.getLogger(__name__)
def get_test_runner():
test_runners = ['tox', 'nosetests', 'py.test']
test_runner = None
for runner in test_runners:
try:
test_runner = local[runner]
except CommandNotFound:
continue
return test_runner
def run_tests():
"""Executes your tests."""
test_runner = get_test_runner()
if test_runner:
result = test_runner()
log.info('Test execution returned:\n%s' % result)
return result
else:
log.info('No test runner found')
return None
def run_test_command(context):
if context.test_command:
result = shell.dry_run(context.test_command, context.dry_run)
log.info('Test command "%s", returned %s', context.test_command, result)
return True
|
enn/losses/prior_losses.py | MaxGhenis/enn | 130 | 12774263 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prior losses are losses that regulate towards the prior.
These might take the form of weight regularization, or sampling "fake data".
These prior_losses are used in e.g. supervised/prior_experiment.py.
"""
from absl import logging
import dataclasses
from enn import base
from enn import utils
import haiku as hk
import jax
import jax.numpy as jnp
import typing_extensions
class FakeInputGenerator(typing_extensions.Protocol):
def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array:
"""Generates a fake batch of input=x for use in prior regularization."""
@dataclasses.dataclass
class MatchingGaussianData(FakeInputGenerator):
"""Generates a fake batch of input=x for use in prior regularization."""
scale: float = 1.
def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array:
"""Generates a fake batch of input=x for use in prior regularization."""
return jax.random.normal(key, batch.x.shape) * self.scale
def make_gaussian_dataset(batch_size: int,
input_dim: int,
seed: int = 0) -> base.BatchIterator:
"""Returns a batch iterator over random Gaussian data."""
sample_fn = jax.jit(lambda x: jax.random.normal(x, [batch_size, input_dim]))
def batch_iterator():
rng = hk.PRNGSequence(seed)
while True:
x = sample_fn(next(rng))
yield base.Batch(x, y=jnp.ones([x.shape[0], 1]))
return batch_iterator()
def variance_kl(var: base.Array, pred_log_var: base.Array) -> base.Array:
"""Compute the KL divergence between Gaussian variance with matched means."""
log_var = jnp.log(var)
pred_var = jnp.exp(pred_log_var)
return 0.5 * (pred_log_var - log_var + var / pred_var - 1)
def generate_batched_forward_at_data(
num_index_sample: int,
x: base.Array,
enn: base.EpistemicNetwork,
params: hk.Params,
key: base.RngKey) -> base.Output:
"""Generate enn output for batch of data with indices based on random key."""
batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_sample)
batched_forward = jax.vmap(enn.apply, in_axes=[None, None, 0])
batched_out = batched_forward(params, x, batched_indexer(key))
return batched_out
def l2_training_penalty(batched_out: base.Output):
"""Penalize the L2 magnitude of the training network."""
if isinstance(batched_out, base.OutputWithPrior):
return 0.5 * jnp.mean(jnp.square(batched_out.train))
else:
logging.warning('L2 weight penalty only works for OutputWithPrior.')
return 0.
def distill_mean_regression(batched_out: base.Output,
distill_out: base.Output) -> base.Array:
"""Train the mean of the regression to the distill network."""
observed_mean = jnp.mean(utils.parse_net_output(batched_out), axis=0)
distill_mean = jnp.squeeze(utils.parse_net_output(distill_out))
return jnp.mean(jnp.square(distill_mean - observed_mean))
def distill_mean_classification(batched_out: base.Output,
distill_out: base.Output) -> base.Array:
"""Train the mean of the classification to the distill network."""
batched_logits = utils.parse_net_output(batched_out)
batched_probs = jax.nn.softmax(batched_logits, axis=-1)
mean_probs = jnp.mean(batched_probs, axis=0)
distill_probs = jax.nn.softmax(utils.parse_net_output(distill_out), axis=-1)
return jnp.mean(jnp.sum(
mean_probs * jnp.log(mean_probs / distill_probs), axis=1))
def distill_var_regression(batched_out: base.Output,
distill_out: base.Output) -> base.Array:
"""Train the variance of the regression to the distill network."""
assert isinstance(distill_out, base.OutputWithPrior)
observed_var = jnp.var(utils.parse_net_output(batched_out), axis=0)
return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
def distill_var_classification(batched_out: base.Output,
distill_out: base.Output) -> base.Array:
"""Train the variance of the classification to the distill network."""
assert isinstance(distill_out, base.OutputWithPrior)
batched_logits = utils.parse_net_output(batched_out)
observed_var = jnp.var(jax.nn.softmax(batched_logits, axis=-1))
return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
@dataclasses.dataclass
class RegressionPriorLoss(base.LossFn):
"""Regress fake data back to prior, and distill mean/var to mean_index."""
num_index_sample: int
input_generator: FakeInputGenerator = MatchingGaussianData()
scale: float = 1.
distill_index: bool = False
def __call__(self, enn: base.EpistemicNetwork, params: hk.Params,
batch: base.Batch, key: base.RngKey) -> base.Array:
index_key, data_key = jax.random.split(key)
fake_x = self.input_generator(batch, data_key)
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
batched_out = generate_batched_forward_at_data(
self.num_index_sample, fake_x, enn, params, index_key)
# Regularize towards prior output
loss = self.scale * l2_training_penalty(batched_out)
# Distill aggregate stats to the "mean_index"
if hasattr(enn.indexer, 'mean_index') and self.distill_index:
distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
loss += distill_mean_regression(batched_out, distill_out)
loss += distill_var_regression(batched_out, distill_out)
return loss, {}
@dataclasses.dataclass
class ClassificationPriorLoss(base.LossFn):
"""Penalize fake data back to prior, and distill mean/var to mean_index."""
num_index_sample: int
input_generator: FakeInputGenerator = MatchingGaussianData()
scale: float = 1.
distill_index: bool = False
def __call__(self, enn: base.EpistemicNetwork, params: hk.Params,
batch: base.Batch, key: base.RngKey) -> base.Array:
index_key, data_key = jax.random.split(key)
fake_x = self.input_generator(batch, data_key)
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
batched_out = generate_batched_forward_at_data(
self.num_index_sample, fake_x, enn, params, index_key)
# Regularize towards prior output
loss = self.scale * l2_training_penalty(batched_out)
# Distill aggregate stats to the "mean_index"
if hasattr(enn.indexer, 'mean_index') and self.distill_index:
distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
loss += distill_mean_classification(batched_out, distill_out)
loss += distill_var_classification(batched_out, distill_out)
return loss, {}
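# --- Illustrative sketch (added commentary, not part of the library) ---
# A hedged example of constructing one of the losses above; the numbers are
# arbitrary assumptions, and the resulting loss would be called by the
# experiment code (e.g. supervised/prior_experiment.py) with an ENN, params,
# a batch and an RNG key.
def _example_regression_prior_loss() -> RegressionPriorLoss:
    return RegressionPriorLoss(
        num_index_sample=8,                               # index samples per batch
        input_generator=MatchingGaussianData(scale=1.0),  # fake x ~ N(0, 1) shaped like batch.x
        scale=0.1,                                        # weight of the L2 prior penalty
        distill_index=False,                              # no mean-index distillation
    )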
|
nameko/dependency_providers.py | vlcinsky/nameko | 3,425 | 12774273 | """ Nameko built-in dependencies.
"""
from nameko.extensions import DependencyProvider
class Config(DependencyProvider):
""" Dependency provider for accessing configuration values.
"""
def get_dependency(self, worker_ctx):
return self.container.config.copy()
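# --- Illustrative sketch (added, not part of nameko itself) ---
# A minimal, hypothetical service declaring the Config dependency; the service
# name and the "GREETING" key are assumptions. Inside a running worker,
# self.config is the dict copy returned by get_dependency above (a real service
# would also expose its methods with an entrypoint such as @rpc).
class _GreetingService(object):
    name = "greeting_service"
    config = Config()
    def greeting(self):
        return self.config.get("GREETING", "hello")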
|
django/contrib/formtools/wizard/storage/exceptions.py | pomarec/django | 285 | 12774282 | <reponame>pomarec/django
from django.core.exceptions import ImproperlyConfigured
class MissingStorage(ImproperlyConfigured):
pass
class NoFileStorageConfigured(ImproperlyConfigured):
pass
|
setup.py | truthiswill/wait4disney | 106 | 12774306 | from setuptools import setup, find_packages
setup(
name = "disney",
version = "1.0",
description = "A history of Shanghai Disney waiting time",
long_description = "A history of Shanghai Disney waiting time",
license = "Apache License",
url = "http://s.gaott.info",
author = "gtt116",
author_email = "<EMAIL>",
packages = find_packages(),
include_package_data = True,
platforms = "any",
install_requires = [],
scripts = [],
entry_points = {
'console_scripts': [
'disney-fetch = disney.fetch:main',
'disney-publish = disney.publish:main',
]
}
)
|
quarkchain/experimental/random_sampling_simulator.py | QuarkChain/pyquarkchain | 237 | 12774326 | <filename>quarkchain/experimental/random_sampling_simulator.py
import random
committee_size = 150
shard_size = 1024
pool_size = 150 * 1024
# Percentage of attackers in pool
attacker_p = 0.15
attacker_n = int(attacker_p * pool_size)
# Attack threshold (a committee with t percent of attackers)
attacker_tn = int(committee_size / 3)
# Monte-carlo trials
trials = 100000
# Pool members: 1 - attacker; 0 - honest validator
pool = [1 for i in range(attacker_n)]
pool.extend([0 for i in range(pool_size - attacker_n)])
attacked_trials = 0
for trial in range(trials):
if trial != 0 and trial % 10 == 0:
print("Trial %d, attack prob: %f" % (trial, attacked_trials / trial))
random.shuffle(pool)
for j in range(shard_size):
if sum(pool[j * committee_size : (j + 1) * committee_size]) >= attacker_tn:
attacked_trials += 1
break
print("Attack prob: %f" % (attacked_trials / trials))
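# --- Optional analytic cross-check (added, not part of the original script) ---
# A hedged sketch: the attacker count of a single committee is hypergeometric,
# so its attack probability has a closed form; treating the shard_size
# committees as independent (an approximation, since they partition one pool)
# gives a rough estimate to compare with the Monte-Carlo figure above.
try:
    from scipy.stats import hypergeom
    p_one = hypergeom.sf(attacker_tn - 1, pool_size, attacker_n, committee_size)
    p_any = 1.0 - (1.0 - p_one) ** shard_size
    print("Analytic estimate (independence approx.): %f" % p_any)
except ImportError:
    pass  # scipy not installed; skip the cross-check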
|
tests/test_unpipe.py | python-pipe/hellp | 123 | 12774349 | from sspipe import p, px, unpipe
def test_unpipe_active():
a_pipe = px + 1 | px * 5
func = unpipe(a_pipe)
assert func(0) == 5
def test_unpipe_passive():
func = lambda x: (x + 1) * 5
func = unpipe(func)
assert func(0) == 5
|
test/test_bridge.py | jasonpjacobs/systemrdl-compiler | 141 | 12774358 | from unittest_utils import RDLSourceTestCase
class TestBridge(RDLSourceTestCase):
def test_bridge(self):
top = self.compile(
["rdl_src/bridge.rdl"],
"some_bridge"
)
self.assertEqual(
top.find_by_path("some_bridge.ahb.ahb_credits").absolute_address,
0x0
)
self.assertEqual(
top.find_by_path("some_bridge.ahb.ahb_stat").absolute_address,
0x20
)
self.assertEqual(
top.find_by_path("some_bridge.axi.axi_credits").absolute_address,
0x0
)
self.assertEqual(
top.find_by_path("some_bridge.axi.axi_stat").absolute_address,
0x40
)
def test_bridge_errors(self):
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"illegal_wrapper",
r"The 'bridge' property can only be applied to the root address map"
)
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"not_enough_addrmaps",
r"Addrmap 'not_enough_addrmaps' is a bridge and shall contain 2 or more sub-addrmaps"
)
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"illegal_children",
r"Addrmap 'illegal_children' is a bridge which can only contain other addrmaps. Contains a child instance 'y' which is a reg"
)
|
PYTHON/Fibanacci_numbers_by_replacing_prime_numbers_and_multiples_of_5_by_0.py | ayushyado/HACKTOBERFEST2021-2 | 125 | 12774359 | <gh_stars>100-1000
def is_prime(n):
if n > 1:
for i in range(2, n // 2 + 1):
if (n % i) == 0:
return False
else:
return True
else:
return False
def fibonacci(n):
n1, n2 = 1, 1
count = 0
if n == 1:
print(n1)
else:
while count < n:
if not is_prime(n1) and n1 % 5 != 0:
print(n1, end=' ')
else:
print(0, end=' ')
n3 = n1 + n2
n1 = n2
n2 = n3
count += 1
n = int(input("Enter the number:"))
fibonacci(n)
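# Worked example (added for illustration): for n = 10 the underlying sequence
# 1 1 2 3 5 8 13 21 34 55 is printed as
#   1 1 0 0 0 8 0 21 34 0
# because 2, 3, 5 and 13 are prime and 5 and 55 are multiples of 5.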
|
cross3d/motionbuilder/motionbuilderscene.py | vedantirb/cross3d | 129 | 12774362 | ##
# \namespace cross3d.softimage.motionbuilderscene
#
# \remarks The MotionBuilderScene class will define all the operations for Motion Builder scene interaction.
#
# \author douglas
# \author Blur Studio
# \date 06/21/12
#
import pyfbsdk as mob
from cross3d.abstract.abstractscene import AbstractScene
from PyQt4.QtGui import QFileDialog
#------------------------------------------------------------------------------------------------------------------------
class MotionBuilderScene( AbstractScene ):
def __init__( self ):
self._fbapp = mob.FBApplication()
AbstractScene.__init__( self )
def saveFileAs(self, filename=''):
"""
Implements AbstractScene.saveFileAs to save the current scene to the inputed name specified. If no name is supplied,
then the user should be prompted to pick a filename
:param filename: Name of the file to save
:return : Success, Bool
"""
if not filename:
filename = unicode(QFileDialog.getSaveFileName(None, 'Save Motion Builder File', '', 'FBX (*.fbx);;All files (*.*)'))
print 'filename', filename, self._fbapp
if filename:
return self._fbapp.FileSave(unicode(filename).encode('utf8'))
return False
def retarget(self, inputRigPath, inputAnimationPath, outputRigPath, outputAnimationPath):
return False
# register the symbol
import cross3d
cross3d.registerSymbol('Scene', MotionBuilderScene)
|
craftassist/agent/voxel_models/detection-transformer/datasets/voc2012.py | kandluis/droidlet | 669 | 12774370 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
from .voc import VOCDetection
from typing import Iterable
import to_coco_api
VOC_PATH = "/datasets01/VOC/060817/"
class VOCDetection2012(VOCDetection):
def __init__(self, image_set: str = "train", transforms: Iterable = None):
super(VOCDetection, self).__init__(
VOC_PATH, image_set=image_set, year="2012", download=False
)
self.prepare = to_coco_api.PrepareInstance()
self._transforms = transforms
from .voc import make_voc_transforms
def build(image_set, args):
# if we only use voc2012, then we need to adapt trainval and test to
# VOC2012 constraints
if image_set == "test":
image_set = "val"
if image_set == "trainval":
image_set = "train"
return VOCDetection2012(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
|
src/encoded/tests/test_upgrade_organism.py | procha2/encoded | 102 | 12774381 | <filename>src/encoded/tests/test_upgrade_organism.py
import pytest
def test_organism_upgrade(upgrader, organism_1_0):
value = upgrader.upgrade('organism', organism_1_0, target_version='2')
assert value['schema_version'] == '2'
assert value['status'] == 'current'
def test_organism_upgrade_4_5(upgrader, organism_4_0):
value = upgrader.upgrade('organism', organism_4_0, current_version='4', target_version='5')
assert value['schema_version'] == '5'
assert value['status'] == 'released'
organism_4_0['status'] = 'disabled'
organism_4_0['schema_version'] = '4'
value = upgrader.upgrade('organism', organism_4_0, current_version='4', target_version='5')
assert value['schema_version'] == '5'
assert value['status'] == 'deleted'
|
scripts/caffe/convert_caffe_weights_to_npy.py | nnmhuy/flownet2-tf | 438 | 12774382 | <reponame>nnmhuy/flownet2-tf<gh_stars>100-1000
"""
Please read README.md for usage instructions.
Extracts Caffe parameters from a given caffemodel/prototxt to a dictionary of numpy arrays,
ready for conversion to TensorFlow variables. Writes the dictionary to a .npy file.
"""
import argparse
import caffe
import numpy as np
import os
import tempfile
FLAGS = None
ARCHS = {
'C': {
'CAFFEMODEL': '../models/FlowNet2-C/FlowNet2-C_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-C/FlowNet2-C_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetC/conv1',
'conv2': 'FlowNetC/conv2',
'conv3': 'FlowNetC/conv3',
'conv_redir': 'FlowNetC/conv_redir',
'conv3_1': 'FlowNetC/conv3_1',
'conv4': 'FlowNetC/conv4',
'conv4_1': 'FlowNetC/conv4_1',
'conv5': 'FlowNetC/conv5',
'conv5_1': 'FlowNetC/conv5_1',
'conv6': 'FlowNetC/conv6',
'conv6_1': 'FlowNetC/conv6_1',
'Convolution1': 'FlowNetC/predict_flow6',
'deconv5': 'FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetC/predict_flow5',
'deconv4': 'FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetC/predict_flow4',
'deconv3': 'FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetC/predict_flow3',
'deconv2': 'FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetC/predict_flow2',
}
},
'S': {
'CAFFEMODEL': '../models/FlowNet2-S/FlowNet2-S_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-S/FlowNet2-S_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetS/conv1',
'conv2': 'FlowNetS/conv2',
'conv3': 'FlowNetS/conv3',
'conv3_1': 'FlowNetS/conv3_1',
'conv4': 'FlowNetS/conv4',
'conv4_1': 'FlowNetS/conv4_1',
'conv5': 'FlowNetS/conv5',
'conv5_1': 'FlowNetS/conv5_1',
'conv6': 'FlowNetS/conv6',
'conv6_1': 'FlowNetS/conv6_1',
'Convolution1': 'FlowNetS/predict_flow6',
'deconv5': 'FlowNetS/deconv5',
'upsample_flow6to5': 'FlowNetS/upsample_flow6to5',
'Convolution2': 'FlowNetS/predict_flow5',
'deconv4': 'FlowNetS/deconv4',
'upsample_flow5to4': 'FlowNetS/upsample_flow5to4',
'Convolution3': 'FlowNetS/predict_flow4',
'deconv3': 'FlowNetS/deconv3',
'upsample_flow4to3': 'FlowNetS/upsample_flow4to3',
'Convolution4': 'FlowNetS/predict_flow3',
'deconv2': 'FlowNetS/deconv2',
'upsample_flow3to2': 'FlowNetS/upsample_flow3to2',
'Convolution5': 'FlowNetS/predict_flow2',
}
},
'CS': {
'CAFFEMODEL': '../models/FlowNet2-CS/FlowNet2-CS_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CS/FlowNet2-CS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCS/FlowNetC/predict_flow2',
# Net S
'net2_conv1': 'FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCS/FlowNetS/predict_flow2',
}
},
'CSS': {
'CAFFEMODEL': '../models/FlowNet2-CSS/FlowNet2-CSS_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS/FlowNet2-CSS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'CSS-ft-sd': {
'CAFFEMODEL': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'SD': {
'CAFFEMODEL': '../models/FlowNet2-SD/FlowNet2-SD_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-SD/FlowNet2-SD_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv0': 'FlowNetSD/conv0',
'conv1': 'FlowNetSD/conv1',
'conv1_1': 'FlowNetSD/conv1_1',
'conv2': 'FlowNetSD/conv2',
'conv2_1': 'FlowNetSD/conv2_1',
'conv3': 'FlowNetSD/conv3',
'conv3_1': 'FlowNetSD/conv3_1',
'conv4': 'FlowNetSD/conv4',
'conv4_1': 'FlowNetSD/conv4_1',
'conv5': 'FlowNetSD/conv5',
'conv5_1': 'FlowNetSD/conv5_1',
'conv6': 'FlowNetSD/conv6',
'conv6_1': 'FlowNetSD/conv6_1',
'Convolution1': 'FlowNetSD/predict_flow6',
'deconv5': 'FlowNetSD/deconv5',
'upsample_flow6to5': 'FlowNetSD/upsample_flow6to5',
'interconv5': 'FlowNetSD/interconv5',
'Convolution2': 'FlowNetSD/predict_flow5',
'deconv4': 'FlowNetSD/deconv4',
'upsample_flow5to4': 'FlowNetSD/upsample_flow5to4',
'interconv4': 'FlowNetSD/interconv4',
'Convolution3': 'FlowNetSD/predict_flow4',
'deconv3': 'FlowNetSD/deconv3',
'upsample_flow4to3': 'FlowNetSD/upsample_flow4to3',
'interconv3': 'FlowNetSD/interconv3',
'Convolution4': 'FlowNetSD/predict_flow3',
'deconv2': 'FlowNetSD/deconv2',
'upsample_flow3to2': 'FlowNetSD/upsample_flow3to2',
'interconv2': 'FlowNetSD/interconv2',
'Convolution5': 'FlowNetSD/predict_flow2',
},
},
'2': {
'CAFFEMODEL': '../models/FlowNet2/FlowNet2_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2/FlowNet2_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNet2/FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNet2/FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNet2/FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNet2/FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNet2/FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow2',
# Net SD
'netsd_conv0': 'FlowNet2/FlowNetSD/conv0',
'netsd_conv1': 'FlowNet2/FlowNetSD/conv1',
'netsd_conv1_1': 'FlowNet2/FlowNetSD/conv1_1',
'netsd_conv2': 'FlowNet2/FlowNetSD/conv2',
'netsd_conv2_1': 'FlowNet2/FlowNetSD/conv2_1',
'netsd_conv3': 'FlowNet2/FlowNetSD/conv3',
'netsd_conv3_1': 'FlowNet2/FlowNetSD/conv3_1',
'netsd_conv4': 'FlowNet2/FlowNetSD/conv4',
'netsd_conv4_1': 'FlowNet2/FlowNetSD/conv4_1',
'netsd_conv5': 'FlowNet2/FlowNetSD/conv5',
'netsd_conv5_1': 'FlowNet2/FlowNetSD/conv5_1',
'netsd_conv6': 'FlowNet2/FlowNetSD/conv6',
'netsd_conv6_1': 'FlowNet2/FlowNetSD/conv6_1',
'netsd_Convolution1': 'FlowNet2/FlowNetSD/predict_flow6',
'netsd_deconv5': 'FlowNet2/FlowNetSD/deconv5',
'netsd_upsample_flow6to5': 'FlowNet2/FlowNetSD/upsample_flow6to5',
'netsd_interconv5': 'FlowNet2/FlowNetSD/interconv5',
'netsd_Convolution2': 'FlowNet2/FlowNetSD/predict_flow5',
'netsd_deconv4': 'FlowNet2/FlowNetSD/deconv4',
'netsd_upsample_flow5to4': 'FlowNet2/FlowNetSD/upsample_flow5to4',
'netsd_interconv4': 'FlowNet2/FlowNetSD/interconv4',
'netsd_Convolution3': 'FlowNet2/FlowNetSD/predict_flow4',
'netsd_deconv3': 'FlowNet2/FlowNetSD/deconv3',
'netsd_upsample_flow4to3': 'FlowNet2/FlowNetSD/upsample_flow4to3',
'netsd_interconv3': 'FlowNet2/FlowNetSD/interconv3',
'netsd_Convolution4': 'FlowNet2/FlowNetSD/predict_flow3',
'netsd_deconv2': 'FlowNet2/FlowNetSD/deconv2',
'netsd_upsample_flow3to2': 'FlowNet2/FlowNetSD/upsample_flow3to2',
'netsd_interconv2': 'FlowNet2/FlowNetSD/interconv2',
'netsd_Convolution5': 'FlowNet2/FlowNetSD/predict_flow2',
# Fusion Net
'fuse_conv0': 'FlowNet2/fuse_conv0',
'fuse_conv1': 'FlowNet2/fuse_conv1',
'fuse_conv1_1': 'FlowNet2/fuse_conv1_1',
'fuse_conv2': 'FlowNet2/fuse_conv2',
'fuse_conv2_1': 'FlowNet2/fuse_conv2_1',
'fuse__Convolution5': 'FlowNet2/predict_flow2',
'fuse_deconv1': 'FlowNet2/fuse_deconv1',
'fuse_upsample_flow2to1': 'FlowNet2/fuse_upsample_flow2to1',
'fuse_interconv1': 'FlowNet2/fuse_interconv1',
'fuse__Convolution6': 'FlowNet2/predict_flow1',
'fuse_deconv0': 'FlowNet2/fuse_deconv0',
'fuse_upsample_flow1to0': 'FlowNet2/fuse_upsample_flow1to0',
'fuse_interconv0': 'FlowNet2/fuse_interconv0',
'fuse__Convolution7': 'FlowNet2/predict_flow0',
}
},
}
arch = None
# Setup variables to be injected into prototxt.template
# For now, use the dimensions of the Flying Chair Dataset
vars = {}
vars['TARGET_WIDTH'] = vars['ADAPTED_WIDTH'] = 512
vars['TARGET_HEIGHT'] = vars['ADAPTED_HEIGHT'] = 384
vars['SCALE_WIDTH'] = vars['SCALE_HEIGHT'] = 1.0
def main():
# Create tempfile to hold prototxt
tmp = tempfile.NamedTemporaryFile(mode='w', delete=True)
# Parse prototxt and inject `vars`
proto = open(arch['DEPLOY_PROTOTXT']).readlines()
for line in proto:
for key, value in vars.items():
tag = "$%s$" % key
line = line.replace(tag, str(value))
tmp.write(line)
tmp.flush()
# Instantiate Caffe Model
net = caffe.Net(tmp.name, arch['CAFFEMODEL'], caffe.TEST)
out = {}
for (caffe_param, tf_param) in arch['PARAMS'].items():
# Caffe stores weights as (channels_out, channels_in, h, w)
# but TF expects (h, w, channels_in, channels_out)
out[tf_param + '/weights'] = net.params[caffe_param][0].data.transpose((2, 3, 1, 0))
out[tf_param + '/biases'] = net.params[caffe_param][1].data
np.save(FLAGS.out, out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--out',
type=str,
required=True,
help='Output file path, eg /foo/bar.npy'
)
parser.add_argument(
'--arch',
type=str,
choices=['C', 'S', 'CS', 'CSS', 'CSS-ft-sd', 'SD', '2'],
required=True,
help='Name of the FlowNet arch: C, S, CS, CSS, CSS-ft-sd, SD or 2'
)
FLAGS = parser.parse_args()
arch = ARCHS[FLAGS.arch]
main()
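# --- Illustrative sketch (added, not part of the original script) ---
# A hedged example of how the resulting .npy might be consumed on the
# TensorFlow side; the path and the exact variable names are assumptions that
# depend on the chosen --arch and on how the FlowNet graph is built.
# import numpy as np
# weights = np.load('/foo/bar.npy', allow_pickle=True).item()
# kernel = weights['FlowNetS/conv1/weights']   # (h, w, channels_in, channels_out)
# bias = weights['FlowNetS/conv1/biases']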
|
python/app/plugins/http/Tomcat/ajpy.py | taomujian/linbing | 351 | 12774385 | <gh_stars>100-1000
#!/usr/bin/env python
import socket
import struct
from io import BytesIO
def pack_string(s):
if s is None:
return struct.pack(">h", -1)
l = len(s)
return struct.pack(">H%dsb" % l, l, s.encode('utf8'), 0)
def unpack(stream, fmt):
size = struct.calcsize(fmt)
buf = stream.read(size)
return struct.unpack(fmt, buf)
def unpack_string(stream):
size, = unpack(stream, ">h")
if size == -1: # null string
return None
res, = unpack(stream, "%ds" % size)
stream.read(1) # \0
return res
class NotFoundException(Exception):
pass
class AjpBodyRequest(object):
# server == web server, container == servlet
SERVER_TO_CONTAINER, CONTAINER_TO_SERVER = range(2)
MAX_REQUEST_LENGTH = 8186
def __init__(self, data_stream, data_len, data_direction=None):
self.data_stream = data_stream
self.data_len = data_len
self.data_direction = data_direction
def serialize(self):
data = self.data_stream.read(AjpBodyRequest.MAX_REQUEST_LENGTH)
if len(data) == 0:
return struct.pack(">bbH", 0x12, 0x34, 0x00)
else:
res = struct.pack(">H", len(data))
res += data
if self.data_direction == AjpBodyRequest.SERVER_TO_CONTAINER:
header = struct.pack(">bbH", 0x12, 0x34, len(res))
else:
header = struct.pack(">bbH", 0x41, 0x42, len(res))
return header + res
def send_and_receive(self, socket, stream):
while True:
data = self.serialize()
socket.send(data)
r = AjpResponse.receive(stream)
while r.prefix_code != AjpResponse.GET_BODY_CHUNK and r.prefix_code != AjpResponse.SEND_HEADERS:
r = AjpResponse.receive(stream)
if r.prefix_code == AjpResponse.SEND_HEADERS or len(data) == 4:
break
class AjpForwardRequest(object):
_, OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, PROPFIND, PROPPATCH, MKCOL, COPY, MOVE, LOCK, UNLOCK, ACL, REPORT, VERSION_CONTROL, CHECKIN, CHECKOUT, UNCHECKOUT, SEARCH, MKWORKSPACE, UPDATE, LABEL, MERGE, BASELINE_CONTROL, MKACTIVITY = range(28)
REQUEST_METHODS = {'GET': GET, 'POST': POST, 'HEAD': HEAD, 'OPTIONS': OPTIONS, 'PUT': PUT, 'DELETE': DELETE, 'TRACE': TRACE}
# server == web server, container == servlet
SERVER_TO_CONTAINER, CONTAINER_TO_SERVER = range(2)
COMMON_HEADERS = ["SC_REQ_ACCEPT",
"SC_REQ_ACCEPT_CHARSET", "SC_REQ_ACCEPT_ENCODING", "SC_REQ_ACCEPT_LANGUAGE", "SC_REQ_AUTHORIZATION",
"SC_REQ_CONNECTION", "SC_REQ_CONTENT_TYPE", "SC_REQ_CONTENT_LENGTH", "SC_REQ_COOKIE", "SC_REQ_COOKIE2",
"SC_REQ_HOST", "SC_REQ_PRAGMA", "SC_REQ_REFERER", "SC_REQ_USER_AGENT"
]
ATTRIBUTES = ["context", "servlet_path", "remote_user", "auth_type", "query_string", "route", "ssl_cert", "ssl_cipher", "ssl_session", "req_attribute", "ssl_key_size", "secret", "stored_method"]
def __init__(self, data_direction=None):
self.prefix_code = 0x02
self.method = None
self.protocol = None
self.req_uri = None
self.remote_addr = None
self.remote_host = None
self.server_name = None
self.server_port = None
self.is_ssl = None
self.num_headers = None
self.request_headers = None
self.attributes = None
self.data_direction = data_direction
def pack_headers(self):
self.num_headers = len(self.request_headers)
        res = struct.pack(">h", self.num_headers)
for h_name in self.request_headers:
if h_name.startswith("SC_REQ"):
code = AjpForwardRequest.COMMON_HEADERS.index(h_name) + 1
res += struct.pack("BB", 0xA0, code)
else:
res += pack_string(h_name)
res += pack_string(self.request_headers[h_name])
return res
def pack_attributes(self):
res = b""
for attr in self.attributes:
a_name = attr['name']
code = AjpForwardRequest.ATTRIBUTES.index(a_name) + 1
res += struct.pack("b", code)
if a_name == "req_attribute":
aa_name, a_value = attr['value']
res += pack_string(aa_name)
res += pack_string(a_value)
else:
res += pack_string(attr['value'])
res += struct.pack("B", 0xFF)
return res
def serialize(self):
        res = struct.pack("bb", self.prefix_code, self.method)
res += pack_string(self.protocol)
res += pack_string(self.req_uri)
res += pack_string(self.remote_addr)
res += pack_string(self.remote_host)
res += pack_string(self.server_name)
res += struct.pack(">h", self.server_port)
res += struct.pack("?", self.is_ssl)
res += self.pack_headers()
res += self.pack_attributes()
if self.data_direction == AjpForwardRequest.SERVER_TO_CONTAINER:
header = struct.pack(">bbh", 0x12, 0x34, len(res))
else:
header = struct.pack(">bbh", 0x41, 0x42, len(res))
return header + res
def parse(self, raw_packet):
        stream = BytesIO(raw_packet)  # raw AJP packets are bytes, so use BytesIO
self.magic1, self.magic2, data_len = unpack(stream, "bbH")
self.prefix_code, self.method = unpack(stream, "bb")
self.protocol = unpack_string(stream)
self.req_uri = unpack_string(stream)
self.remote_addr = unpack_string(stream)
self.remote_host = unpack_string(stream)
self.server_name = unpack_string(stream)
self.server_port = unpack(stream, ">h")
self.is_ssl = unpack(stream, "?")
self.num_headers, = unpack(stream, ">H")
self.request_headers = {}
for i in range(self.num_headers):
code, = unpack(stream, ">H")
if code > 0xA000:
h_name = AjpForwardRequest.COMMON_HEADERS[code - 0xA001]
else:
h_name = unpack(stream, "%ds" % code)
stream.read(1) # \0
h_value = unpack_string(stream)
self.request_headers[h_name] = h_value
def send_and_receive(self, socket, stream, save_cookies=False):
res = []
i = socket.sendall(self.serialize())
if self.method == AjpForwardRequest.POST:
return res
r = AjpResponse.receive(stream)
assert r.prefix_code == AjpResponse.SEND_HEADERS
res.append(r)
if save_cookies and 'Set-Cookie' in r.response_headers:
            self.request_headers['SC_REQ_COOKIE'] = r.response_headers['Set-Cookie']
# read body chunks and end response packets
while True:
r = AjpResponse.receive(stream)
res.append(r)
if r.prefix_code == AjpResponse.END_RESPONSE:
break
elif r.prefix_code == AjpResponse.SEND_BODY_CHUNK:
continue
else:
raise NotImplementedError
break
return res
class AjpResponse(object):
_,_,_,SEND_BODY_CHUNK, SEND_HEADERS, END_RESPONSE, GET_BODY_CHUNK = range(7)
COMMON_SEND_HEADERS = [
"Content-Type", "Content-Language", "Content-Length", "Date", "Last-Modified",
"Location", "Set-Cookie", "Set-Cookie2", "Servlet-Engine", "Status", "WWW-Authenticate"
]
def parse(self, stream):
# read headers
self.magic, self.data_length, self.prefix_code = unpack(stream, ">HHb")
if self.prefix_code == AjpResponse.SEND_HEADERS:
self.parse_send_headers(stream)
elif self.prefix_code == AjpResponse.SEND_BODY_CHUNK:
self.parse_send_body_chunk(stream)
elif self.prefix_code == AjpResponse.END_RESPONSE:
self.parse_end_response(stream)
elif self.prefix_code == AjpResponse.GET_BODY_CHUNK:
self.parse_get_body_chunk(stream)
else:
raise NotImplementedError
def parse_send_headers(self, stream):
self.http_status_code, = unpack(stream, ">H")
self.http_status_msg = unpack_string(stream)
self.num_headers, = unpack(stream, ">H")
self.response_headers = {}
for i in range(self.num_headers):
code, = unpack(stream, ">H")
if code <= 0xA000: # custom header
h_name, = unpack(stream, "%ds" % code)
stream.read(1) # \0
h_value = unpack_string(stream)
else:
h_name = AjpResponse.COMMON_SEND_HEADERS[code-0xA001]
h_value = unpack_string(stream)
self.response_headers[h_name] = h_value
def parse_send_body_chunk(self, stream):
self.data_length, = unpack(stream, ">H")
self.data = stream.read(self.data_length+1)
def parse_end_response(self, stream):
self.reuse, = unpack(stream, "b")
def parse_get_body_chunk(self, stream):
rlen, = unpack(stream, ">H")
return rlen
@staticmethod
def receive(stream):
r = AjpResponse()
r.parse(stream)
return r
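# --- Illustrative usage sketch (added, not part of the original module) ---
# A hedged example of issuing a plain GET over AJP13; the host, port and URI are
# assumptions, and a reachable AJP connector (e.g. Tomcat on 8009) is required.
def _example_ajp_get(target_host="127.0.0.1", target_port=8009, uri="/index.jsp"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((target_host, target_port))
    stream = sock.makefile("rb")
    forward = AjpForwardRequest(AjpForwardRequest.SERVER_TO_CONTAINER)
    forward.method = AjpForwardRequest.GET
    forward.protocol = "HTTP/1.1"
    forward.req_uri = uri
    forward.remote_addr = target_host
    forward.remote_host = None
    forward.server_name = target_host
    forward.server_port = 80
    forward.is_ssl = False
    forward.request_headers = {"SC_REQ_HOST": target_host}
    forward.attributes = []
    responses = forward.send_and_receive(sock, stream)
    sock.close()
    return responses  # list of AjpResponse objects (headers, body chunks, end)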
|
scripts/soccer.py | jkurdys/ThinkBayes2 | 1,337 | 12774457 | <gh_stars>1000+
"""This file contains code for use with "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2014 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy
import thinkbayes2
import thinkplot
class Soccer(thinkbayes2.Suite):
    """Represents hypotheses about the goal-scoring rate."""
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
        hypo: hypothetical goal-scoring rate, in goals per game
        data: elapsed game time when a goal is scored, in minutes
"""
like = 1
return like
def PredRemaining(self, rem_time, score):
"""Plots the predictive distribution for final number of goals.
rem_time: remaining time in the game in minutes
score: number of goals already scored
"""
# TODO: fill this in
def main():
hypos = numpy.linspace(0, 12, 201)
suite = Soccer(hypos)
thinkplot.Pdf(suite, label='prior')
print('prior mean', suite.Mean())
suite.Update(11)
thinkplot.Pdf(suite, label='posterior 1')
print('after one goal', suite.Mean())
thinkplot.Show()
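# --- Illustrative sketch (added commentary, not the book's solution) ---
# One possible way to fill in Likelihood, assuming hypo is a rate in goals per
# game and data is the elapsed time of a goal in minutes: the waiting time is
# then exponential with rate hypo/90 per minute.
class _PoissonSoccerSketch(Soccer):
    def Likelihood(self, data, hypo):
        lam = hypo / 90.0                    # goals per minute
        return lam * numpy.exp(-lam * data)  # exponential waiting-time density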
if __name__ == '__main__':
main()
|
alembic/versions/2019102221_add_shared_file_system_column__75d4288ae265.py | kl-chou/codalab-worksheets | 236 | 12774462 | <filename>alembic/versions/2019102221_add_shared_file_system_column__75d4288ae265.py
"""Add shared-file-system column to workers
Revision ID: 75d4288ae265
Revises: <PASSWORD>
Create Date: 2019-10-22 21:05:26.580918
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'd0dd45f443b6'
def upgrade():
op.add_column('worker', sa.Column('shared_file_system', sa.Boolean(), nullable=False, server_default='0'))
def downgrade():
op.drop_column('worker', 'shared_file_system')
|
swarmlib/abc/bees/onlooker_bee.py | alxfmpl/swarmlib | 221 | 12774504 | <filename>swarmlib/abc/bees/onlooker_bee.py
# ------------------------------------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from typing import Tuple
from .bee_base import BeeBase
class OnlookerBee(BeeBase):
def explore(self, starting_position: Tuple[float, float], start_value: float) -> None:
"""
Explore new food sources from the given one
Args:
            starting_position (Tuple[float, float]): position of the food source to explore from
            start_value (float): objective function value at the starting position
"""
self._explore(starting_position, start_value)
|
models/InitialBlock.py | JJavierga/ENet-Real-Time-Semantic-Segmentation | 268 | 12774526 | ###################################################
# Copyright (c) 2019 #
# Authors: @iArunava <<EMAIL>> #
# @AvivSham <<EMAIL>> #
# #
# License: BSD License 3.0 #
# #
# The Code in this file is distributed for free #
# usage and modification with proper linkage back #
# to this repository. #
###################################################
import torch
import torch.nn as nn
class InitialBlock(nn.Module):
def __init__ (self,in_channels = 3,out_channels = 13):
super().__init__()
self.maxpool = nn.MaxPool2d(kernel_size=2,
stride = 2,
padding = 0)
self.conv = nn.Conv2d(in_channels,
out_channels,
kernel_size = 3,
stride = 2,
padding = 1)
self.prelu = nn.PReLU(16)
self.batchnorm = nn.BatchNorm2d(out_channels)
def forward(self, x):
main = self.conv(x)
main = self.batchnorm(main)
side = self.maxpool(x)
x = torch.cat((main, side), dim=1)
x = self.prelu(x)
return x
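# --- Illustrative shape check (added, not part of the original file) ---
# For a 3-channel 512x512 input, the conv branch yields 13 channels and the
# max-pool branch keeps the 3 input channels, so the concatenated output has
# 16 channels at half the spatial resolution.
if __name__ == '__main__':
    block = InitialBlock(in_channels=3, out_channels=13)
    out = block(torch.randn(1, 3, 512, 512))
    print(out.shape)  # expected: torch.Size([1, 16, 256, 256])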
|
ui/pypesvds/controllers/index.py | onfire73/pypeskg | 117 | 12774550 | import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort
# added for auth
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import RemoteUser, ValidAuthKitUser, UserIn
from pypesvds.lib.base import BaseController, render
log = logging.getLogger(__name__)
class IndexController(BaseController):
@authorize(ValidAuthKitUser())
def index(self):
# Return a rendered template
#return render('/index.mako')
# or, return a response
return render('/pypesvds.mako')
def signout(self):
return render('/signin.html')
|
applications/CoSimulationApplication/mpi_extension/MPIExtension.py | clazaro/Kratos | 778 | 12774561 | import KratosMultiphysics.mpi # importing the MPI-Core, since the MPIExtension directly links to it
from KratosCoSimulationMPIExtension import *
|
apps/Tests/algs/TestAlg.py | erinzm/NEXT-chemistry | 155 | 12774621 | <filename>apps/Tests/algs/TestAlg.py
from apps.Tests.tests.test_api import set_and_get_alg, get_alg, get_exp
class MyAlg:
def initExp(self, butler, dummy):
get_exp(butler)
set_and_get_alg(butler)
return "return_init_exp"
def getQuery(self, butler):
get_alg(butler)
return "return_get_query"
def processAnswer(self, butler):
get_alg(butler)
return "return_process_answer"
def getModel(self, butler):
get_alg(butler)
return "return_process_answer"
|
h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_countmatches.py | vishalbelsare/h2o-3 | 6,098 | 12774631 | <reponame>vishalbelsare/h2o-3<gh_stars>1000+
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_countmatches():
"""
Python API test: h2o.frame.H2OFrame.countmatches(pattern)
Copied from pyunit_countmatches.py
"""
python_lists = [["what","is"], ["going", "on"], ["When", "are"], ["MeetingMeetingon", "gone"]]
h2oframe = h2o.H2OFrame(python_obj=python_lists)
matches = h2oframe.countmatches(['Wh', 'ing', 'on'])
assert_is_type(matches, H2OFrame)
assert matches.shape == h2oframe.shape, "h2o.H2OFrame.countmatches() command is not working."
assert matches.any_na_rm(), "h2o.H2OFrame.countmatches() command is not working."
nomatches = h2oframe.countmatches(['rain','pluck'])
assert not(nomatches.any_na_rm()), "h2o.H2OFrame.countmatches() command is not working."
pyunit_utils.standalone_test(h2o_H2OFrame_countmatches)
|
plenum/test/run_continuously.py | andkononykhin/plenum | 148 | 12774637 | import traceback
import pytest
from plenum.test.testing_utils import setupTestLogging
setupTestLogging()
def run(test, stopOnFail=True, maxTimes=None):
count = 0
passes = 0
fails = 0
while maxTimes is None or count < maxTimes:
exitcode = pytest.main(test)
count += 1
if exitcode:
fails += 1
print("Test failed!")
traceback.print_exc()
if stopOnFail:
break
else:
passes += 1
print("Test passed.")
print("current stats: successes: {} fails: {}".format(passes, fails))
run("monitoring/test_instance_change_with_Delta.py",
stopOnFail=False, maxTimes=100)
|
pandoc-starter/MarkTex/marktex/rawrender/toRaw.py | riciche/SimpleCVReproduction | 923 | 12774652 | <gh_stars>100-1000
import os
from marktex.markast.utils import ImageTool,CleanTool
from marktex.markast.parser import Scanner
from marktex import config
from marktex.markast.document import Document
from marktex.markast.environment import *
from marktex.markast.line import *
from marktex.markast.token import *
from marktex.markast.xmls import *
class MarkRaw():
def __init__(self, doc: Document, input_dir, output_dir=None, texconfig=None, subdoc=False):
self.subdoc = subdoc
if texconfig is None:
texconfig = config
self.config = texconfig
self.input_dir = input_dir
if output_dir is None:
output_dir = "./"
image_dir = os.path.join(output_dir, "images")
self.output_dir = output_dir
self.image_dir = os.path.abspath(image_dir)
self.doc = doc
self.has_toc = False
self.contents = []
def append(self,item):
self.contents.append(item)
@staticmethod
def convert_file(fpath, output_dir=None):
'''
        :param fpath: path of the markdown file
        :param image_dir: remote and local images referenced in the markdown are all renamed
          with a unified hash during conversion and written to a single directory, which
          defaults to "./images" under the directory containing the markdown file
:return:
'''
fpre, _ = os.path.split(fpath)
if output_dir is None:
output_dir = fpre
os.makedirs(output_dir, exist_ok=True)
doc = Scanner.analyse_file(fpath)
input_dir, _ = os.path.split(fpath)
mark = MarkRaw(doc, input_dir=input_dir, output_dir=output_dir)
mark.convert()
return mark
def convert(self):
doc = self.doc
if doc.has_toc and not self.subdoc:
pass
for i, envi in enumerate(doc.content):
print(f"\rConverting...{i * 100 / len(doc.content):.3f}%.", end="\0", flush=True)
if isinstance(envi, Quote):
envi = self.fromQuote(envi)
elif isinstance(envi, Paragraph):
envi = self.fromParagraph(envi)
elif isinstance(envi, Itemize):
envi = self.fromItemize(envi)
elif isinstance(envi, Enumerate):
envi = self.fromEnumerate(envi)
elif isinstance(envi, Formula):
envi = self.fromFormula(envi)
elif isinstance(envi, Code):
envi = self.fromCode(envi)
elif isinstance(envi, Table):
envi = self.fromTable(envi)
elif isinstance(envi, MultiBox):
envi = self.fromMultiBox(envi)
else:
raise Exception(f"Doc error {envi},{envi.__class__.__name__}")
self.append(envi)
print(f"\rConverting...100%.")
def fromToken(self, s: Token):
return s.string
def fromBold(self, s: Bold):
return s.string
def fromItalic(self, s: Italic):
return s.string
def fromDeleteLine(self, s: DeleteLine):
return s.string
def fromUnderLine(self, s: UnderLine):
return s.string
def fromInCode(self, s: InCode):
return s.string
def fromInFormula(self, s: InFormula):
return s.string
def fromHyperlink(self, s: Hyperlink):
desc, link = s.desc, s.link
return f" {link},{desc} "
def fromFootnote(self, s: Footnote):
return s.label
def fromInImage(self, s: InImage):
return ""
def fromSection(self, s: Section):
level, content = s.level, s.content
content = self.fromTokenLine(s.content)
return f"{level}-{content}"
def fromImage(self, s: Image):
        # cur_dir = os.getcwd()  # relative paths in the markdown are always relative to that markdown file
# os.chdir(self.input_dir)
link = s.link
link = ImageTool.verify(link, self.image_dir, self.input_dir)
# os.chdir(cur_dir)
if config.give_rele_path:
link = os.path.relpath(link, self.output_dir)
link = link.replace("\\", "/")
return f" img,{link} "
def fromXML(self, token: XML):
return token.content
def fromTokenLine(self, s: TokenLine):
tokens = s.tokens
strs = []
for token in tokens:
if isinstance(token, Bold):
token = self.fromBold(token)
elif isinstance(token, XML):
token = self.fromXML(token)
elif isinstance(token, Italic):
token = self.fromItalic(token)
elif isinstance(token, DeleteLine):
token = self.fromDeleteLine(token)
elif isinstance(token, Footnote):
token = self.fromFootnote(token)
elif isinstance(token, UnderLine):
token = self.fromUnderLine(token)
elif isinstance(token, InCode):
token = self.fromInCode(token)
elif isinstance(token, InFormula):
token = self.fromInFormula(token)
elif isinstance(token, Hyperlink):
token = self.fromHyperlink(token)
elif isinstance(token, InImage):
token = self.fromInImage(token)
elif isinstance(token, Token):
token = self.fromToken(token)
else:
raise Exception(f"TokenLine error {token},{token.__class__.__name__}")
strs.append(token)
return "".join(strs)
def fromRawLine(self, s: RawLine):
return s.s
def fromNewLine(self, s: NewLine):
return "\n"
def fromParagraph(self, s: Paragraph):
t = []
# Section / NewLine / TokenLine / Image
for line in s.buffer:
if isinstance(line, Section):
line = self.fromSection(line)
elif isinstance(line, NewLine):
line = self.fromNewLine(line)
elif isinstance(line, TokenLine):
line = self.fromTokenLine(line)
elif isinstance(line, Image):
line = self.fromImage(line)
else:
raise Exception(f"Paragraph line error {line} is {line.__class__}")
t.append(line)
return "\n".join(t)
def fromQuote(self, s: Quote):
content = s.doc.content
q = []
for envi in content:
if isinstance(envi, Paragraph):
envi = self.fromParagraph(envi)
elif isinstance(envi, Table):
envi = self.fromTable(envi)
elif isinstance(envi, Itemize):
envi = self.fromItemize(envi)
elif isinstance(envi, Enumerate):
envi = self.fromEnumerate(envi)
elif isinstance(envi, Formula):
envi = self.fromFormula(envi)
elif isinstance(envi, Code):
envi = self.fromCode(envi)
else:
raise Exception(f"Quote doc error:{envi},{envi.__class__.__name__}")
q.append(envi)
return "\n".join(q)
def fromItemize(self, s: Itemize):
tokens = [self.fromTokenLine(c) for c in s.buffer]
ui = []
for line in tokens:
ui.append(f" - {line}")
return "\n".join(ui)
def fromMultiBox(self, s: MultiBox):
cl = []
for [ct, s] in s.lines:
cl.append(f"{ct} {s}")
return "\n".join(cl)
def fromEnumerate(self, s: Enumerate):
tokens = [self.fromTokenLine(c) for c in s.buffer]
ui = []
for i,line in enumerate(tokens):
ui.append(f"{i},{line}")
return "\n".join(ui)
def fromFormula(self, s: Formula):
code = [self.fromRawLine(c) for c in s.formula]
data = []
for line in code:
data.append(line)
return "\n".join(data)
def fromCode(self, s: Code):
code = [self.fromRawLine(c) for c in s.code]
c = []
for line in code:
c.append(line)
return "\n".join(c)
def fromTable(self, s: Table):
t = []
for i, row in enumerate(s.tables):
row = [self.fromTokenLine(c) for c in row]
t.append(" & ".join(row))
return "\n".join(t)
def generate_txt(self, filename=None):
'''
        Just pass a file name; the output path was already fixed when this instance was created.
:param filename:
:return:
'''
filepath = os.path.join(self.output_dir, f"{filename}.txt")
with open(f"{filepath}","w",encoding="utf-8") as w:
w.writelines(self.contents)
print(f"File is output in {os.path.abspath(filepath)} and images is in {os.path.abspath(self.image_dir)}.")
|
host-software/led/led_vm.py | dpejcha/keyplus | 226 | 12774679 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <EMAIL>
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from sexpr import sexp
import pprint
import copy
import hexdump
DEBUG = 0
def u8(x):
return x & 0xff
def i16(x):
return x & 0xffff
class LEDVMError(Exception):
pass
class OpCodeInfo(object):
def __init__(self, name, data_len, arg_type):
self.name = name
self.data_len = data_len
self.arg_type = arg_type
ARG_NONE = 0
ARG_REFRENCES = 1
class OpCode(object):
SHOW_HSV = 0x00
SHOW_RGB = 0x01
LOAD_PIXEL = 0x02
ADD_VEC3 = 0x03
SUB_VEC3 = 0x04
IF_EQ = 0x05
OP_CODE_TABLE = {
        # CODE   ,      MNEMONIC , DATA_SIZE
SHOW_HSV : OpCodeInfo("SHOW_HSV" , 0 , OpCodeInfo.ARG_NONE) ,
SHOW_RGB : OpCodeInfo("SHOW_RGB" , 0 , OpCodeInfo.ARG_NONE) ,
LOAD_PIXEL : OpCodeInfo("LOAD_PIXEL" , 3 , OpCodeInfo.ARG_REFRENCES) ,
ADD_VEC3 : OpCodeInfo("ADD_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
SUB_VEC3 : OpCodeInfo("SUB_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
IF_EQ : OpCodeInfo("IF_EQ" , 3 , OpCodeInfo.ARG_REFRENCES) ,
}
@staticmethod
def to_string(code):
if code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
return "{}<{}>".format(name, code)
else:
return "{}<{}>".format("UnknownOpCode", code)
def __init__(self, name, data_len=0):
self.name = name
self.data_len = data_len
class Register(object):
# Register codes
PIXEL_NUM = 0
OUTPUT_TYPE = 1
KEY_STATE = 2
MOUSE_X = 3
MOUSE_Y = 4
OUTPUT_TYPE_RGB = 0
OUTPUT_TYPE_HSV = 1
def __init__(self, name, default_value=0):
self.name = name
self.value = default_value
self.default_value = default_value
class LEDEffectVM(object):
REGISTER_TABLE = {
Register.PIXEL_NUM : Register("PIXEL_NUM", 0),
Register.OUTPUT_TYPE : Register("OUTPUT_TYPE", 0),
Register.KEY_STATE : Register("KEY_STATE", 0),
Register.MOUSE_X : Register("MOUSE_X", 0),
Register.MOUSE_Y : Register("MOUSE_Y", 0),
}
def __init__(self, led_program_table={'main': []}, num_pixels=None):
self.pixels = [(0, 0, 0)] * num_pixels
self.led_program_table = led_program_table
self.set_active_progarm('main')
self.instr_ptr = 0
self.registers = {}
for reg in self.REGISTER_TABLE:
self.registers[reg] = self.REGISTER_TABLE[reg].default_value
def set_active_progarm(self, name):
self._current_program_name = name
self.current_program = self.led_program_table[name]
def goto_start(self):
self.instr_ptr = 0
def rel_jump(self, offset):
self.instr_ptr += (offset)
def get_next_word(self):
if self.instr_ptr >= len(self.current_program):
return None
result = self.current_program[self.instr_ptr]
self.instr_ptr += 1
return result
def read_op_code(self):
code = self.get_next_word()
if code == None:
return None, None
self.vm_assert(code in OpCode.OP_CODE_TABLE, "Invalid OpCode: {}".format(code))
op_code = OpCode.OP_CODE_TABLE[code]
data = []
for i in range(op_code.data_len):
data.append(self.get_next_word())
# if DEBUG >= 1
if DEBUG >= 5:
print("Instruction: {}".format(self.instr_ptr))
print("Current code: {}, data:{}".format(
OpCode.to_string(code), data
)
)
return code, data
REFERENCE_TYPE_IMMEDIATE = 0
REFERENCE_TYPE_REGISTER = 1
REFERENCE_TYPE_PIXEL = 2
def lookup_refrence(self, ref):
        # References either an immediate value or another register value.
        # Format of reference values (in hex):
        # * 00xx -> single byte immediate value xx
        # * 01xx -> value of register xx
        # * 02xx -> component xx (0..2) of the current pixel
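        # Example (illustrative values): 0x0005 is the immediate 5, 0x0100 reads
        # register 0 (PIXEL_NUM), and 0x0202 reads the third component (b/v) of
        # the current pixel.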
value = (ref >> 0) & 0xff
ref_type = (ref >> 8) & 0xff
if ref_type == self.REFERENCE_TYPE_IMMEDIATE:
return value
elif ref_type == self.REFERENCE_TYPE_PIXEL:
assert(value < 3)
return self.get_current_pixel()[value]
elif ref_type == self.REFERENCE_TYPE_REGISTER:
assert(value in self.REGISTER_TABLE)
return self.registers[value]
def get_pixel(self, pixel_num):
return self.pixels[pixel_num]
def get_pixel_type(self, pixel_num):
return self.registers[Register.OUTPUT_TYPE]
def get_current_pixel(self):
return self.pixels[self.registers[Register.PIXEL_NUM]]
def set_current_pixel(self, x, y, z):
self.pixels[self.registers[Register.PIXEL_NUM]] = (x, y, z)
def execute_op_code(self, code, data):
"""
Return True if the program has finished executing
"""
if code == OpCode.SHOW_HSV:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_HSV
return True
elif code == OpCode.SHOW_RGB:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_RGB
return True
elif code == OpCode.LOAD_PIXEL:
self.set_current_pixel(
self.lookup_refrence(data[0]),
self.lookup_refrence(data[1]),
self.lookup_refrence(data[2])
)
elif code == OpCode.ADD_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] + self.lookup_refrence(data[0])),
u8(old_value[1] + self.lookup_refrence(data[1])),
u8(old_value[2] + self.lookup_refrence(data[2]))
)
elif code == OpCode.SUB_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] - self.lookup_refrence(data[0])),
u8(old_value[1] - self.lookup_refrence(data[1])),
u8(old_value[2] - self.lookup_refrence(data[2]))
)
elif code == OpCode.IF_EQ:
lhs = self.lookup_refrence(data[0])
rhs = self.lookup_refrence(data[1])
jmp_pos = self.lookup_refrence(data[2])
if DEBUG >= 5:
print("lhs, rhs, == :", lhs, rhs, lhs == rhs)
if lhs != rhs:
self.rel_jump(jmp_pos)
else:
raise LEDVMError("Unknown opcode {}".format(code))
return False
def execute_program(self, program_name):
self.set_active_progarm(program_name)
for (pixel_i, _) in enumerate(self.pixels):
self.execute_program_pixel(pixel_i)
def execute_program_pixel(self, pixel_number):
self.goto_start()
self.registers[Register.PIXEL_NUM] = pixel_number
is_running = True
if DEBUG:
print("Starting program for pixel: {}".format(pixel_number))
while is_running:
(code, data) = self.read_op_code()
if code == None:
break;
if DEBUG:
print("(OpCode {}, Data {})".format(code, data))
is_running = not self.execute_op_code(code, data)
def vm_assert(self, exp, msg=""):
if exp != True:
self.print_core_dump(msg)
if msg == "":
LEDVMError("LEDVMError: unspecified error")
else:
LEDVMError("LEDVMError: {}".format(msg))
def print_core_dump(self, error_msg):
print(
"\n"
"Core dump while executing program '{}':\n"
"Error message: {}\n"
"instr_ptr: {}\n"
"program: {}\n"
.format(
self._current_program_name,
error_msg,
self.instr_ptr,
self.current_program
)
)
class LEDEffectVMParser(object):
def __init__(self):
# The Parser needs the inverse mappings of the op_code/register lookup
# tables, so generate them here
self.op_code_lookup_table = {}
for code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
self.op_code_lookup_table[name] = code
self.register_lookup_table = {}
for reg in LEDEffectVM.REGISTER_TABLE:
name = LEDEffectVM.REGISTER_TABLE[reg].name
self.register_lookup_table[name] = reg
# def exp_as_arrays(self, exp):
# print(exp)
# arr = exp[0]
# result = []
# for child in arr:
# result.append(self.exp_as_arrays(child))
# return result
def parse_asm(self, program_str):
sexpression = sexp.parseString(program_str, parseAll=True)
if DEBUG:
print(sexpression)
pprint.pprint(sexpression)
# sexpression = self.exp_as_arrays(sexpression)
byte_code = []
byte_code += self.parse_program(sexpression)
return byte_code
def generate_ref(self, ref):
if isinstance(ref, int):
assert(ref <= 255)
ref_type = LEDEffectVM.REFERENCE_TYPE_IMMEDIATE
value = ref
elif isinstance(ref, str):
if ref in self.register_lookup_table:
ref_type = LEDEffectVM.REFERENCE_TYPE_REGISTER
value = self.register_lookup_table[ref]
elif ref in ('r', 'g', 'b', 'h', 's', 'v'):
ref_type = LEDEffectVM.REFERENCE_TYPE_PIXEL
value = {
'r': 0,
'h': 0,
'g': 1,
's': 1,
'b': 2,
'v': 2,
}[ref]
else:
raise LEDVMError("Unknown reference: {}".format(ref))
else:
return None
lo_byte = (value << 0)
hi_byte = (ref_type << 8)
return [lo_byte | hi_byte]
def parse_instruction(self, exp):
if DEBUG:
print("Parse Instruction: ", exp)
name = exp[0]
result = []
if not name in self.op_code_lookup_table:
raise LEDVMError("Unknown opcode menomic: {}".format(name))
op_code = self.op_code_lookup_table[name]
op_info = OpCode.OP_CODE_TABLE[op_code]
# Add the op_code to the result
result += [op_code]
OP_CODE_POS = 1
data = exp[OP_CODE_POS:]
if len(data) != op_info.data_len:
raise LEDVMError("Expected {} arguments to opcode {}, got {}".format(
op_info.data_len,
name,
len(data)
)
)
if op_code == OpCode.IF_EQ:
print(data)
print(data[0], data[1], data[2])
LHS_POS = 0
RHS_POS = 1
JUMP_POS = 2
result += self.generate_ref(data[LHS_POS])
result += self.generate_ref(data[RHS_POS])
if_block_exp = data[JUMP_POS]
ref_data = self.generate_ref(if_block_exp)
if ref_data != None:
result += ref_data
else:
print('ifblock:', if_block_exp)
if_block = self.parse_instruction_list(if_block_exp)
jmp_offset = i16(len(if_block))
result += [jmp_offset]
result += if_block
print('ifBlockResult:', result)
elif op_info.arg_type == OpCodeInfo.ARG_NONE:
pass # Don't need to add data
elif op_info.arg_type == OpCodeInfo.ARG_REFRENCES:
for ref in data:
result += self.generate_ref(ref)
return result
def parse_instruction_list(self, instruction_list):
result = []
for instruction in instruction_list:
result += self.parse_instruction(instruction)
return result
def parse_program(self, exp):
if DEBUG:
print("Parse program: ", exp)
exp = exp[0]
# pprint.pprint(exp)
return self.parse_instruction_list(exp)
if __name__ == "__main__":
init_prog = """
(
(LOAD_PIXEL PIXEL_NUM 255 200)
)
"""
# main_prog = """
# (
# (LOAD_PIXEL r 255 200)
# (ADD_VEC3 1 0 0)
# (IF_EQ v 199
# (
# (ADD_VEC3 1 0 0)
# )
# )
# (IF_EQ v 200
# (
# (SUB_VEC3 1 0 0)
# )
# )
# (SHOW_HSV)
# )
# """
main_prog = """
(
(IF_EQ h 0
(
(LOAD_PIXEL h 255 199)
)
)
(IF_EQ h 255
(
(LOAD_PIXEL h 255 200)
)
)
(IF_EQ v 200
(
(SUB_VEC3 1 0 0)
)
)
(IF_EQ v 199
(
(ADD_VEC3 1 0 0)
)
)
(SHOW_HSV)
)
"""
vm_parser = LEDEffectVMParser()
led_programs = {
"init": vm_parser.parse_asm(init_prog),
"main": vm_parser.parse_asm(main_prog),
}
vm = LEDEffectVM(led_programs, num_pixels=64)
for prog in led_programs:
print(prog, led_programs[prog])
byte_code_as_bytes = bytes([])
for word in led_programs[prog]:
byte_code_as_bytes += bytes([word & 0xff, word>>8 & 0xff])
hexdump.hexdump(byte_code_as_bytes)
vm.execute_program('init')
for i in range(300):
vm.execute_program('main')
print(vm.pixels)
|
model_compiler/src/model_compiler/tensorflow_util.py | yuanliya/Adlik | 548 | 12774690 | # Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping, NamedTuple, Optional, Sequence
from itertools import zip_longest
from . import utilities
from .models.data_format import DataFormat
def get_tensor_by_fuzzy_name(graph, name):
if ':' in name:
tensor = graph.get_tensor_by_name(name)
else:
tensor = graph.get_operation_by_name(name).outputs[0]
return tensor
class Config(NamedTuple):
input_names: Optional[Sequence[str]]
data_formats: Sequence[Optional[DataFormat]]
output_names: Optional[Sequence[str]]
@staticmethod
def from_json(value: Mapping[str, Any]) -> 'Config':
return Config(input_names=value.get('input_names'),
data_formats=utilities.get_data_formats(value.get('input_formats')),
output_names=value.get('output_names'))
@staticmethod
def from_env(env: Mapping[str, str]) -> 'Config':
return Config(input_names=utilities.split_by(env.get('INPUT_NAMES'), ','),
data_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS'), ',')),
output_names=utilities.split_by(env.get('OUTPUT_NAMES'), ','))
def get_input_tensors_from_graph(self, graph):
if self.input_names is None:
input_tensors = [operation.outputs[0]
for operation in graph.get_operations()
if operation.type == 'Placeholder']
else:
input_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.input_names]
return input_tensors
def get_output_tensors_from_graph(self, graph):
if self.output_names is None:
output_tensors = [output_tensor for operation in graph.get_operations()
if operation.type not in
['Assign', 'Const', 'Identity', 'IsVariableInitialized', 'NoOp', 'Placeholder', 'SaveV2',
'VarIsInitializedOp']
for output_tensor in operation.outputs
if not output_tensor.consumers()]
else:
output_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.output_names]
return output_tensors
def get_inputs(graph, config):
return zip_longest(config.get_input_tensors_from_graph(graph), config.data_formats)
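# Minimal usage sketch (added for illustration, not part of the original module);
# the placeholder op name 'x' below is an arbitrary example.
if __name__ == '__main__':
    import tensorflow as tf
    _graph = tf.Graph()
    with _graph.as_default():
        tf.compat.v1.placeholder(tf.float32, shape=[None, 4], name='x')
    # 'x' (operation name) and 'x:0' (tensor name) resolve to the same tensor.
    print(get_tensor_by_fuzzy_name(_graph, 'x') is get_tensor_by_fuzzy_name(_graph, 'x:0'))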
|
autoit_ripper/utils.py | nazywam/AutoIt-Ripper | 112 | 12774697 | from datetime import datetime, timezone
from itertools import cycle
from .lame import LAME
from .mt import MT
def filetime_to_dt(timestamp: int) -> datetime:
return datetime.fromtimestamp(timestamp // 100000000, timezone.utc)
def bytes_to_bitstring(data: bytes) -> str:
return "".join(bin(x)[2:].zfill(8) for x in data)
class BitStream:
def __init__(self, data: bytes) -> None:
self.data = bytes_to_bitstring(data)
def get_bits(self, num: int) -> int:
out = int(self.data[:num], 2)
self.data = self.data[num:]
return out
def xor(data: bytes, key: bytes) -> bytes:
return bytes(a ^ b for a, b in zip(data, cycle(key)))
def decrypt_lame(data: bytes, seed: int) -> bytes:
lame = LAME()
lame.srand(seed)
return bytes([x ^ lame.get_next() for x in data])
def decrypt_mt(data: bytes, seed: int) -> bytes:
key = MT(seed).get_bytes(len(data))
return xor(data, key)
def crc_data(data: bytes) -> int:
if len(data) == 0:
return 0
dwKey_ECX = 0
dwKey_ESI = 1
for b in data:
dwKey_ESI = (b + dwKey_ESI) % 0xFFF1
dwKey_ECX = (dwKey_ECX + dwKey_ESI) % 0xFFF1
return (dwKey_ECX << 0x10) + dwKey_ESI
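# Minimal self-check sketch (added for illustration, not part of the original module);
# run with `python -m autoit_ripper.utils` so the relative imports above resolve.
# The byte strings below are arbitrary sample values.
if __name__ == "__main__":
    sample = b"AutoIt"
    key = b"\x13\x37"
    assert xor(xor(sample, key), key) == sample      # XOR with the same key is its own inverse
    assert BitStream(b"\xf0").get_bits(4) == 0b1111  # top nibble of 0xF0
    print(hex(crc_data(sample)))                     # Adler-32 style checksum over the sample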
|
ontobio/bin/timeit.py | alliance-genome/ontobio | 101 | 12774744 | #!/usr/bin/env python3
from ontobio.sparql2ontology import *
from networkx.algorithms.dag import ancestors
import time
def r():
t1 = time.process_time()
get_edges('pato')
t2 = time.process_time()
print(t2-t1)
r()
r()
r()
"""
LRU is much faster, but does not persist. However, should be fast enough
# percache
## ENVO
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.103934
0.0032450000000001644
0.003185999999999911
$ python ./obographs/bin/timeit.py
0.018115000000000048
0.00362800000000002
0.003180000000000016
## GO
$ python ./obographs/bin/timeit.py
QUERYING:go
13.218031
0.04876699999999978
0.04904600000000059
$ python ./obographs/bin/timeit.py
0.05928599999999995
0.045568
0.045347000000000026
# lru
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.0635080000000001
2.0000000000575113e-06
1.000000000139778e-06
$ python ./obographs/bin/timeit.py
QUERYING:go
13.225105000000001
2.000000000279556e-06
0.0
"""
|
tests/test_data/test_datasets/__init__.py | rlleshi/mmaction2 | 1,870 | 12774772 | <filename>tests/test_data/test_datasets/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseTestDataset
__all__ = ['BaseTestDataset']
|
fluent.pygments/fluent/pygments/cli.py | shlomyb-di/python-fluent | 155 | 12774778 | import argparse
import sys
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from fluent.pygments.lexer import FluentLexer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path')
args = parser.parse_args()
with open(args.path) as fh:
code = fh.read()
highlight(code, FluentLexer(), Terminal256Formatter(), sys.stdout)
if __name__ == '__main__':
main()
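# Example invocation (illustrative; the .ftl path is a placeholder):
#     python -m fluent.pygments.cli path/to/messages.ftl
# prints the file to the terminal with Fluent syntax highlighting.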
|
pycozmo/tests/test_image_encoder.py | gimait/pycozmo | 123 | 12774792 |
import unittest
from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str
from pycozmo.util import hex_dump, hex_load
from pycozmo.tests.image_encoder_fixtures import FIXTURES
class TestImageEncoder(unittest.TestCase):
@staticmethod
def _encode(sim: str) -> str:
im = str_to_image(sim)
encoder = ImageEncoder(im)
buf = encoder.encode()
res = hex_dump(buf)
return res
def assertSameImage(self, sim: str, seq: str) -> None:
buffer = hex_load(seq)
decoder = ImageDecoder(buffer)
decoder.decode()
actual = image_to_str(decoder.image)
self.assertEqual(sim.strip(), actual.strip())
def test_blank(self):
fixture = FIXTURES["blank"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen(self):
fixture = FIXTURES["fill_screen"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen2(self):
fixture = FIXTURES["fill_screen2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left(self):
fixture = FIXTURES["top_left"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_5(self):
fixture = FIXTURES["top_left_5"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_1_8(self):
fixture = FIXTURES["top_left_1_8"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_line(self):
fixture = FIXTURES["top_left_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_line(self):
fixture = FIXTURES["top_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_bottom_line(self):
fixture = FIXTURES["bottom_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_left_line(self):
fixture = FIXTURES["left_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_right_line(self):
fixture = FIXTURES["right_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_columns(self):
fixture = FIXTURES["columns"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect(self):
fixture = FIXTURES["rect"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect2(self):
fixture = FIXTURES["rect2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect3(self):
fixture = FIXTURES["rect3"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect4(self):
fixture = FIXTURES["rect4"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal(self):
fixture = FIXTURES["diagonal"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal2(self):
fixture = FIXTURES["diagonal2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_blocks(self):
fixture = FIXTURES["blocks"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_pycozmo(self):
fixture = FIXTURES["pycozmo"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tl(self):
fixture = FIXTURES["chessboard_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_bl(self):
fixture = FIXTURES["chessboard_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tr(self):
fixture = FIXTURES["chessboard_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_br(self):
fixture = FIXTURES["chessboard_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tl(self):
fixture = FIXTURES["chessboard2_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_bl(self):
fixture = FIXTURES["chessboard2_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tr(self):
fixture = FIXTURES["chessboard2_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_br(self):
fixture = FIXTURES["chessboard2_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
|
tridet/utils/train.py | flipson/dd3d | 227 | 12774798 | <gh_stars>100-1000
# Copyright 2021 Toyota Research Institute. All rights reserved.
import logging
import os
from tabulate import tabulate
from termcolor import colored
from detectron2.utils.events import get_event_storage
LOG = logging.getLogger(__name__)
def get_inference_output_dir(dataset_name, is_last=False, use_tta=False, root_output_dir=None):
if not root_output_dir:
root_output_dir = os.getcwd() # hydra
step = get_event_storage().iter
if is_last:
result_dirname = "final"
else:
result_dirname = f"step{step:07d}"
if use_tta:
result_dirname += "-tta"
output_dir = os.path.join(root_output_dir, "inference", result_dirname, dataset_name)
return output_dir
def print_test_results(test_results):
metric_table = tabulate(
[(k, v) for k, v in test_results.items()],
headers=["metric", "value"],
tablefmt="pipe",
numalign="left",
stralign="left",
)
LOG.info("Test results:\n" + colored(metric_table, "cyan"))
|
recipes/Python/577611_edit_dictionary_values_possibly_restrained/recipe-577611.py | tdiprima/code | 2,023 | 12774821 | <filename>recipes/Python/577611_edit_dictionary_values_possibly_restrained/recipe-577611.py
"""
DICTIONARY INTERFACE FOR EDITING VALUES
creates labels/edits/menubutton widgets in a TkFrame to edit dictionary values
use: apply(frame,dict,position)
"""
import Tkinter as tk
def cbMenu(controlV,value,btn= None):
controlV.set(str(value))
if not (btn== None):
btn.config(text= str(value))
def updateMB(ctrlV, value):
ctrlV.set(value)
def doLambda(f,*args):
"""Tips: Create lambda within for loop with fixed local variable
without interference across iterations"""
def g(): return f(*args)
return g
def apply(root,d,pos):
"""Creates interface for dictionnary d in root at given grid position """
"TODO: repercuter kwargs"
(x,y,w,h)= pos
lbs= []
saisies= dict()
entries= dict()
for (n,(k,v)) in enumerate(d.iteritems()):
assert (k not in saisies)
l= tk.Label(root,text=str(k))
l.grid(row=n+x,column=y)
if isinstance(v,list):
"""value= list => multiple choice => use menubutton"""
#saisies[k]= tk.StringVar(name=str(n),value= str(v[0]))
saisies[k]= tk.StringVar(value= str(v[0]))
ent=tk.Menubutton(root,textvariable=saisies[k],relief="sunken")
ent.m=tk.Menu(ent,tearoff=0)
ent.config(menu=ent.m)
for (kk,possible) in enumerate(v):
possibleSaved= "%s" %possible
ent.m.add_command(label=str(possible), command= doLambda(updateMB,saisies[k],str(d[k][kk]) ) )
print possible
else:
"""value is not a list => classical edit => use Entry"""
#saisies[k]= tk.StringVar(name=str(n),value= str(v))
saisies[k]= tk.StringVar(value= str(v))
ent= tk.Entry(textvariable=saisies[k])#,width=30)
ent.grid(row=n+x,column=y+1)
entries[k]= ent
return saisies
def get(strVarDict):
d= {}
for (k,v) in strVarDict.iteritems():
#try: v= float(v)
#except: pass
d[k]=v.get()
return d
def main():
"EXAMPLE"
root = tk.Tk()
#d= {'oui':1, 'non':'non'}
d= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
v= tk.StringVar(value= "Open File Dialog")
m=tk.Menubutton(root,textvariable=v,relief="raised")
m.grid(row=2,column=1)
mm=tk.Menu(m,tearoff=0)
tk.Button(root, textvariable=v, command=lambda:v.set('oui')).grid(row=1,column=1)
mm.add_command(label="go", command=lambda: cbMenu(v,"non"))
m.config(menu=mm)
s= apply(root,d,(0,2,0,0))
print isinstance(d, dict)
root.mainloop()
#print d
print s
for (k,v) in s.iteritems():
print str(k), '->',str(v.get())
def testindependance():
root = tk.Tk()
d= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
s= apply(root,d,(0,2,0,0))
dd= {'oui':1,'a':'b', 'non':['?','!non'],'mode':[1.1,2.1,3.1]}
ss= apply(root,dd,(0,5,0,0))
print "s =",s
print "ss=",ss
print isinstance(d, dict)
root.mainloop()
#print d
#print s
for (k,v) in s.iteritems():
print str(k), '->',str(v.get())
print "-"*10
for (k,v) in ss.iteritems():
print str(k), '->',str(v.get())
print "="*10
print get(s)
print get(ss)
if __name__ == '__main__':
main()
#testindependance()
|
keanu-python/tests/test_cast.py | rs992214/keanu | 153 | 12774830 | <gh_stars>100-1000
from keanu.vertex.vertex_casting import (cast_tensor_arg_to_double, cast_tensor_arg_to_integer,
cast_tensor_arg_to_boolean)
from keanu.vertex import cast_to_boolean_vertex, cast_to_integer_vertex, cast_to_double_vertex
from keanu.vartypes import (primitive_types, numpy_types, pandas_types)
import pytest
import numpy as np
import pandas as pd
from typing import Union, Callable
from keanu.vertex import Gaussian
from keanu.vertex.base import Double, Boolean, Integer
@pytest.mark.parametrize("value", [1, 1., True])
@pytest.mark.parametrize("cast_fn, expected_type",
[(cast_tensor_arg_to_double, float), (cast_tensor_arg_to_integer, int),
(cast_tensor_arg_to_boolean, bool), (cast_to_boolean_vertex, Boolean),
(cast_to_integer_vertex, Integer), (cast_to_double_vertex, Double)])
def test_scalar_cast(value: primitive_types, cast_fn: Callable, expected_type: type) -> None:
assert type(cast_fn(value)) == expected_type
@pytest.mark.parametrize("value", [
np.array([1]),
np.array([1.]),
np.array([True]),
np.array([[[1]]]),
np.array([[1, 4], [5, 38]]),
pd.DataFrame(data=[1]),
pd.DataFrame(data=[1.]),
pd.DataFrame(data=[True]),
pd.DataFrame(data=[[1, 2], [4, 5]]),
pd.Series(data=[1]),
pd.Series(data=[1.]),
pd.Series(data=[True]),
pd.Series(data=[1, 3, 4]),
])
@pytest.mark.parametrize("cast_fn, expected_type", [(cast_tensor_arg_to_double, np.floating),
(cast_tensor_arg_to_integer, np.integer),
(cast_tensor_arg_to_boolean, np.bool_)])
def test_nonscalar_tensor_cast(value: Union[numpy_types, pandas_types], cast_fn: Callable, expected_type: type) -> None:
assert cast_fn(value).dtype == expected_type
@pytest.mark.parametrize("value", [
np.array([1]),
np.array([1.]),
np.array([True]),
np.array([[[1]]]),
np.array([[1, 4], [5, 38]]),
pd.DataFrame(data=[1]),
pd.DataFrame(data=[1.]),
pd.DataFrame(data=[True]),
pd.DataFrame(data=[[1, 2], [4, 5]]),
pd.Series(data=[1]),
pd.Series(data=[1.]),
pd.Series(data=[True]),
pd.Series(data=[1, 3, 4]),
])
@pytest.mark.parametrize("cast_fn, expected_type", [(cast_to_double_vertex, Double), (cast_to_integer_vertex, Integer),
(cast_to_boolean_vertex, Boolean)])
def test_nonscalar_vertex_cast(value: Union[numpy_types, pandas_types], cast_fn: Callable, expected_type: type) -> None:
assert type(cast_fn(value)) == expected_type
@pytest.mark.parametrize("cast_fn, cast_to_type",
[(cast_tensor_arg_to_double, float), (cast_tensor_arg_to_integer, int),
(cast_tensor_arg_to_boolean, bool)])
def test_cant_pass_vertex_to_cast_tensor_arg(cast_fn: Callable, cast_to_type: type) -> None:
gaussian = Gaussian(0., 1.)
with pytest.raises(TypeError, match=r"Cannot cast {} to {}".format(type(gaussian), cast_to_type)):
cast_fn(gaussian)
|
chap8/data/gen_mxnet_imglist.py | wang420349864/dlcv_for_beginners | 1,424 | 12774831 | import os
import sys
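# Usage sketch (illustrative paths): python gen_mxnet_imglist.py data/train train.lst
# Each image file is expected to be named like "000123_7.jpg": the part after the
# underscore ("7") is taken as the label, and every output line is
# "<index>\t<label>\t<filepath>", the .lst format used by MXNet's im2rec tooling.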
input_path = sys.argv[1].rstrip(os.sep)
output_path = sys.argv[2]
filenames = os.listdir(input_path)
with open(output_path, 'w') as f:
for i, filename in enumerate(filenames):
filepath = os.sep.join([input_path, filename])
label = filename[:filename.rfind('.')].split('_')[1]
line = '{}\t{}\t{}\n'.format(i, label, filepath)
f.write(line)
|