filename (string, length 13–19) | text (string, length 134–1.04M) |
---|---|
the-stack_0_10381 | from email.mime.text import MIMEText
import random
import smtplib
import jinja2
import datetime
from pebbles.client import PBClient
from pebbles.models import Instance
from pebbles.tasks.celery_app import logger, get_token, local_config, get_dynamic_config
from pebbles.tasks.provisioning_tasks import run_update
from pebbles.tasks.celery_app import celery_app
@celery_app.task(name="pebbles.tasks.periodic_update")
def periodic_update():
""" Runs periodic updates.
In particular, it sets old instances up for deprovisioning once they are past
their maximum_lifetime and sets the remaining instances up for updates.
Neither deletion nor update events are guaranteed to take place
immediately. If there are more than 10 instances, a random sample of 10
updates and deletions is processed so that the task stays cheap to run and
won't slow down other tasks.
"""
token = get_token()
pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)
instances = pbclient.get_instances()
deprovision_list = []
update_list = []
for instance in instances:
logger.debug('checking instance for actions %s' % instance['name'])
deprovision_required = False
if instance.get('state') in [Instance.STATE_RUNNING]:
if not instance.get('lifetime_left') and instance.get('maximum_lifetime'):
deprovision_required = True
if deprovision_required:
deprovision_list.append(instance)
elif instance.get('state') not in [Instance.STATE_FAILED]:
update_list.append(instance)
# ToDo: refactor magic number to variable
if len(deprovision_list) > 10:
deprovision_list = random.sample(deprovision_list, 10)
for instance in deprovision_list:
logger.info('deprovisioning triggered for %s (reason: maximum lifetime exceeded)' % instance.get('id'))
pbclient.do_instance_patch(instance['id'], {'to_be_deleted': True})
run_update.delay(instance.get('id'))
if len(update_list) > 10:
update_list = random.sample(update_list, 10)
for instance in update_list:
run_update.delay(instance.get('id'))
@celery_app.task(name="pebbles.tasks.user_blueprint_cleanup")
def user_blueprint_cleanup():
token = get_token()
pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)
users = pbclient.get_users()
for user in users:
if not user.get('is_deleted') and user.get('expiry_date') and datetime.datetime.strptime(user.get('expiry_date'), '%a, %d %b %Y %H:%M:%S -0000') <= datetime.datetime.utcnow():
pbclient.user_delete(user['id'])
blueprints = pbclient.get_blueprints()
for blueprint in blueprints:
if blueprint.get('expiry_time') and datetime.datetime.strptime(blueprint.get('expiry_time'), '%a, %d %b %Y %H:%M:%S -0000') <= datetime.datetime.utcnow():
pbclient.blueprint_delete(blueprint['id'])
@celery_app.task(name="pebbles.tasks.send_mails")
def send_mails(users, text=None):
""" ToDo: document. apparently sends activation emails.
"""
dynamic_config = get_dynamic_config()
j2_env = jinja2.Environment(loader=jinja2.PackageLoader('pebbles', 'templates'))
base_url = dynamic_config['BASE_URL'].strip('/')
# email_id is used as the recipient address because sending email to the eppn might not work in some cases
for email_id, token, user_active in users:
if text is None:
activation_url = '%s/#/activate/%s' % (base_url, token)
msg = MIMEText(j2_env.get_template('invitation.txt').render(activation_link=activation_url, instance_name=dynamic_config['INSTALLATION_NAME'], instance_description=dynamic_config['INSTALLATION_DESCRIPTION'], user_active=user_active))
subject = '%s account activation' if not user_active else '%s password reset'
msg['Subject'] = subject % dynamic_config['INSTALLATION_NAME']
else:
msg = MIMEText(text['message'])
subject = text['subject'] + " - %s"
msg['Subject'] = subject % dynamic_config['INSTALLATION_NAME']
msg['To'] = email_id
msg['From'] = dynamic_config['SENDER_EMAIL']
logger.info(msg)
if not dynamic_config['MAIL_SUPPRESS_SEND']:
s = smtplib.SMTP(dynamic_config['MAIL_SERVER'])
if dynamic_config['MAIL_USE_TLS']:
s.starttls()
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
else:
logger.info('Mail sending suppressed in config')
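# Illustrative invocation of the task above (a sketch; the tuple values are assumptions
# matching how send_mails unpacks `users`):
#   send_mails.delay([('user@example.org', 'activation-token-123', False)])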
|
the-stack_0_10382 | import os
import re
from datetime import timedelta
from typing import Any, Dict, List, Optional
from unittest import mock, skipUnless
from unittest.mock import MagicMock, call, patch
from django.apps import apps
from django.conf import settings
from django.core.management import call_command, find_commands
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from confirmation.models import RealmCreationKey, generate_realm_creation_url
from zerver.lib.actions import do_add_reaction, do_create_user
from zerver.lib.management import CommandError, ZulipBaseCommand, check_config
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import most_recent_message, stdout_suppressed
from zerver.models import (
Message,
Reaction,
Realm,
Recipient,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_email,
)
class TestCheckConfig(ZulipTestCase):
def test_check_config(self) -> None:
check_config()
with self.settings(REQUIRED_SETTINGS=[('asdf', 'not asdf')]):
with self.assertRaisesRegex(CommandError, "Error: You must set asdf in /etc/zulip/settings.py."):
check_config()
@override_settings(WARN_NO_EMAIL=True)
def test_check_send_email(self) -> None:
with self.assertRaisesRegex(CommandError, "Outgoing email not yet configured, see"):
call_command("send_test_email", '[email protected]')
class TestZulipBaseCommand(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.zulip_realm = get_realm("zulip")
self.command = ZulipBaseCommand()
def test_get_client(self) -> None:
self.assertEqual(self.command.get_client().name, "ZulipServer")
def test_get_realm(self) -> None:
self.assertEqual(self.command.get_realm(dict(realm_id='zulip')), self.zulip_realm)
self.assertEqual(self.command.get_realm(dict(realm_id=None)), None)
self.assertEqual(self.command.get_realm(dict(realm_id=str(self.zulip_realm.id))),
self.zulip_realm)
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='17'))
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='mit'))
def test_get_user(self) -> None:
mit_realm = get_realm("zephyr")
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
self.assertEqual(self.command.get_user(email, self.zulip_realm), user_profile)
self.assertEqual(self.command.get_user(email, None), user_profile)
error_message = f"The realm '{mit_realm}' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_user(email, mit_realm)
with self.assertRaisesRegex(CommandError, "server does not contain a user with email"):
self.command.get_user('[email protected]', None)
do_create_user(email, 'password', mit_realm, 'full_name')
with self.assertRaisesRegex(CommandError, "server contains multiple users with that email"):
self.command.get_user(email, None)
def test_get_user_profile_by_email(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
self.assertEqual(get_user_profile_by_email(email), user_profile)
def get_users_sorted(self, options: Dict[str, Any], realm: Optional[Realm],
**kwargs: Any) -> List[UserProfile]:
user_profiles = self.command.get_users(options, realm, **kwargs)
return sorted(user_profiles, key = lambda x: x.email)
def sorted_users(self, users: List[UserProfile]) -> List[UserProfile]:
return sorted(users, key = lambda x: x.email)
def test_get_users(self) -> None:
expected_user_profiles = self.sorted_users([
self.example_user('hamlet'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
expected_user_profiles = self.sorted_users([
self.mit_user('sipbtest'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = f"The realm '{self.zulip_realm}' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails), self.zulip_realm)
self.assertEqual(self.command.get_users(dict(users=self.example_email("iago")), self.zulip_realm),
[self.example_user("iago")])
self.assertEqual(self.command.get_users(dict(users=None), None), [])
def test_get_users_with_all_users_argument_enabled(self) -> None:
expected_user_profiles = self.sorted_users([
self.example_user('hamlet'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails, all_users=False), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You can't use both -u/--users and -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails, all_users=True), None)
# Test the default mode excluding bots and deactivated users
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_active=True, is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False)
self.assertEqual(user_profiles, expected_user_profiles)
# Test the all_users mode with only deactivated users excluded (bots are included here)
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_active=True),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
# Test include_deactivated
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False, include_deactivated=True)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You have to pass either -u/--users or -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=False), None)
error_message = "The --all-users option requires a realm; please pass --realm."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=True), None)
def test_get_non_bot_users(self) -> None:
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False)
self.assertEqual(user_profiles, expected_user_profiles)
class TestCommandsCanStart(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.commands = [
command
for app_config in apps.get_app_configs()
if os.path.dirname(os.path.realpath(app_config.path)) == settings.DEPLOY_ROOT
for command in find_commands(os.path.join(app_config.path, "management"))
]
assert self.commands
def test_management_commands_show_help(self) -> None:
with stdout_suppressed():
for command in self.commands:
with self.subTest(management_command=command):
with self.assertRaises(SystemExit):
call_command(command, '--help')
# zerver/management/commands/runtornado.py sets this to True;
# we need to reset it here. See #3685 for details.
settings.RUNNING_INSIDE_TORNADO = False
class TestSendWebhookFixtureMessage(ZulipTestCase):
COMMAND_NAME = 'send_webhook_fixture_message'
def setUp(self) -> None:
super().setUp()
self.fixture_path = os.path.join('some', 'fake', 'path.json')
self.url = '/some/url/with/hook'
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_fixture_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, url=self.url)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_url_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
def test_check_if_command_exits_when_fixture_path_does_not_exist(
self, os_path_exists_mock: MagicMock) -> None:
os_path_exists_mock.return_value = False
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
os_path_exists_mock.assert_any_call(os.path.join(settings.DEPLOY_ROOT, self.fixture_path))
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
@patch('zerver.management.commands.send_webhook_fixture_message.Client')
@patch('zerver.management.commands.send_webhook_fixture_message.orjson')
@patch("zerver.management.commands.send_webhook_fixture_message.open", create=True)
def test_check_if_command_post_request_to_url_with_fixture(self,
open_mock: MagicMock,
orjson_mock: MagicMock,
client_mock: MagicMock,
os_path_exists_mock: MagicMock) -> None:
orjson_mock.loads.return_value = {}
orjson_mock.dumps.return_value = b"{}"
os_path_exists_mock.return_value = True
client = client_mock()
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
self.assertTrue(orjson_mock.dumps.called)
self.assertTrue(orjson_mock.loads.called)
self.assertTrue(open_mock.called)
client.post.assert_called_once_with(self.url, b"{}", content_type="application/json",
HTTP_HOST="zulip.testserver")
class TestGenerateRealmCreationLink(ZulipTestCase):
COMMAND_NAME = "generate_realm_creation_link"
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_and_create_realm(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=True)
# Get realm creation page
result = self.client_get(generated_link)
self.assert_in_success_response(["Create a new Zulip organization"], result)
# Enter email
with self.assertRaises(Realm.DoesNotExist):
get_realm('test')
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search(r'/accounts/do_confirm/\w+$', result["Location"]))
# Bypass sending mail for confirmation, go straight to creation form
result = self.client_get(result["Location"])
self.assert_in_response('action="/accounts/register/"', result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_confirm_email(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=False)
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search(f'/accounts/new/send_confirm/{email}$',
result["Location"]))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started", result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_random_link(self) -> None:
# Realm creation attempt with an invalid link should fail
random_link = "/new/5e89081eb13984e0f3b130bf7a4121d153f1614b"
result = self.client_get(random_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_expired_link(self) -> None:
generated_link = generate_realm_creation_url(by_admin=True)
key = generated_link[-24:]
# Manually expire the link by changing the date of creation
obj = RealmCreationKey.objects.get(creation_key=key)
obj.date_created = obj.date_created - timedelta(days=settings.REALM_CREATION_LINK_VALIDITY_DAYS + 1)
obj.save()
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
class TestCalculateFirstVisibleMessageID(ZulipTestCase):
COMMAND_NAME = 'calculate_first_visible_message_id'
def test_check_if_command_calls_maybe_update_first_visible_message_id(self) -> None:
func_name = "zilencer.management.commands.calculate_first_visible_message_id.maybe_update_first_visible_message_id"
with patch(func_name) as m:
call_command(self.COMMAND_NAME, "--realm=zulip", "--lookback-hours=30")
m.assert_called_with(get_realm("zulip"), 30)
with patch(func_name) as m:
call_command(self.COMMAND_NAME, "--lookback-hours=35")
calls = [call(realm, 35) for realm in Realm.objects.all()]
m.assert_has_calls(calls, any_order=True)
class TestPasswordResetEmail(ZulipTestCase):
COMMAND_NAME = "send_password_reset_email"
def test_if_command_sends_password_reset_email(self) -> None:
call_command(self.COMMAND_NAME, users=self.example_email("iago"))
from django.core.mail import outbox
self.assertRegex(
outbox[0].from_email,
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn("reset your password", outbox[0].body)
class TestRealmReactivationEmail(ZulipTestCase):
COMMAND_NAME = "send_realm_reactivation_email"
def test_if_realm_not_deactivated(self) -> None:
realm = get_realm('zulip')
with self.assertRaisesRegex(CommandError, f"The realm {realm.name} is already active."):
call_command(self.COMMAND_NAME, "--realm=zulip")
class TestSendToEmailMirror(ZulipTestCase):
COMMAND_NAME = "send_to_email_mirror"
def test_sending_a_fixture(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.txt"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.txt
self.assertEqual(message.content, "Email fixture 1.txt body")
def test_sending_a_json_fixture(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.json"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.json
self.assertEqual(message.content, "Email fixture 1.json body")
def test_stream_option(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.txt"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark2")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}", "--stream=Denmark2")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark2 (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.txt
self.assertEqual(message.content, "Email fixture 1.txt body")
stream_id = get_stream("Denmark2", get_realm("zulip")).id
self.assertEqual(message.recipient.type, Recipient.STREAM)
self.assertEqual(message.recipient.type_id, stream_id)
class TestConvertMattermostData(ZulipTestCase):
COMMAND_NAME = 'convert_mattermost_data'
def test_if_command_calls_do_convert_data(self) -> None:
with patch('zerver.management.commands.convert_mattermost_data.do_convert_data') as m, \
patch('builtins.print') as mock_print:
mm_fixtures = self.fixture_file_name("", "mattermost_fixtures")
output_dir = self.make_import_output_dir("mattermost")
call_command(self.COMMAND_NAME, mm_fixtures, f"--output={output_dir}")
m.assert_called_with(
masking_content=False,
mattermost_data_dir=os.path.realpath(mm_fixtures),
output_dir=os.path.realpath(output_dir),
)
self.assertEqual(mock_print.mock_calls, [
call('Converting data ...')
])
@skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
class TestInvoicePlans(ZulipTestCase):
COMMAND_NAME = 'invoice_plans'
def test_if_command_calls_invoice_plans_as_needed(self) -> None:
with patch('zilencer.management.commands.invoice_plans.invoice_plans_as_needed') as m:
call_command(self.COMMAND_NAME)
m.assert_called_once()
class TestExport(ZulipTestCase):
COMMAND_NAME = 'export'
def test_command_with_consented_message_id(self) -> None:
realm = get_realm("zulip")
self.send_stream_message(self.example_user("othello"), "Verona",
topic_name="Export",
content="Outbox emoji for export")
message = Message.objects.last()
do_add_reaction(self.example_user("iago"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
do_add_reaction(self.example_user("hamlet"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
with patch("zerver.management.commands.export.export_realm_wrapper") as m, \
patch('builtins.print') as mock_print, \
patch('builtins.input', return_value='y') as mock_input:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
m.assert_called_once_with(realm=realm, public_only=False, consent_message_id=message.id,
delete_after_upload=False, threads=mock.ANY, output_dir=mock.ANY,
percent_callback=mock.ANY,
upload=False)
mock_input.assert_called_once_with('Continue? [y/N] ')
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
call('\n\033[94mMessage content:\033[0m\nOutbox emoji for export\n'),
call('\033[94mNumber of users that reacted outbox:\033[0m 2 / 8 total non-guest users\n'),
])
with self.assertRaisesRegex(CommandError, "Message with given ID does not"), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", "--consent-message-id=123456")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
message.last_edit_time = timezone_now()
message.save()
with self.assertRaisesRegex(CommandError, "Message was edited. Aborting..."), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
message.last_edit_time = None
message.save()
do_add_reaction(self.mit_user("sipbtest"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
with self.assertRaisesRegex(CommandError, "Users from a different realm reacted to message. Aborting..."), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
|
the-stack_0_10386 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''The base class for task transformer objects'''
import numpy as np
from librosa import time_to_frames, times_like
from librosa.sequence import viterbi_binary, viterbi_discriminative
import jams
from ..base import Scope
__all__ = ['BaseTaskTransformer']
def fill_value(dtype):
'''Get a fill-value for a given dtype
Parameters
----------
dtype : type
Returns
-------
`np.nan` if `dtype` is real or complex
0 otherwise
'''
if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating):
return dtype(np.nan)
return dtype(0)
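# Illustrative behaviour of fill_value (a sketch; assumes numpy imported as np above):
#   fill_value(np.float32)  -> nan  (floating and complex dtypes are padded with NaN)
#   fill_value(np.int64)    -> 0    (every other dtype is padded with zero)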
class BaseTaskTransformer(Scope):
'''Base class for task transformer objects
Attributes
----------
name : str
The name prefix for this transformer object
namespace : str
The JAMS namespace for annotations in this task
sr : number > 0
The sampling rate for audio
hop_length : int > 0
The number of samples between frames
'''
def __init__(self, name, namespace, sr, hop_length):
super(BaseTaskTransformer, self).__init__(name)
# This will trigger an exception if the namespace is not found
jams.schema.is_dense(namespace)
self.namespace = namespace
self.sr = sr
self.hop_length = hop_length
def empty(self, duration):
'''Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
'''
return jams.Annotation(namespace=self.namespace, time=0, duration=0)
def transform(self, jam, query=None):
'''Transform jam object to make data for this task
Parameters
----------
jam : jams.JAMS
The jams container object
query : string, dict, or callable [optional]
An optional query to narrow the elements of `jam.annotations`
to be considered.
If not provided, all annotations are considered.
Returns
-------
data : dict
A dictionary of transformed annotations.
All annotations which can be converted to the target namespace
will be converted.
'''
anns = []
if query:
results = jam.search(**query)
else:
results = jam.annotations
# Find annotations that can be coerced to our target namespace
for ann in results:
try:
anns.append(jams.nsconvert.convert(ann, self.namespace))
except jams.NamespaceError:
pass
duration = jam.file_metadata.duration
# If none, make a fake one
if not anns:
anns = [self.empty(duration)]
# Apply transformations
results = []
for ann in anns:
results.append(self.transform_annotation(ann, duration))
# If the annotation range is None, it spans the entire track
if ann.time is None or ann.duration is None:
valid = [0, duration]
else:
valid = [ann.time, ann.time + ann.duration]
results[-1]['_valid'] = time_to_frames(valid, sr=self.sr,
hop_length=self.hop_length)
# Prefix and collect
return self.merge(results)
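# Illustrative use of transform (a sketch; MyBeatTask and the file name are assumptions,
# since concrete tasks are defined in derived classes):
#   task = MyBeatTask(name='beat', namespace='beat', sr=22050, hop_length=512)
#   output = task.transform(jams.load('track.jams'))
#   # `output` maps name-prefixed fields to arrays, including a '_valid' frame range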
def transform_annotation(self, ann, duration):
'''Transform jams.Annotation to make data for a given task.
Parameters
----------
ann : jams.Annotation
The jams annotation containing the data
duration : number > 0
time in seconds of the output duration
Returns
-------
data : dict
A dictionary of transformed annotation.
'''
raise NotImplementedError
def encode_events(self, duration, events, values, dtype=np.bool):
'''Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
'''
frames = time_to_frames(events, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for column, event in zip(values, frames):
target[event] += column
return target[:n_total]
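# Illustrative shape of encode_events output (a sketch; the sr/hop_length values are
# assumptions chosen for the example):
#   with sr=22050, hop_length=512, duration=2.0 and events=np.array([0.0, 1.0]),
#   the result has shape (n_frames, values.shape[1]); the rows at the frames nearest
#   0.0 s and 1.0 s contain the corresponding rows of `values`, the rest hold the fill value.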
def encode_intervals(self, duration, intervals, values, dtype=np.bool,
multi=True, fill=None):
'''Encode labeled intervals as a time-series matrix.
Parameters
----------
duration : number
The duration (in seconds) of the track
intervals : np.ndarray, shape=(n, 2)
The list of intervals
values : np.ndarray, shape=(n, m)
The (encoded) values corresponding to each interval
dtype : np.dtype
The desired output type
multi : bool
If `True`, allow multiple labels per interval.
fill : dtype (optional)
Optional default fill value for missing data.
If not provided, the default is inferred from `dtype`.
Returns
-------
target : np.ndarray, shape=(duration * sr / hop_length, m)
The labeled interval encoding, sampled at the desired frame rate
'''
if fill is None:
fill = fill_value(dtype)
frames = time_to_frames(intervals, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
values = values.astype(dtype)
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill)
for column, interval in zip(values, frames):
if multi:
target[interval[0]:interval[1]] += column
else:
target[interval[0]:interval[1]] = column
return target[:n_total]
def decode_events(self, encoded, transition=None, p_state=None, p_init=None):
'''Decode labeled events into (time, value) pairs
Real-valued inputs are thresholded at 0.5.
Optionally, viterbi decoding can be applied to each event class.
Parameters
----------
encoded : np.ndarray, shape=(n_frames, m)
Frame-level annotation encodings as produced by ``encode_events``.
transition : None or np.ndarray [shape=(2, 2) or (m, 2, 2)]
Optional transition matrix for each event, used for Viterbi
p_state : None or np.ndarray [shape=(m,)]
Optional marginal probability for each event
p_init : None or np.ndarray [shape=(m,)]
Optional marginal probability for each event
Returns
-------
[(time, value)] : iterable of tuples
where `time` is the event time and `value` is an
np.ndarray, shape=(m,) of the encoded value at that time
See Also
--------
librosa.sequence.viterbi_binary
'''
if np.isrealobj(encoded):
if transition is None:
encoded = (encoded >= 0.5)
else:
encoded = viterbi_binary(encoded.T, transition,
p_state=p_state,
p_init=p_init).T
times = times_like(encoded,
sr=self.sr,
hop_length=self.hop_length,
axis=0)
return zip(times, encoded)
def decode_intervals(self, encoded, duration=None, multi=True, sparse=False,
transition=None, p_state=None, p_init=None):
'''Decode labeled intervals into (start, end, value) triples
Parameters
----------
encoded : np.ndarray, shape=(n_frames, m)
Frame-level annotation encodings as produced by
``encode_intervals``
duration : None or float > 0
The max duration of the annotation (in seconds)
Must be greater than the length of encoded array.
multi : bool
If true, allow multiple labels per input frame.
If false, take the most likely label per input frame.
sparse : bool
If true, values are returned as indices, not one-hot.
If false, values are returned as one-hot encodings.
Only applies when `multi=False`.
transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)]
Optional transition matrix for each interval, used for Viterbi
decoding. If `multi=True`, then transition should be `(2, 2)` or
`(m, 2, 2)`-shaped. If `multi=False`, then transition should be
`(m, m)`-shaped.
p_state : None or np.ndarray [shape=(m,)]
Optional marginal probability for each label.
p_init : None or np.ndarray [shape=(m,)]
Optional marginal probability for each label.
Returns
-------
[(start, end, value)] : iterable of tuples
where `start` and `end` are the interval boundaries (in seconds)
and `value` is an np.ndarray, shape=(m,) of the encoded value
for this interval.
'''
if np.isrealobj(encoded):
if multi:
if transition is None:
encoded = encoded >= 0.5
else:
encoded = viterbi_binary(encoded.T, transition,
p_init=p_init, p_state=p_state).T
elif sparse and encoded.shape[1] > 1:
# map to argmax if it's densely encoded (logits)
if transition is None:
encoded = np.argmax(encoded, axis=1)[:, np.newaxis]
else:
encoded = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)[:, np.newaxis]
elif not sparse:
# if dense and multi, map to one-hot encoding
if transition is None:
encoded = (encoded == np.max(encoded, axis=1, keepdims=True))
else:
encoded_ = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)
# Map to one-hot encoding
encoded = np.zeros(encoded.shape, dtype=bool)
encoded[np.arange(len(encoded_)), encoded_] = True
if duration is None:
# 1+ is fair here, because encode_intervals already pads
duration = 1 + encoded.shape[0]
else:
duration = 1 + time_to_frames(duration,
sr=self.sr,
hop_length=self.hop_length)
# [0, duration] inclusive
times = times_like(duration + 1,
sr=self.sr, hop_length=self.hop_length)
# Find the change-points of the rows
if sparse:
idx = np.where(encoded[1:] != encoded[:-1])[0]
else:
idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0]
idx = np.unique(np.append(idx, encoded.shape[0]))
delta = np.diff(np.append(-1, idx))
# Starting positions can be integrated from changes
position = np.cumsum(np.append(0, delta))
return [(times[p], times[p + d], encoded[p])
for (p, d) in zip(position, delta)]
|
the-stack_0_10390 | """Datasets are defined as scripts and have unique properties.
This module defines generic dataset properties and the functions
available for inheritance by the scripts (datasets).
"""
from __future__ import print_function
from weaver.engines import choose_engine
from weaver.lib.models import *
from weaver.lib.process import make_sql
class Script(object):
"""This class defines the properties of a generic dataset.
Each dataset inherits attributes from this class to define
its unique functionality.
"""
def __init__(
self,
title="",
description="",
name="",
urls=dict(),
tables=dict(),
ref="",
public=True,
addendum=None,
citation="Not currently available",
licenses=[{"name": None}],
retriever_minimum_version="",
version="",
encoding="",
message="",
**kwargs
):
self.title = title
self.name = name
self.filename = __name__
self.description = description
self.urls = urls
self.tables = tables
self.ref = ref
self.public = public
self.addendum = addendum
self.citation = citation
self.licenses = licenses
self.keywords = []
self.retriever_minimum_version = retriever_minimum_version
self.encoding = encoding
self.version = version
self.message = message
for key, item in list(kwargs.items()):
setattr(self, key, item[0] if isinstance(item, tuple) else item)
def __str__(self):
desc = self.name
if self.reference_url():
desc += "\n" + self.reference_url()
return desc
def integrate(self, engine=None, debug=False):
"""Generic function to prepare for integration."""
self.engine = self.checkengine(engine)
self.engine.debug = debug
self.engine.db_name = self.name
self.engine.create_db()
def reference_url(self):
if self.ref:
return self.ref
else:
if len(self.urls) == 1:
return self.urls[list(self.urls.keys())[0]]
else:
return None
def checkengine(self, engine=None):
"""Returns the required engine instance"""
if engine is None:
opts = {}
engine = choose_engine(opts)
engine.get_input()
engine.script = self
return engine
def exists(self, engine=None):
if engine:
return engine.exists(self)
else:
return False
def matches_terms(self, terms):
try:
search_string = " ".join(
[self.name, self.description, self.name] + self.keywords
).upper()
for term in terms:
if not term.upper() in search_string:
return False
return True
except:
return False
class BasicTextTemplate(Script):
"""Defines the pre processing required for scripts.
Scripts that need pre processing should use the download function
from this class.
Scripts that require extra tune up, should override this class.
"""
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
for key in kwargs:
setattr(self, key, kwargs[key])
self.db_table_name = None
def integrate(self, engine=None, debug=False):
"""Create the SQL query to be sent to the Engine
Uses the scripts' integrate function to prepare the engine
and it creates the database to store the result.
"""
Script.integrate(self, engine, debug)
sql_statement = make_sql(self)
result_db = engine.database_name()
result_table = self.result["table"]
db_table_name = "{db_name}.{table_name}".format(
db_name=result_db, table_name=result_table
)
self.db_table_name = db_table_name
Script.db_table_name = db_table_name
drop_query = self.engine.drop_statement("TABLE", db_table_name)
join_query = sql_statement.format(
result_dbi=result_db, result_tablei=result_table
)
try:
if self.engine.debug:
print(drop_query)
self.engine.execute(drop_query)
except:
pass
try:
if self.engine.debug:
print(join_query)
self.engine.execute(join_query)
except Exception as e:
try:
self.connection.rollback()
except Exception as _:
pass
print(e)
print("Process successfully launched in Database.")
print("Please wait for the table to render")
return engine
TEMPLATES = {"default": BasicTextTemplate}
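# Illustrative use of the template registry (a sketch; the keyword arguments shown are
# assumptions about what a generated script would pass in):
#   script = TEMPLATES["default"](name="my_join", result={"table": "joined_result"})
#   engine = script.integrate()  # creates the database and launches the join query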
|
the-stack_0_10391 | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.generator import Generator
from hyde.site import Site
from hyde.tests.util import assert_no_diff
from fswrap import File, Folder
SCSS_SOURCE = File(__file__).parent.child_folder('scss')
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestSassyCSS(object):
def setUp(self):
TEST_SITE.make()
TEST_SITE.parent.child_folder(
'sites/test_jinja').copy_contents_to(TEST_SITE)
SCSS_SOURCE.copy_contents_to(TEST_SITE.child('content/media/css'))
File(TEST_SITE.child('content/media/css/site.css')).delete()
def tearDown(self):
TEST_SITE.delete()
def test_scss(self):
s = Site(TEST_SITE)
s.config.mode = 'prod'
s.config.plugins = ['hyde.ext.plugins.css.SassyCSSPlugin']
source = TEST_SITE.child('content/media/css/site.scss')
target = File(
Folder(s.config.deploy_root_path).child('media/css/site.css'))
gen = Generator(s)
gen.generate_resource_at_path(source)
assert target.exists
text = target.read_all()
expected_text = File(SCSS_SOURCE.child('expected-site.css')).read_all()
assert_no_diff(expected_text, text)
|
the-stack_0_10392 | #!/usr/bin/env python
__all__ = ['douban_download']
import urllib.request, urllib.parse
from ..common import *
def douban_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
html = get_html(url)
if re.match(r'https?://movie', url):
title = match1(html, 'name="description" content="([^"]+)')
tid = match1(url, 'trailer/(\d+)')
real_url = 'https://movie.douban.com/trailer/video_url?tid=%s' % tid
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
elif 'subject' in url:
titles = re.findall(r'data-title="([^"]*)">', html)
song_id = re.findall(r'<li class="song-item" id="([^"]*)"', html)
song_ssid = re.findall(r'data-ssid="([^"]*)"', html)
get_song_url = 'http://music.douban.com/j/songlist/get_song_url'
for i in range(len(titles)):
title = titles[i]
datas = {
'sid': song_id[i],
'ssid': song_ssid[i]
}
post_params = urllib.parse.urlencode(datas).encode('utf-8')
try:
resp = urllib.request.urlopen(get_song_url, post_params)
resp_data = json.loads(resp.read().decode('utf-8'))
real_url = resp_data['r']
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
except:
pass
if not info_only:
try:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
except:
pass
else:
titles = re.findall(r'"name":"([^"]*)"', html)
real_urls = [re.sub('\\\\/', '/', i) for i in re.findall(r'"rawUrl":"([^"]*)"', html)]
for i in range(len(titles)):
title = titles[i]
real_url = real_urls[i]
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Douban.com"
download = douban_download
download_playlist = playlist_not_supported('douban')
|
the-stack_0_10397 | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import (
_BaseLazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
if is_torch_available():
_import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
if is_tf_available():
_import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
if is_flax_available():
_import_structure["modeling_flax_electra"] = [
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .tokenization_electra import ElectraTokenizer
if is_tokenizers_available():
from .tokenization_electra_fast import ElectraTokenizerFast
if is_torch_available():
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
if is_tf_available():
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
if is_flax_available():
from .modeling_flax_electra import (
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
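# Illustrative effect of the lazy module (a sketch; assumes this file is the electra
# package __init__): `from ...electra import ElectraModel` only imports modeling_electra
# when the attribute is first accessed, so importing the package stays cheap when the
# torch/tf/flax extras are not installed.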
|
the-stack_0_10398 | #!/usr/bin/env python3
questions = {
"strong": "Do ye like yer drinks strong?",
"salty": "Do ye like it with a salty tang?",
"bitter": "Are ye a lubber who likes it bitter?",
"sweet": "Would ye like a bit of sweetness with yer poison?",
"fruity": "Are ye one for a fruity finish?",
}
ingredients = {
"strong": ["glug of rum", "slug of whisky", "splash of gin"],
"salty": ["olive on a stick", "salt-dusted rim", "rasher of bacon"],
"bitter": ["shake of bitters", "splash of tonic", "twist of lemon peel"],
"sweet": ["sugar cube", "spoonful of honey", "spash of cola"],
"fruity": ["slice of orange", "dash of cassis", "cherry on top"],
}
def bartender(**questions):
# each keyword maps a flavor to its question; for now the function only asks them
for flavor, question in questions.items():
print(question)
print("Hi there, welcome to the Pirate Bartender program")
bartender(**questions)
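# A possible next step for the exercise (a sketch; the interaction flow is an assumption):
#   import random
#   picks = [random.choice(ingredients[flavor]) for flavor, question in questions.items()
#            if input(question + ' (y/n) ').lower().startswith('y')]
#   print('Yer drink comes with: ' + ', '.join(picks))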
|
the-stack_0_10401 | import abc
from time import time
from urllib import parse
from ... import gvars
from ...utils import open_connection
class HTTPResponse:
def __init__(self, client):
self.client = client
self.done = False
self.header_size = 0
self.body_size = 0
self.speed = 0
self.start = time()
@property
def size(self):
return self.header_size + self.body_size
def on_header(self, name: bytes, value: bytes):
self.header_size += len(name) + len(value)
def on_message_complete(self):
self.done = True
seconds = time() - self.start
self.speed = int(self.size / 1024 / seconds) # KB/s
def on_body(self, body: bytes):
self.body_size += len(body)
class ClientBase(abc.ABC):
sock = None
target_addr = ("unknown", -1)
def __init__(self, namespace):
self.ns = namespace
def __repr__(self):
return f"{self.__class__.__name__}({self})"
def __str__(self):
return f"{self.bind_address} -- {self.target_address}"
async def __aenter__(self):
return self
async def __aexit__(self, et, e, tb):
await self.close()
async def close(self):
if self.sock:
await self.sock.close()
self.sock = None
@property
@abc.abstractmethod
def proto(self):
""
@property
def bind_address(self) -> str:
return f"{self.ns.bind_addr[0]}:{self.ns.bind_addr[1]}"
@property
def target_address(self) -> str:
return f"{self.target_addr[0]}:{self.target_addr[1]}"
async def connect(self, target_addr, source_addr=None):
self.target_addr = target_addr
if self.sock:
return
self.sock = await open_connection(*self.ns.bind_addr, source_addr=source_addr)
@abc.abstractmethod
async def init(self):
""
async def recv(self, size):
return await self.sock.recv(size)
async def sendall(self, data):
return await self.sock.sendall(data)
async def http_request(
self, uri: str, method: str = "GET", headers: list = None, response_cls=None
):
import httptools
response_cls = response_cls or HTTPResponse
url = parse.urlparse(uri)
host, _, port = url.netloc.partition(":")
try:
port = int(port)
except ValueError:
if url.scheme == "http":
port = 80
elif url.scheme == "https":
port = 443
else:
raise Exception(f"unknown scheme: {url.scheme}")
target_addr = (host, port)
await self.connect(target_addr)
await self.init()
header_list = [f"Host: {self.target_address}".encode()]
if headers:
for header in headers:
if isinstance(header, str):
header = header.encode()
header_list.append(header)
ver = b"HTTP/1.1"
method = method.upper().encode()
url = url.geturl().encode()
data = b"%b %b %b\r\n%b\r\n\r\n" % (method, url, ver, b"\r\n".join(header_list))
await self.sendall(data)
response = response_cls(self)
parser = httptools.HttpResponseParser(response)
while not response.done:
data = await self.recv(gvars.PACKET_SIZE)
if not data:
raise Exception("Incomplete response")
parser.feed_data(data)
return response
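# Illustrative usage (a sketch; ExampleClient stands for an assumed concrete subclass that
# implements `proto` and `init`, run inside the project's async event loop):
#   async with ExampleClient(namespace) as client:
#       resp = await client.http_request('http://example.com/', headers=['Accept: */*'])
#       print(resp.size, 'bytes at', resp.speed, 'KB/s')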
|
the-stack_0_10404 | from django.conf import settings
from django.conf.urls import handler404, handler500
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
handler404 = 'foodgram_project.views.page_not_found' # noqa
handler500 = 'foodgram_project.views.server_error' # noqa
urlpatterns = [
path('auth/', include('users.urls')),
path('auth/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('about/', include('about.urls')),
path('', include('foodgram.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
the-stack_0_10405 | import sqlite3 as sql
import queries as qrs
import pandas as pd
# assignment 1
def connect_db(db='../rpg_db.sqlite3'):
return sql.connect(db)
def exec(conn, query):
curs = conn.cursor()
curs.execute(query)
res = curs.fetchall()
return res
# assignment 2
df = pd.DataFrame(pd.read_csv('../buddymove_holidayiq.csv'))
print(df.shape)
print(df.isnull().sum())  # number of missing values per column
conn = sql.connect('../buddymove_holidayiq.sqlite3')
# df.to_sql('review', conn)
# how many rows
row_count = 'SELECT COUNT(*) FROM review'
# how many users who reviewed at least 100 'Nature' and at least 100 in 'Shopping'
nature_and_shopping = 'SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100'
print(exec(conn, row_count))
print(exec(conn, nature_and_shopping))
|
the-stack_0_10406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
import unittest
from modules.testMpiDecorator import testMpiDecorator
def main():
suite = unittest.TestLoader().loadTestsFromTestCase(testMpiDecorator)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
|
the-stack_0_10408 | #!/usr/bin/python
#
# Converts KML files to BigQuery WKT geography objects (CSV)
from __future__ import print_function
import sys
import re
import xml.etree.ElementTree
e = xml.etree.ElementTree.parse(sys.argv[1]).getroot()
p = re.compile('^\{(.+?)\}')
matches = p.match(e.tag)
xmlns = matches.group(1)
document = e[0]
for pmark in document.findall('.//xmlns:Placemark', { 'xmlns' : xmlns }):
name = pmark.find('./xmlns:name', { 'xmlns' : xmlns })
print('"%s",' % name.text, end='')
for coord in pmark.findall('.//xmlns:coordinates', { 'xmlns' : xmlns }):
coordmatches = re.findall('(\d+\.\d+,\d+\.\d+),0[ \n]', coord.text)
print('"POLYGON((', end='')
i = 0
for m in coordmatches:
if i > 0:
print(',', end='')
print(m.replace(',', ' '), end='')
i += 1
print('))"')
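# Illustrative output line (a sketch; the placemark name and coordinates are made up):
#   "Region A","POLYGON((115.81 37.24,115.82 37.25,115.80 37.26))"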
|
the-stack_0_10409 | #!usr/bin/python3.7
#author: kang-newbie
#github: https://github.com/kang-newbie
#contact: https://t.me/kang_nuubi
import os,sys,time
try:
os.mkdir('audio')
except: pass
def os_detek():
if os.name in ['nt', 'win32']:
os.system('cls')
else:
os.system('clear')
banner="""
;;;;;;;;;;;;;;;;;
; KARIN ; Author: KANG-NEWBIE
; SPEECH - TEXT ; Contact: t.me/kang_nuubi
;;;;;;;;;;;;;;;;;
"""
def sarg():
exit("""
Usage:
python %s --lang (language)
Example:
python %s --lang id
or
python %s --lang en"""%(sys.argv[0],sys.argv[0],sys.argv[0]))
if __name__=='__main__':
os_detek()
print(banner)
a=sys.version.split('.')
if a[0] != '3':
exit('use python version 3.x.x')
if len(sys.argv) != 3:
sarg()
if 'id' in sys.argv[2]:
os.system('python3 src/karin_id.py')
elif 'en' in sys.argv[2]:
os.system('python3 src/karin_en.py')
else: sarg()
|
the-stack_0_10411 | # model settings
input_size = 300
model = dict(
type='SingleStageDetector',
#pretrained='open-mmlab://vgg16_caffe',
pretrained='vgg16_caffe-292e1171.pth',
backbone=dict(
type='SSDVGG',
input_size=input_size,
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20),
neck=None,
bbox_head=dict(
type='SSDHead',
input_size=input_size,
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=2,
anchor_strides=(8, 16, 32, 64, 100, 300),
basesize_ratio_range=(0.15, 0.9),
anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.2, 0.2)))
cudnn_benchmark = True
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
nms=dict(type='nms', iou_thr=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200)
# model training and testing settings
# dataset settings
dataset_type = 'CocoDataset'
#data_root = 'data/WIDERFACE2019'
data_root = 'WIDER/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=4,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + '/WIDER_train/instances_train2019.json',
img_prefix=data_root + '/WIDER_train/images/',
img_scale=(300, 300),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True,
extra_aug=dict(
photo_metric_distortion=dict(
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
expand=dict(
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
random_crop=dict(
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
resize_keep_ratio=False),
val=dict(
type=dataset_type,
ann_file=data_root + 'WIDER_val/instances_val2019.json',
img_prefix=data_root + 'WIDER_val/images/',
img_scale=(300, 300),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + '/WIDER_val/instances_val2019.json',
img_prefix=data_root + '/WIDER_val/images/',
# img_scale=(640, 640),#no result
img_scale=(300, 300),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.0,
with_mask=False,
with_label=False,
test_mode=True,
resize_keep_ratio=False))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 3,
step=[16, 20])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ssd300_coco'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_0_10412 | from rest_framework import renderers
class PlainTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'text'
def render(self, data, media_type=None, renderer_context=None):
json_data = renderers.JSONRenderer().render(data, media_type, renderer_context)
# JSONRenderer returns bytes; decode before re-encoding so the plain-text body is the
# JSON itself rather than the literal "b'...'" repr of a bytes object
return json_data.decode('utf-8').encode(self.charset)
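# Illustrative wiring (a sketch; ExampleView stands for an assumed DRF view in this project):
#   class ExampleView(APIView):
#       renderer_classes = [PlainTextRenderer]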
|
the-stack_0_10413 | import json
import numpy
import os
import re
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
def get_clip_names(benchmarks):
clip_names = []
for bench in benchmarks:
run_name = bench['name']
matches = re.search(r'^(\w+)\.acl/', run_name)
if matches == None:
print('Failed to find the clip name from benchmark run: {}', run_name)
else:
clip_name = matches.group(1)
clip_names.append(clip_name)
bench['clip_name'] = clip_name
return sorted(list(set(clip_names)))
def get_median_runs(clip_name, benchmarks):
pose = None
bone = None
for bench in benchmarks:
if bench['clip_name'] != clip_name:
continue # Not our clip
if 'Dir:0' not in bench['name']:
continue # Wrong direction
if bench['run_type'] != 'aggregate':
continue # Not an aggregate value
if bench['aggregate_name'] != 'median':
continue # Not our median
if 'Func:0' in bench['name']:
# Decompress pose
pose = bench
elif 'Func:1' in bench['name']:
# Decompress bone
bone = bench
return (pose, bone)
def ns_to_us(time_ns):
return time_ns / 1000.0
def bytessec_to_mbsec(bytes_per_sec):
return bytes_per_sec / (1024.0 * 1024.0)
def ms_to_s(time_ms):
return time_ms / 1000.0
def bytes_to_mb(num_bytes):
return num_bytes / (1024 * 1024)
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2 and len(sys.argv) != 3:
print('Usage: python gen_decomp_delta_stats.py <path/to/input_file.sjson> [-warm]')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
clip_names = []
decomp_delta_us_csv_file = open('decomp_delta_forward_stats_us.csv', 'w')
decomp_delta_mbsec_csv_file = open('decomp_delta_forward_stats_mbsec.csv', 'w')
pose_size_per_clip = {}
per_entry_data = []
for entry in input_sjson_data['inputs']:
print('Processing {} ...'.format(entry['stats_dir']))
benchmark_json_file = os.path.join(entry['stats_dir'], 'benchmark_results.json')
with open(benchmark_json_file, 'r') as file:
json_data = json.loads(file.read())
benchmarks = json_data['benchmarks']
if len(clip_names) == 0:
clip_names = get_clip_names(benchmarks)
print('Variants,Config,Version,{}'.format(','.join(clip_names)), file = decomp_delta_us_csv_file)
print('Variants,Config,Version,{}'.format(','.join(clip_names)), file = decomp_delta_mbsec_csv_file)
else:
get_clip_names(benchmarks)
pose_medians_us = {}
bone_medians_us = {}
pose_medians_mbsec = {}
bone_medians_mbsec = {}
for clip_name in clip_names:
print(' Processing {} ...'.format(clip_name))
(pose_median_run, bone_median_run) = get_median_runs(clip_name, benchmarks)
			# Convert from nanoseconds into microseconds
pose_median = ns_to_us(pose_median_run['real_time'])
bone_median = ns_to_us(bone_median_run['real_time'])
pose_medians_us[clip_name] = pose_median
bone_medians_us[clip_name] = bone_median
# Convert from bytes/sec to megabytes/sec
pose_speed = bytessec_to_mbsec(pose_median_run['Speed'])
bone_speed = bytessec_to_mbsec(bone_median_run['Speed'])
pose_medians_mbsec[clip_name] = pose_speed
bone_medians_mbsec[clip_name] = bone_speed
data = {}
data['name'] = entry['name']
data['version'] = entry['version']
data['pose_medians_us'] = pose_medians_us
data['bone_medians_us'] = bone_medians_us
data['pose_medians_mbsec'] = pose_medians_mbsec
data['bone_medians_mbsec'] = bone_medians_mbsec
data['clip_names'] = clip_names
per_entry_data.append(data)
for data in per_entry_data:
pose_medians_us = data['pose_medians_us']
bone_medians_us = data['bone_medians_us']
pose_medians_mbsec = data['pose_medians_mbsec']
bone_medians_mbsec = data['bone_medians_mbsec']
clip_names = data['clip_names']
pose_medians_us_csv = []
bone_medians_us_csv = []
pose_medians_mbsec_csv = []
bone_medians_mbsec_csv = []
for clip_name in clip_names:
pose_cold_median_us = pose_medians_us[clip_name]
bone_cold_median_us = bone_medians_us[clip_name]
pose_medians_us_csv.append(str(pose_cold_median_us))
bone_medians_us_csv.append(str(bone_cold_median_us))
pose_cold_speed_mbsec = pose_medians_mbsec[clip_name]
bone_cold_speed_mbsec = bone_medians_mbsec[clip_name]
			# Speeds were already converted to MB/sec above; just format them for the CSV
pose_medians_mbsec_csv.append(str(pose_cold_speed_mbsec))
bone_medians_mbsec_csv.append(str(bone_cold_speed_mbsec))
print('decompress_pose,{},{},{}'.format(data['name'], data['version'], ','.join(pose_medians_us_csv)), file = decomp_delta_us_csv_file)
print('decompress_bone,{},{},{}'.format(data['name'], data['version'], ','.join(bone_medians_us_csv)), file = decomp_delta_us_csv_file)
print('decompress_pose,{},{},{}'.format(data['name'], data['version'], ','.join(pose_medians_mbsec_csv)), file = decomp_delta_mbsec_csv_file)
print('decompress_bone,{},{},{}'.format(data['name'], data['version'], ','.join(bone_medians_mbsec_csv)), file = decomp_delta_mbsec_csv_file)
decomp_delta_us_csv_file.close()
decomp_delta_mbsec_csv_file.close()
|
the-stack_0_10414 | # -*- coding: utf-8 -*-
'''
A salt interface to psutil, a system and process library.
See http://code.google.com/p/psutil.
:depends: - psutil Python module, version 0.3.0 or later
- python-utmp package (optional)
'''
# Import python libs
from __future__ import absolute_import
import time
import datetime
import re
# Import salt libs
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0)
except ImportError:
HAS_PSUTIL = False
# pylint: enable=import-error
def __virtual__():
if not HAS_PSUTIL:
return False, 'The ps module cannot be loaded: python module psutil not installed.'
# Functions and attributes used in this execution module seem to have been
# added as of psutil 0.3.0, from an inspection of the source code. Only
# make this module available if the version of psutil is >= 0.3.0. Note
# that this may need to be tweaked if we find post-0.3.0 versions which
# also have problems running the functions in this execution module, but
# most distributions have already moved to later versions (for example,
# as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.).
if psutil.version_info >= (0, 3, 0):
return True
return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info))
def _get_proc_cmdline(proc):
'''
Returns the cmdline of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.cmdline() if PSUTIL2 else proc.cmdline
except (psutil.NoSuchProcess, psutil.AccessDenied):
return ''
def _get_proc_create_time(proc):
'''
Returns the create_time of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.create_time() if PSUTIL2 else proc.create_time
except (psutil.NoSuchProcess, psutil.AccessDenied):
return None
def _get_proc_name(proc):
'''
Returns the name of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.name() if PSUTIL2 else proc.name
except (psutil.NoSuchProcess, psutil.AccessDenied):
return []
def _get_proc_status(proc):
'''
Returns the status of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.status() if PSUTIL2 else proc.status
except (psutil.NoSuchProcess, psutil.AccessDenied):
return None
def _get_proc_username(proc):
'''
Returns the username of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.username() if PSUTIL2 else proc.username
except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):
return None
def _get_proc_pid(proc):
'''
Returns the pid of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
return proc.pid
def top(num_processes=5, interval=3):
'''
Return a list of top CPU consuming processes during the interval.
num_processes = return the top N CPU consuming processes
interval = the number of seconds to sample CPU usage over
CLI Examples:
.. code-block:: bash
salt '*' ps.top
salt '*' ps.top 5 10
'''
result = []
start_usage = {}
for pid in psutil.pids():
try:
process = psutil.Process(pid)
user, system = process.cpu_times()
except ValueError:
user, system, _, _ = process.cpu_times()
except psutil.NoSuchProcess:
continue
start_usage[process] = user + system
time.sleep(interval)
usage = set()
for process, start in six.iteritems(start_usage):
try:
user, system = process.cpu_times()
except ValueError:
user, system, _, _ = process.cpu_times()
except psutil.NoSuchProcess:
continue
now = user + system
diff = now - start
usage.add((diff, process))
for idx, (diff, process) in enumerate(reversed(sorted(usage))):
if num_processes and idx >= num_processes:
break
if len(_get_proc_cmdline(process)) == 0:
cmdline = _get_proc_name(process)
else:
cmdline = _get_proc_cmdline(process)
info = {'cmd': cmdline,
'user': _get_proc_username(process),
'status': _get_proc_status(process),
'pid': _get_proc_pid(process),
'create_time': _get_proc_create_time(process),
'cpu': {},
'mem': {},
}
for key, value in six.iteritems(process.cpu_times()._asdict()):
info['cpu'][key] = value
for key, value in six.iteritems(process.memory_info()._asdict()):
info['mem'][key] = value
result.append(info)
return result
def get_pid_list():
'''
Return a list of process ids (PIDs) for all running processes.
CLI Example:
.. code-block:: bash
salt '*' ps.get_pid_list
'''
return psutil.pids()
def proc_info(pid, attrs=None):
'''
Return a dictionary of information for a process id (PID).
CLI Example:
.. code-block:: bash
salt '*' ps.proc_info 2322
salt '*' ps.proc_info 2322 attrs='["pid", "name"]'
pid
PID of process to query.
attrs
Optional list of desired process attributes. The list of possible
attributes can be found here:
http://pythonhosted.org/psutil/#psutil.Process
'''
try:
proc = psutil.Process(pid)
return proc.as_dict(attrs)
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc:
raise CommandExecutionError(exc)
def kill_pid(pid, signal=15):
'''
Kill a process by PID.
.. code-block:: bash
salt 'minion' ps.kill_pid pid [signal=signal_number]
pid
PID of process to kill.
signal
Signal to send to the process. See manpage entry for kill
for possible values. Default: 15 (SIGTERM).
**Example:**
Send SIGKILL to process with PID 2000:
.. code-block:: bash
salt 'minion' ps.kill_pid 2000 signal=9
'''
try:
psutil.Process(pid).send_signal(signal)
return True
except psutil.NoSuchProcess:
return False
def pkill(pattern, user=None, signal=15, full=False):
'''
Kill processes matching a pattern.
.. code-block:: bash
salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\
[full=(true|false)]
pattern
Pattern to search for in the process list.
user
Limit matches to the given username. Default: All users.
signal
Signal to send to the process(es). See manpage entry for kill
for possible values. Default: 15 (SIGTERM).
full
A boolean value indicating whether only the name of the command or
the full command line should be matched against the pattern.
**Examples:**
Send SIGHUP to all httpd processes on all 'www' minions:
.. code-block:: bash
salt 'www.*' ps.pkill httpd signal=1
Send SIGKILL to all bash processes owned by user 'tom':
.. code-block:: bash
salt '*' ps.pkill bash signal=9 user=tom
'''
killed = []
for proc in psutil.process_iter():
name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
else pattern in _get_proc_name(proc)
user_match = True if user is None else user == _get_proc_username(proc)
if name_match and user_match:
try:
proc.send_signal(signal)
killed.append(_get_proc_pid(proc))
except psutil.NoSuchProcess:
pass
if not killed:
return None
else:
return {'killed': killed}
def pgrep(pattern, user=None, full=False):
'''
Return the pids for processes matching a pattern.
If full is true, the full command line is searched for a match,
otherwise only the name of the command is searched.
.. code-block:: bash
salt '*' ps.pgrep pattern [user=username] [full=(true|false)]
pattern
Pattern to search for in the process list.
user
Limit matches to the given username. Default: All users.
full
A boolean value indicating whether only the name of the command or
the full command line should be matched against the pattern.
**Examples:**
Find all httpd processes on all 'www' minions:
.. code-block:: bash
salt 'www.*' ps.pgrep httpd
Find all bash processes owned by user 'tom':
.. code-block:: bash
salt '*' ps.pgrep bash user=tom
'''
procs = []
for proc in psutil.process_iter():
name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \
else pattern in _get_proc_name(proc)
user_match = True if user is None else user == _get_proc_username(proc)
if name_match and user_match:
procs.append(_get_proc_pid(proc))
return procs or None
def cpu_percent(interval=0.1, per_cpu=False):
'''
Return the percent of time the CPU is busy.
interval
the number of seconds to sample CPU usage over
per_cpu
if True return an array of CPU percent busy for each CPU, otherwise
aggregate all percents into one number
CLI Example:
.. code-block:: bash
salt '*' ps.cpu_percent
'''
if per_cpu:
result = list(psutil.cpu_percent(interval, True))
else:
result = psutil.cpu_percent(interval)
return result
def cpu_times(per_cpu=False):
'''
Return the percent of time the CPU spends in each state,
e.g. user, system, idle, nice, iowait, irq, softirq.
per_cpu
if True return an array of percents for each CPU, otherwise aggregate
all percents into one number
CLI Example:
.. code-block:: bash
salt '*' ps.cpu_times
'''
if per_cpu:
result = [dict(times._asdict()) for times in psutil.cpu_times(True)]
else:
result = dict(psutil.cpu_times(per_cpu)._asdict())
return result
def virtual_memory():
'''
.. versionadded:: 2014.7.0
Return a dict that describes statistics about system memory usage.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.virtual_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
return dict(psutil.virtual_memory()._asdict())
def swap_memory():
'''
.. versionadded:: 2014.7.0
Return a dict that describes swap memory statistics.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.swap_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'swap_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
return dict(psutil.swap_memory()._asdict())
def disk_partitions(all=False):
'''
Return a list of disk partitions and their device, mount point, and
filesystem type.
all
if set to False, only return local, physical partitions (hard disk,
USB, CD/DVD partitions). If True, return all filesystems.
CLI Example:
.. code-block:: bash
salt '*' ps.disk_partitions
'''
result = [dict(partition._asdict()) for partition in
psutil.disk_partitions(all)]
return result
def disk_usage(path):
'''
Given a path, return a dict listing the total available space as well as
the free space, and used space.
CLI Example:
.. code-block:: bash
salt '*' ps.disk_usage /home
'''
return dict(psutil.disk_usage(path)._asdict())
def disk_partition_usage(all=False):
'''
Return a list of disk partitions plus the mount point, filesystem and usage
statistics.
CLI Example:
.. code-block:: bash
salt '*' ps.disk_partition_usage
'''
result = disk_partitions(all)
for partition in result:
partition.update(disk_usage(partition['mountpoint']))
return result
def total_physical_memory():
'''
Return the total number of bytes of physical memory.
CLI Example:
.. code-block:: bash
salt '*' ps.total_physical_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
try:
return psutil.virtual_memory().total
except AttributeError:
# TOTAL_PHYMEM is deprecated but with older psutil versions this is
# needed as a fallback.
return psutil.TOTAL_PHYMEM
def num_cpus():
'''
Return the number of CPUs.
CLI Example:
.. code-block:: bash
salt '*' ps.num_cpus
'''
try:
return psutil.cpu_count()
except AttributeError:
# NUM_CPUS is deprecated but with older psutil versions this is needed
# as a fallback.
return psutil.NUM_CPUS
def boot_time(time_format=None):
'''
Return the boot time in number of seconds since the epoch began.
CLI Example:
time_format
Optionally specify a `strftime`_ format string. Use
``time_format='%c'`` to get a nicely-formatted locale specific date and
time (i.e. ``Fri May 2 19:08:32 2014``).
.. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
.. versionadded:: 2014.1.4
.. code-block:: bash
salt '*' ps.boot_time
'''
try:
b_time = int(psutil.boot_time())
except AttributeError:
        # Fall back to the old psutil API: get_boot_time() was the pre-2.0 name and
        # was later removed in favour of boot_time(), which the try block above uses.
        b_time = int(psutil.get_boot_time())
if time_format:
# Load epoch timestamp as a datetime.datetime object
b_time = datetime.datetime.fromtimestamp(b_time)
try:
return b_time.strftime(time_format)
except TypeError as exc:
raise SaltInvocationError('Invalid format string: {0}'.format(exc))
return b_time
def network_io_counters(interface=None):
'''
Return network I/O statistics.
CLI Example:
.. code-block:: bash
salt '*' ps.network_io_counters
salt '*' ps.network_io_counters interface=eth0
'''
if not interface:
return dict(psutil.net_io_counters()._asdict())
else:
stats = psutil.net_io_counters(pernic=True)
if interface in stats:
return dict(stats[interface]._asdict())
else:
return False
def disk_io_counters(device=None):
'''
Return disk I/O statistics.
CLI Example:
.. code-block:: bash
salt '*' ps.disk_io_counters
salt '*' ps.disk_io_counters device=sda1
'''
if not device:
return dict(psutil.disk_io_counters()._asdict())
else:
stats = psutil.disk_io_counters(perdisk=True)
if device in stats:
return dict(stats[device]._asdict())
else:
return False
def get_users():
'''
Return logged-in users.
CLI Example:
.. code-block:: bash
salt '*' ps.get_users
'''
try:
recs = psutil.users()
return [dict(x._asdict()) for x in recs]
except AttributeError:
# get_users is only present in psutil > v0.5.0
# try utmp
try:
import utmp # pylint: disable=import-error
result = []
while True:
rec = utmp.utmpaccess.getutent()
if rec is None:
return result
elif rec[0] == 7:
started = rec[8]
if isinstance(started, tuple):
started = started[0]
result.append({'name': rec[4], 'terminal': rec[2],
'started': started, 'host': rec[5]})
except ImportError:
return False
def lsof(name):
'''
    Retrieve the lsof information for the given process name.
CLI Example:
.. code-block:: bash
salt '*' ps.lsof apache2
'''
sanitize_name = str(name)
lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name)
ret = []
ret.extend([sanitize_name, lsof_infos])
return ret
def netstat(name):
'''
    Retrieve the netstat information for the given process name.
CLI Example:
.. code-block:: bash
salt '*' ps.netstat apache2
'''
sanitize_name = str(name)
netstat_infos = __salt__['cmd.run']("netstat -nap")
found_infos = []
ret = []
for info in netstat_infos.splitlines():
if info.find(sanitize_name) != -1:
found_infos.append(info)
ret.extend([sanitize_name, found_infos])
return ret
def psaux(name):
'''
Retrieve information corresponding to a "ps aux" filtered
with the given pattern. It could be just a name or a regular
expression (using python search from "re" module).
CLI Example:
.. code-block:: bash
salt '*' ps.psaux www-data.+apache2
'''
sanitize_name = str(name)
pattern = re.compile(sanitize_name)
salt_exception_pattern = re.compile("salt.+ps.psaux.+")
ps_aux = __salt__['cmd.run']("ps aux")
found_infos = []
ret = []
nb_lines = 0
for info in ps_aux.splitlines():
found = pattern.search(info)
if found is not None:
# remove 'salt' command from results
if not salt_exception_pattern.search(info):
nb_lines += 1
found_infos.append(info)
    pid_count = str(nb_lines) + " occurrence(s)."
ret = []
ret.extend([sanitize_name, found_infos, pid_count])
return ret
|
the-stack_0_10415 | # implementation of SLIC Superpixel algorithm
# reference: SLIC Superpixels Compared to State-of-the-art Superpixel Methods
# DOI: 10.1109/TPAMI.2012.120
# website: https://infoscience.epfl.ch/record/177415
# reference (translated): SLIC algorithm for superpixel segmentation, principle and Python implementation: https://www.kawabangga.com/posts/1923
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--image', default="Lena.jpg", type=str, help='input image name')
parser.add_argument('--k', default=1000, type=int, help='number of clusters')
parser.add_argument('--m', default=30, type=int, help='balancing parameter')
args = parser.parse_args()
class Block(object):
def __init__(self, num, h, w, l=0, a=0, b=0):
self.number = num
self.h = h
self.w = w
self.l = l
self.a = a
self.b = b
self.pixels = [] # positions of the pixels which belongs to this block
def change_pos(self, h, w):
self.h = h
self.w = w
def change_color(self, l, a, b):
self.l = l
self.a = a
self.b = b
class Cluster(object):
def __init__(self, image, number, m):
self.image = image
self.k = number
self.m = m
self.height = image.shape[0]
self.width = image.shape[1]
self.pixels = self.height * self.width
self.block_length = int(np.sqrt(self.pixels / self.k))
self.label = np.full((self.height, self.width), -1, np.int32)
self.dis = np.full_like(self.label, np.inf, np.float32)
self.blocks = []
self.grad = cv.Laplacian(self.image, cv.CV_64F)
w = 0
h = self.block_length
j = 0
# in case that only half of the last line is covered
for i in range(self.k + 2 * int(self.width / self.block_length)):
w += self.block_length
if (i % 2) == 0:
h -= int((self.block_length - 1) / 2)
if h < 0:
break
else:
h += int((self.block_length - 1) / 2)
if h >= self.height:
break
if w >= self.width:
if (j % 2) == 0:
w = self.block_length
else:
w = int(self.block_length / 2)
h += self.block_length
j += 1
if h >= self.height:
break
self.blocks.append(Block(i, h, w))
self.adjust_blocks()
# adjust the positions of block centers
# move them to the points with the minimum gradients within the 3x3 regions
def adjust_blocks(self):
for block in self.blocks:
min_grad = np.sum(self.grad[block.h][block.w])
min_h = block.h
min_w = block.w
for i in range(-1, 2):
if block.h + i < 0 or block.h + i >= self.height:
continue # in case that the index goes out of boundary
for j in range(-1, 2):
if block.w + j < 0 or block.w + j >= self.width:
continue
new_grad = np.sum(self.grad[block.h + i][block.w + j])
if new_grad < min_grad:
min_grad = new_grad
min_h = block.h + i
min_w = block.w + j
block.change_pos(min_h, min_w)
block.pixels.append((min_h, min_w))
def distance(self, h1, w1, h2, w2):
l1 = int(self.image[h1][w1][0])
l2 = int(self.image[h2][w2][0])
a1 = int(self.image[h1][w1][1])
a2 = int(self.image[h2][w2][1])
b1 = int(self.image[h1][w1][2])
b2 = int(self.image[h2][w2][2])
d_lab = np.sqrt((np.square(l1 - l2) + np.square(a1 - a2) + np.square(b1 - b2)))
d_xy = np.sqrt(np.square(h1 - h2) + np.square(w1 - w2))
distance = d_lab + d_xy * (self.m / self.block_length)
return distance
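    # Worked example of the weighting above (hedged, toy numbers): with the default
    # m = 30 and a hypothetical block_length of 20, a pixel one full block away
    # spatially (d_xy = 20) adds 20 * (30 / 20) = 30 to the distance, i.e. the same
    # weight as a CIELAB colour difference of 30.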
def assign(self):
for block in self.blocks:
for h2 in range(block.h - 2 * self.block_length, block.h + 2 * self.block_length):
# out of boundary
if h2 < 0:
continue
if h2 >= self.height:
break
# in boundary
for w2 in range(block.w - 2 * self.block_length, block.w + 2 * self.block_length):
# out of boundary
if w2 < 0:
continue
if w2 >= self.width:
break
# in boundary
d = self.distance(block.h, block.w, h2, w2)
if self.label[h2][w2] < 0 or d < self.dis[h2][w2]:
if self.label[h2][w2] >= 0:
self.blocks[int(self.label[h2][w2])].pixels.remove((h2, w2))
self.label[h2][w2] = block.number
self.dis[h2][w2] = d
block.pixels.append((h2, w2))
# re-compute the center of the block
number_pixels = len(block.pixels)
new_h = 0
new_w = 0
for pixel in block.pixels:
new_h += pixel[0]
new_w += pixel[1]
new_h = int(new_h / number_pixels)
new_w = int(new_w / number_pixels)
block.change_pos(new_h, new_w)
block.pixels.append((new_h, new_w))
def color(self):
for block in self.blocks:
avg_l = 0
avg_a = 0
avg_b = 0
length = len(block.pixels)
for pixel in block.pixels:
_l = int(self.image[pixel[0]][pixel[1]][0])
_a = int(self.image[pixel[0]][pixel[1]][1])
_b = int(self.image[pixel[0]][pixel[1]][2])
avg_l += _l
avg_a += _a
avg_b += _b
avg_l = int(avg_l / length)
avg_a = int(avg_a / length)
avg_b = int(avg_b / length)
block.change_color(avg_l, avg_a, avg_b) # use the average color
def output(self):
new_image = np.zeros_like(self.image)
self.color()
for block in self.blocks:
for pixel in block.pixels:
new_image[pixel[0]][pixel[1]][0] = block.l
new_image[pixel[0]][pixel[1]][1] = block.a
new_image[pixel[0]][pixel[1]][2] = block.b
'''
new_image[block.h][block.w][0] = 0
new_image[block.h][block.w][1] = 0
new_image[block.h][block.w][2] = 0
'''
new_image = cv.cvtColor(new_image, cv.COLOR_LAB2BGR)
return new_image
if __name__ == '__main__':
file_name = args.image
cluster_number = args.k
m_param = args.m
img = cv.imread(file_name)
img = cv.cvtColor(img, cv.COLOR_BGR2LAB)
app = Cluster(image=img, number=int(cluster_number), m=int(m_param))
for it in range(10):
app.assign()
out_image = app.output()
name = "_new_" + str(it) + ".jpg"
cv.imwrite(name, out_image)
print(it)
|
the-stack_0_10417 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowProcessResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'code': 'str',
'message': 'str',
'json': 'UploadProcessJson',
'extend': 'str'
}
attribute_map = {
'code': 'code',
'message': 'message',
'json': 'json',
'extend': 'extend'
}
def __init__(self, code=None, message=None, json=None, extend=None):
"""ShowProcessResponse - a model defined in huaweicloud sdk"""
super(ShowProcessResponse, self).__init__()
self._code = None
self._message = None
self._json = None
self._extend = None
self.discriminator = None
if code is not None:
self.code = code
if message is not None:
self.message = message
if json is not None:
self.json = json
if extend is not None:
self.extend = extend
@property
def code(self):
"""Gets the code of this ShowProcessResponse.
code
:return: The code of this ShowProcessResponse.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ShowProcessResponse.
code
:param code: The code of this ShowProcessResponse.
:type: str
"""
self._code = code
@property
def message(self):
"""Gets the message of this ShowProcessResponse.
message
:return: The message of this ShowProcessResponse.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ShowProcessResponse.
message
:param message: The message of this ShowProcessResponse.
:type: str
"""
self._message = message
@property
def json(self):
"""Gets the json of this ShowProcessResponse.
:return: The json of this ShowProcessResponse.
:rtype: UploadProcessJson
"""
return self._json
@json.setter
def json(self, json):
"""Sets the json of this ShowProcessResponse.
:param json: The json of this ShowProcessResponse.
:type: UploadProcessJson
"""
self._json = json
@property
def extend(self):
"""Gets the extend of this ShowProcessResponse.
extend
:return: The extend of this ShowProcessResponse.
:rtype: str
"""
return self._extend
@extend.setter
def extend(self, extend):
"""Sets the extend of this ShowProcessResponse.
extend
:param extend: The extend of this ShowProcessResponse.
:type: str
"""
self._extend = extend
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowProcessResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_10418 | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import math
import gensim
import logging
import json
import torch
import numpy as np
import pandas as pd
from scipy import stats
from texttable import Texttable
from gensim.models import KeyedVectors
import torch.nn.utils.rnn as rnn_utils
def option():
"""
Choose training or restore pattern.
Returns:
The OPTION
"""
OPTION = input("[Input] Train or Restore? (T/R): ")
while not (OPTION.upper() in ['T', 'R']):
OPTION = input("[Warning] The format of your input is illegal, please re-input: ")
return OPTION.upper()
def logger_fn(name, input_file, level=logging.INFO):
"""
The Logger.
Args:
name: The name of the logger
input_file: The logger file path
level: The logger level
Returns:
The logger
"""
logger = logging.getLogger(name)
logger.setLevel(level)
log_dir = os.path.dirname(input_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# File Handler
fh = logging.FileHandler(input_file, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
# stream Handler
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.WARNING)
logger.addHandler(sh)
return logger
def tab_printer(args, logger):
"""
Function to print the logs in a nice tabular format.
Args:
args: Parameters used for the model.
logger: The logger
"""
args = vars(args)
keys = sorted(args.keys())
t = Texttable()
t.add_rows([[k.replace("_", " ").capitalize(), args[k]] for k in keys])
t.add_rows([["Parameter", "Value"]])
logger.info('\n' + t.draw())
def get_model_name():
"""
Get the model name used for test.
Returns:
The model name
"""
MODEL = input("[Input] Please input the model file you want to test, it should be like (1490175368): ")
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input("[Warning] The format of your input is illegal, "
"it should be like (1490175368), please re-input: ")
return MODEL
def create_prediction_file(save_dir, identifiers, predictions):
"""
Create the prediction file.
Args:
        save_dir: The directory in which the prediction file will be saved
identifiers: The data record id
predictions: The predict scores
"""
if not os.path.exists(save_dir):
os.makedirs(save_dir)
preds_file = os.path.abspath(os.path.join(save_dir, 'submission.json'))
with open(preds_file, 'w') as fout:
tmp_dict = {}
for index, predicted_label in enumerate(predictions):
if identifiers[index] not in tmp_dict:
tmp_dict[identifiers[index]] = [predicted_label]
else:
tmp_dict[identifiers[index]].append(predicted_label)
for key in tmp_dict.keys():
data_record = {
'item_id': key,
'label_list': tmp_dict[key],
}
fout.write(json.dumps(data_record, ensure_ascii=False) + '\n')
def evaluation(true_label, pred_label):
"""
Calculate the PCC & DOA.
Args:
true_label: The true labels
pred_label: The predicted labels
Returns:
The value of PCC & DOA
"""
# compute pcc
pcc, _ = stats.pearsonr(pred_label, true_label)
if math.isnan(pcc):
print('[Error]: PCC=nan', true_label, pred_label)
# compute doa
n = 0
correct_num = 0
for i in range(len(true_label) - 1):
for j in range(i + 1, len(true_label)):
if (true_label[i] > true_label[j]) and (pred_label[i] > pred_label[j]):
correct_num += 1
elif (true_label[i] == true_label[j]) and (pred_label[i] == pred_label[j]):
continue
elif (true_label[i] < true_label[j]) and (pred_label[i] < pred_label[j]):
correct_num += 1
n += 1
if n == 0:
print(true_label)
return -1, -1
doa = correct_num / n
return pcc, doa
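# Worked example (hedged): for true = [1, 2, 3] and pred = [1.1, 1.9, 3.5] all three
# pairs keep their relative order, so DOA = 3/3 = 1.0, while PCC is roughly 0.98
# because the predictions are nearly, but not exactly, linear in the true labels.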
def course2vec(course2idx_file):
"""
    Return the number of courses recorded in the course2idx file.
    Args:
        course2idx_file: The course2idx file
    Returns:
        The number of courses in the course2idx mapping
    Raises:
        IOError: If the course2idx file doesn't exist
"""
if not os.path.isfile(course2idx_file):
raise IOError("[Error] The word2vec file doesn't exist. ")
with open(course2idx_file, 'r') as handle:
course2idx = json.load(handle)
course_cnt = len(course2idx)
return course_cnt
def load_data_and_labels(input_file):
if not input_file.endswith('.json'):
raise IOError("[Error] The research data is not a json file. "
"Please preprocess the research data into the json file.")
with open(input_file) as fin:
id_list = []
activity_list = []
timestep_list = []
labels_list = []
for index, eachline in enumerate(fin):
data = json.loads(eachline)
id = data['item_id']
activity = data['activity']
timestep = data['timestep']
labels = data['labels']
id_list.append(id)
activity_list.append(activity)
timestep_list.append(timestep)
labels_list.append(labels)
class _Data:
def __init__(self):
pass
@property
def id(self):
return id_list
@property
def activity(self):
return activity_list
@property
def timestep(self):
return timestep_list
@property
def labels(self):
return labels_list
return _Data()
class MyData(torch.utils.data.Dataset):
"""
    Dataset wrapper defining the structure consumed by the data-loading iterator.
"""
def __init__(self, data_seq, data_tsp, data_label):
self.seqs = data_seq
self.tsp = data_tsp
self.labels = data_label
def __len__(self):
return len(self.seqs)
def __getitem__(self, idx):
return self.seqs[idx], self.tsp[idx], self.labels[idx]
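# Hedged usage sketch (file and variable names hypothetical): MyData pairs with the
# collate_fn defined below so a DataLoader can batch variable-length activity
# sequences, e.g.
#   data = load_data_and_labels('Train.json')
#   dataset = MyData(data.activity, data.timestep, data.labels)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=collate_fn)
#   for padded_seqs, lengths, timesteps, labels in loader:
#       ...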
def collate_fn(data):
"""
Version for PyTorch
Args:
data: The research data. 0-dim: word token index / 1-dim: data label
Returns:
pad_content: The padded data
lens: The ground truth lengths
labels: The data labels
"""
data.sort(key=lambda x: len(x[0]), reverse=True)
data_lens = [len(i[0]) for i in data]
data_x = [torch.tensor(i[0]) for i in data]
data_tsp = [i[1] for i in data]
data_y = torch.tensor([i[2] for i in data])
pad_content = rnn_utils.pad_sequence(data_x, batch_first=True, padding_value=0.)
return pad_content.unsqueeze(-1), data_lens, data_tsp, data_y |
the-stack_0_10419 | import tempfile
import pathlib
import os
import shutil
from renamer.search import FileSearcher
class TestFileSearcher:
@classmethod
def setup_class(cls):
cls.file_searcher = FileSearcher()
cls.tmpdir = tempfile.mkdtemp()
cls.file1_path = os.path.join(cls.tmpdir, 'file1.txt')
cls.file2_path = os.path.join(cls.tmpdir, 'tmp', 'file2.txt')
cls.keyword = os.path.join(cls.tmpdir, '**')
os.makedirs(os.path.join(cls.tmpdir, 'tmp'))
pathlib.Path(cls.file1_path).touch()
pathlib.Path(cls.file2_path).touch()
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.tmpdir)
def test_search(self):
result = self.file_searcher.search(self.keyword)
assert result == [self.file1_path]
def test_search_recursive(self):
result = self.file_searcher.search(self.keyword, recursive=True)
assert result == [self.file1_path, self.file2_path]
def test_select_file(self):
test_case = {
'files': [
os.path.join(self.tmpdir, 'file1.txt'),
os.path.join(self.tmpdir, 'tmp')],
'want': [
os.path.join(self.tmpdir, 'file1.txt')]}
result = self.file_searcher._select_file(test_case['files'])
assert result == test_case['want']
|
the-stack_0_10422 | import os
from datetime import datetime, timezone
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import pyspark.sql.types as pt
import pytest
from pyspark.sql import SparkSession
import ibis
from ibis import util
from ibis.backends.tests.base import BackendTest, RoundAwayFromZero
_pyspark_testing_client = None
def get_common_spark_testing_client(data_directory, connect):
spark = (
SparkSession.builder.config('spark.default.parallelism', 4)
.config('spark.driver.bindAddress', '127.0.0.1')
.getOrCreate()
)
_spark_testing_client = connect(spark)
s = _spark_testing_client._session
num_partitions = 4
df_functional_alltypes = (
s.read.csv(
path=str(data_directory / 'functional_alltypes.csv'),
schema=pt.StructType(
[
pt.StructField('index', pt.IntegerType(), True),
pt.StructField('Unnamed: 0', pt.IntegerType(), True),
pt.StructField('id', pt.IntegerType(), True),
# cast below, Spark can't read 0/1 as bool
pt.StructField('bool_col', pt.ByteType(), True),
pt.StructField('tinyint_col', pt.ByteType(), True),
pt.StructField('smallint_col', pt.ShortType(), True),
pt.StructField('int_col', pt.IntegerType(), True),
pt.StructField('bigint_col', pt.LongType(), True),
pt.StructField('float_col', pt.FloatType(), True),
pt.StructField('double_col', pt.DoubleType(), True),
pt.StructField('date_string_col', pt.StringType(), True),
pt.StructField('string_col', pt.StringType(), True),
pt.StructField('timestamp_col', pt.TimestampType(), True),
pt.StructField('year', pt.IntegerType(), True),
pt.StructField('month', pt.IntegerType(), True),
]
),
mode='FAILFAST',
header=True,
)
.repartition(num_partitions)
.sort('index')
)
df_functional_alltypes = df_functional_alltypes.withColumn(
"bool_col", df_functional_alltypes["bool_col"].cast("boolean")
)
df_functional_alltypes.createOrReplaceTempView('functional_alltypes')
df_batting = (
s.read.csv(
path=str(data_directory / 'batting.csv'),
schema=pt.StructType(
[
pt.StructField('playerID', pt.StringType(), True),
pt.StructField('yearID', pt.IntegerType(), True),
pt.StructField('stint', pt.IntegerType(), True),
pt.StructField('teamID', pt.StringType(), True),
pt.StructField('lgID', pt.StringType(), True),
pt.StructField('G', pt.IntegerType(), True),
pt.StructField('AB', pt.DoubleType(), True),
pt.StructField('R', pt.DoubleType(), True),
pt.StructField('H', pt.DoubleType(), True),
pt.StructField('X2B', pt.DoubleType(), True),
pt.StructField('X3B', pt.DoubleType(), True),
pt.StructField('HR', pt.DoubleType(), True),
pt.StructField('RBI', pt.DoubleType(), True),
pt.StructField('SB', pt.DoubleType(), True),
pt.StructField('CS', pt.DoubleType(), True),
pt.StructField('BB', pt.DoubleType(), True),
pt.StructField('SO', pt.DoubleType(), True),
pt.StructField('IBB', pt.DoubleType(), True),
pt.StructField('HBP', pt.DoubleType(), True),
pt.StructField('SH', pt.DoubleType(), True),
pt.StructField('SF', pt.DoubleType(), True),
pt.StructField('GIDP', pt.DoubleType(), True),
]
),
header=True,
)
.repartition(num_partitions)
.sort('playerID')
)
df_batting.createOrReplaceTempView('batting')
df_awards_players = (
s.read.csv(
path=str(data_directory / 'awards_players.csv'),
schema=pt.StructType(
[
pt.StructField('playerID', pt.StringType(), True),
pt.StructField('awardID', pt.StringType(), True),
pt.StructField('yearID', pt.IntegerType(), True),
pt.StructField('lgID', pt.StringType(), True),
pt.StructField('tie', pt.StringType(), True),
pt.StructField('notes', pt.StringType(), True),
]
),
header=True,
)
.repartition(num_partitions)
.sort('playerID')
)
df_awards_players.createOrReplaceTempView('awards_players')
df_simple = s.createDataFrame([(1, 'a')], ['foo', 'bar'])
df_simple.createOrReplaceTempView('simple')
df_struct = s.createDataFrame([((1, 2, 'a'),)], ['struct_col'])
df_struct.createOrReplaceTempView('struct')
df_nested_types = s.createDataFrame(
[([1, 2], [[3, 4], [5, 6]], {'a': [[2, 4], [3, 5]]})],
[
'list_of_ints',
'list_of_list_of_ints',
'map_string_list_of_list_of_ints',
],
)
df_nested_types.createOrReplaceTempView('nested_types')
df_complicated = s.createDataFrame(
[({(1, 3): [[2, 4], [3, 5]]},)], ['map_tuple_list_of_list_of_ints']
)
df_complicated.createOrReplaceTempView('complicated')
df_udf = s.createDataFrame(
[('a', 1, 4.0, 'a'), ('b', 2, 5.0, 'a'), ('c', 3, 6.0, 'b')],
['a', 'b', 'c', 'key'],
)
df_udf.createOrReplaceTempView('udf')
df_udf_nan = s.createDataFrame(
pd.DataFrame(
{
'a': np.arange(10, dtype=float),
'b': [3.0, np.NaN] * 5,
'key': list('ddeefffggh'),
}
)
)
df_udf_nan.createOrReplaceTempView('udf_nan')
df_udf_null = s.createDataFrame(
[
(float(i), None if i % 2 else 3.0, 'ddeefffggh'[i])
for i in range(10)
],
['a', 'b', 'key'],
)
df_udf_null.createOrReplaceTempView('udf_null')
df_udf_random = s.createDataFrame(
pd.DataFrame(
{
'a': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'key': list('ddeefff'),
}
)
)
df_udf_random.createOrReplaceTempView('udf_random')
return _spark_testing_client
def get_pyspark_testing_client(data_directory):
global _pyspark_testing_client
if _pyspark_testing_client is None:
_pyspark_testing_client = get_common_spark_testing_client(
data_directory,
lambda session: ibis.backends.pyspark.Backend().connect(session),
)
return _pyspark_testing_client
class TestConf(BackendTest, RoundAwayFromZero):
supported_to_timestamp_units = {'s'}
@staticmethod
def connect(data_directory):
return get_pyspark_testing_client(data_directory)
@pytest.fixture(scope='session')
def client(data_directory):
client = get_pyspark_testing_client(data_directory)
df = client._session.range(0, 10)
df = df.withColumn("str_col", F.lit('value'))
df.createTempView('basic_table')
df_nans = client._session.createDataFrame(
[
[np.NaN, 'Alfred', None],
[27.0, 'Batman', 'motocycle'],
[3.0, None, 'joker'],
],
['age', 'user', 'toy'],
)
df_nans.createTempView('nan_table')
df_dates = client._session.createDataFrame(
[['2018-01-02'], ['2018-01-03'], ['2018-01-04']], ['date_str']
)
df_dates.createTempView('date_table')
df_arrays = client._session.createDataFrame(
[
['k1', [1, 2, 3], ['a']],
['k2', [4, 5], ['test1', 'test2', 'test3']],
['k3', [6], ['w', 'x', 'y', 'z']],
['k1', [], ['cat', 'dog']],
['k1', [7, 8], []],
],
['key', 'array_int', 'array_str'],
)
df_arrays.createTempView('array_table')
df_time_indexed = client._session.createDataFrame(
[
[datetime(2017, 1, 2, 5, tzinfo=timezone.utc), 1, 1.0],
[datetime(2017, 1, 2, 5, tzinfo=timezone.utc), 2, 2.0],
[datetime(2017, 1, 2, 6, tzinfo=timezone.utc), 1, 3.0],
[datetime(2017, 1, 2, 6, tzinfo=timezone.utc), 2, 4.0],
[datetime(2017, 1, 2, 7, tzinfo=timezone.utc), 1, 5.0],
[datetime(2017, 1, 2, 7, tzinfo=timezone.utc), 2, 6.0],
[datetime(2017, 1, 4, 8, tzinfo=timezone.utc), 1, 7.0],
[datetime(2017, 1, 4, 8, tzinfo=timezone.utc), 2, 8.0],
],
['time', 'key', 'value'],
)
df_time_indexed.createTempView('time_indexed_table')
return client
class IbisWindow:
# Test util class to generate different types of ibis windows
def __init__(self, windows):
self.windows = windows
def get_windows(self):
# Return a list of Ibis windows
return [
ibis.window(
preceding=w[0],
following=w[1],
order_by='time',
group_by='key',
)
for w in self.windows
]
@pytest.fixture
def ibis_windows(request):
return IbisWindow(request.param).get_windows()
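# Hedged usage sketch: tests are expected to parametrize this fixture indirectly,
# along the lines of
#   @pytest.mark.parametrize(
#       'ibis_windows', [[(ibis.interval(hours=1), 0)]], indirect=True)
#   def test_some_window_op(alltypes, ibis_windows):
#       ...
# Each (preceding, following) pair becomes an ibis window ordered by 'time' and
# grouped by 'key'.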
def _random_identifier(suffix):
return '__ibis_test_{}_{}'.format(suffix, util.guid())
@pytest.fixture(scope='session', autouse=True)
def test_data_db(client):
try:
name = os.environ.get('IBIS_TEST_DATA_DB', 'ibis_testing')
client.create_database(name)
client.set_database(name)
yield name
finally:
client.drop_database(name, force=True)
@pytest.fixture
def temp_database(client, test_data_db):
name = _random_identifier('database')
client.create_database(name)
try:
yield name
finally:
client.set_database(test_data_db)
client.drop_database(name, force=True)
@pytest.fixture
def temp_table(client):
name = _random_identifier('table')
try:
yield name
finally:
assert client.exists_table(name), name
client.drop_table(name)
@pytest.fixture(scope='session')
def alltypes(client):
return client.table('functional_alltypes').relabel(
{'Unnamed: 0': 'Unnamed:0'}
)
@pytest.fixture(scope='session')
def tmp_dir():
return '/tmp/__ibis_test_{}'.format(util.guid())
@pytest.fixture
def temp_table_db(client, temp_database):
name = _random_identifier('table')
try:
yield temp_database, name
finally:
assert client.exists_table(name, database=temp_database), name
client.drop_table(name, database=temp_database)
@pytest.fixture
def temp_view(client):
name = _random_identifier('view')
try:
yield name
finally:
assert client.exists_table(name), name
client.drop_view(name)
|
the-stack_0_10423 | from abc import ABC, abstractmethod
from .io.chem import load_molecule, build_fp
from .io.backends import PyTablesStorageBackend
from .FPSim2lib.utils import PyPopcount
import numpy as np
class BaseEngine(ABC):
fp_filename = None
storage = None
def __init__(
self,
fp_filename: str,
storage_backend: str,
in_memory_fps: bool,
fps_sort: bool,
) -> None:
self.fp_filename = fp_filename
self.in_memory_fps = in_memory_fps
if storage_backend == "pytables":
self.storage = PyTablesStorageBackend(
fp_filename, in_memory_fps=in_memory_fps, fps_sort=fps_sort
)
@property
def fps(self):
if self.in_memory_fps:
return self.storage.fps
else:
raise Exception("FPs not loaded into memory.")
@property
def popcnt_bins(self):
return self.storage.popcnt_bins
@property
def fp_type(self):
return self.storage.fp_type
@property
def fp_params(self):
return self.storage.fp_params
@property
def rdkit_ver(self):
return self.storage.rdkit_ver
def load_query(self, query_string: str) -> np.ndarray:
"""Loads the query molecule from SMILES, molblock or InChI.
Parameters
----------
query_string : str
SMILES, InChi or molblock.
Returns
-------
query : numpy array
Numpy array query molecule.
"""
rdmol = load_molecule(query_string)
fp = build_fp(rdmol, self.fp_type, self.fp_params, 0)
return np.array(fp, dtype=np.uint64)
@abstractmethod
def similarity(
self, query_string: str, threshold: float, n_workers=1
) -> np.ndarray:
"""Tanimoto similarity search """
|
the-stack_0_10425 | from matrx.actions.action import Action, ActionResult
from matrx.objects.agent_body import AgentBody
def _act_move(grid_world, agent_id, dx, dy):
""" Private MATRX method.
The method that actually mutates the location of an AgentBody based on a
delta-x and delta-y.
Parameters
----------
grid_world : GridWorld
The GridWorld instance in which the agent resides whose location
should be updated.
agent_id : string
The unique identifier for the agent whose location should be changed.
dx : {-1, 0, 1}
The delta change on the x-coordinate.
dy : {-1, 0, 1}
The delta change on the y-coordinate.
Returns
-------
MoveActionResult
The result of the actual change of the location of an AgentBody.
Always returns a success.
"""
agent_avatar = grid_world.get_env_object(agent_id, obj_type=AgentBody)
loc = agent_avatar.location
new_loc = [loc[0] + dx, loc[1] + dy]
grid_world.registered_agents[agent_id].location = new_loc
return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)
def _is_possible_movement(grid_world, agent_id, dx, dy):
""" Private MATRX method.
Wrapper around the check if a certain movement is possible.
Parameters
----------
grid_world : GridWorld
The GridWorld instance in which the agent resides whose location
should be updated.
agent_id : string
The unique identifier for the agent whose location should be changed.
dx : {-1, 0, 1}
The delta change on the x-coordinate.
dy : {-1, 0, 1}
The delta change on the y-coordinate.
Returns
-------
MoveActionResult
The expected result of performing this movement.
See Also
--------
possible_movement : The main method this method wraps.
"""
return _possible_movement(grid_world, agent_id, dx, dy)
def _possible_movement(grid_world, agent_id, dx, dy):
""" Private MATRX method.
Checks if the delta-x and delta-y change in the agent's location is
possible.
Parameters
----------
grid_world : GridWorld
The GridWorld instance in which the agent resides whose location should
be updated.
agent_id : string
The unique identifier for the agent whose location should be changed.
dx : {-1, 0, 1}
The delta change on the x-coordinate.
dy : {-1, 0, 1}
The delta change on the y-coordinate.
Returns
-------
MoveActionResult
Whether the MoveAction is expected to be possible.
Can return the following results (see also
:class:`matrx.actions.move_actions.MoveActionResult`):
* The ActionResult depicting the action's success or failure and reason
for that result.
* RESULT_SUCCESS: When the MoveAction is possible.
* RESULT_NO_MOVE: If the agent is already at the
location it wishes to move to.
* RESULT_OCCUPIED: When the new location is occupied
by an intraversable agent.
* RESULT_NOT_PASSABLE_OBJECT: When the new location is
occupied by an intraversable object.
* RESULT_OUT_OF_BOUNDS: When the new location is
outside the GridWorld's bounds.
"""
agent_avatar = grid_world.get_env_object(agent_id, obj_type=AgentBody)
assert agent_avatar is not None
loc = agent_avatar.location
new_loc = [loc[0] + dx, loc[1] + dy]
if 0 <= new_loc[0] < grid_world.shape[0] and 0 <= new_loc[1] < grid_world.shape[1]:
loc_obj_ids = grid_world.grid[new_loc[1], new_loc[0]]
if loc_obj_ids is None:
# there is nothing at that location
return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)
else:
# Go through all objects at the desired locations
for loc_obj_id in loc_obj_ids:
# Check if loc_obj_id is the id of an agent
if loc_obj_id in grid_world.registered_agents.keys():
# get the actual agent
loc_obj = grid_world.registered_agents[loc_obj_id]
                    # Check whether the agent taking the move action is the same agent that is
                    # already at that location, in which case the move has no effect and we
                    # return the appropriate result.
if loc_obj_id == agent_id:
                        # The desired location already contains this agent, so the move changes nothing.
return MoveActionResult(MoveActionResult.RESULT_NO_MOVE, succeeded=False)
                    # Check if the agent at the other location (if not itself) is traversable. Otherwise we return that
# the location is occupied.
elif not loc_obj.is_traversable:
return MoveActionResult(MoveActionResult.RESULT_OCCUPIED, succeeded=False)
# If there are no agents at the desired location or we can move on top of other agents, we check if
# there are objects in the way that are not passable.
if loc_obj_id in grid_world.environment_objects.keys():
# get the actual object
loc_obj = grid_world.environment_objects[loc_obj_id]
# Check if the object is not passable, if this is not the case is_traversable is False
if not loc_obj.is_traversable:
# The desired location contains an object that is not passable
return MoveActionResult(MoveActionResult.RESULT_NOT_PASSABLE_OBJECT, succeeded=False)
# Either the desired location contains the agent at previous tick, and/or all objects there are passable
return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)
else:
return MoveActionResult(MoveActionResult.RESULT_OUT_OF_BOUNDS, succeeded=False)
class MoveActionResult(ActionResult):
"""ActionResult for a Move action
The results uniquely for Move action are (as class constants):
* RESULT_SUCCESS: When the MoveAction is possible.
* RESULT_NO_MOVE: If the agent is already at the location it wishes to move
to.
* RESULT_OCCUPIED: When the new location is occupied by an intraversable
agent.
* RESULT_NOT_PASSABLE_OBJECT: When the new location is occupied by an
intraversable object.
* RESULT_OUT_OF_BOUNDS: When the new location is outside the GridWorld's
bounds.
Parameters
----------
result : str
A string representing the reason for a (expected) success
or fail of a :class:`matrx.actions.move_actions.Move`.
succeeded : bool
A boolean representing the (expected) success or fail of a
:class:`matrx.actions.move_actions.Move`.
See Also
--------
Move
"""
""" When the move action is success. """
RESULT_SUCCESS = 'Move action success'
""" When the agent is already at the location it tries to move to. """
RESULT_NO_MOVE = 'Move action resulted in a new location with the agent already present.'
""" When the move action would lead the agent outside the world bounds. """
RESULT_OUT_OF_BOUNDS = 'Move action out of bounds'
""" When the move action would lead the agent to a location occupied by
another agent. """
RESULT_OCCUPIED = 'Move action towards occupied space'
""" When the move action would lead the agent to a location occupied by
an intraversable object. """
RESULT_NOT_PASSABLE_OBJECT = 'Move action toward space which is not traversable by agent due object'
def __init__(self, result, succeeded):
super().__init__(result, succeeded)
class Move(Action):
""" The class wrapping all Move actions.
Parameters
----------
duration_in_ticks : int
Optional. Default: ``1``. Should be zero or larger.
The default duration of this action in ticks during which the
:class:`matrx.grid_world.GridWorld` blocks the agent performing other
actions. By default this is 1, meaning that all actions of this type will take
both the tick in which it was decided upon and the subsequent tick.
When the agent is blocked / busy with an action, only the
:meth:`matrx.agents.agent_brain.AgentBrain.filter_observations` method is called for that agent, and the
:meth:`matrx.agents.agent_brain.AgentBrain.decide_on_action` method is skipped.
This means that agents that are busy with an action can only perceive the world but not decide on
        a new action until the action has completed.
An agent can overwrite the duration of an action by returning the ``action_duration`` in the ``action_kwargs``
in the :meth:`matrx.agents.agent_brain.AgentBrain.decide_on_action` method, as so:
``return >action_name<, {'action_duration': >ticks<}``
Attributes
----------
dx : {-1, 0, 1}
The delta change on the x-coordinate.
dy : {-1, 0, 1}
The delta change on the y-coordinate.
See Also
--------
MoveNorth
MoveNorthEast
MoveEast
MoveSouthEast
MoveSouth
MoveSouthWest
MoveWest
MoveNorthWest
"""
def __init__(self, duration_in_ticks=0):
super().__init__(duration_in_ticks)
self.dx = 0
self.dy = 0
def is_possible(self, grid_world, agent_id, world_state, **kwargs):
""" Checks if the move is possible.
Checks for the following:
* If the agent is already at the location it wishes to move to.
* When the new location is occupied by an intraversable agent.
* When the new location is occupied by an intraversable object.
* When the new location is outside the GridWorld's bounds.
Parameters
----------
grid_world : GridWorld
The :class:`matrx.grid_world.GridWorld` instance in which the
agent resides whose location should be updated.
agent_id : str
The unique identifier for the agent whose location should be
changed.
world_state : State
The State object representing the entire world. Can be used to
simplify search of objects and properties when checking if an
action can be performed. Note that this is the State of the
entire world, not that of the agent performing the action.
**kwargs : dict
Not used.
Returns
-------
MoveActionResult
Whether the MoveAction is expected to be possible.
See :class:`matrx.actions.move_actions.MoveActionResult` for the
results it can contain.
"""
result = _is_possible_movement(grid_world, agent_id=agent_id, dx=self.dx, dy=self.dy)
return result
def mutate(self, grid_world, agent_id, world_state, **kwargs):
""" Mutates an agent's location
Changes an agent's location property based on the attributes `dx` and
`dy`.
Parameters
----------
grid_world : GridWorld
The :class:`matrx.grid_world.GridWorld` instance in which the
agent resides whose location should be updated.
world_state : State
The State object representing the entire world. Can be used to
simplify search of objects and properties when performing an
action. Note that this is the State of the entire world, not
that of the agent performing the action.
agent_id : str
The unique identifier for the agent whose location should be
changed.
Returns
-------
MoveActionResult
The result of the actual change of the location of an agent. Always
returns a success.
"""
return _act_move(grid_world, agent_id=agent_id, dx=self.dx, dy=self.dy)
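# Hedged usage sketch (method body hypothetical): the duration override described in
# the Move docstring is returned from an AgentBrain's decide_on_action, e.g.
#   def decide_on_action(self, state):
#       # take three ticks for this move instead of the default duration
#       return MoveNorth.__name__, {'action_duration': 3}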
class MoveNorth(Move):
""" Moves the agent North.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = 0
* delta-y = -1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = 0
self.dy = -1
class MoveNorthEast(Move):
""" Moves the agent North-East.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = 1
* delta-y = -1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = +1
self.dy = -1
class MoveEast(Move):
""" Moves the agent East.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = 1
* delta-y = 0
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = +1
self.dy = 0
class MoveSouthEast(Move):
""" Moves the agent South-East.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = 1
* delta-y = 1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = +1
self.dy = +1
class MoveSouth(Move):
""" Moves the agent South.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = 0
* delta-y = 1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = 0
self.dy = +1
class MoveSouthWest(Move):
""" Moves the agent South-West.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = -1
* delta-y = 1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = -1
self.dy = +1
class MoveWest(Move):
""" Moves the agent West.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = -1
* delta-y = 0
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = -1
self.dy = 0
class MoveNorthWest(Move):
""" Moves the agent North-West.
Inherits from :class:`matrx.actions.move_actions.Move` and sets the delta-x
and delta-y as follows:
* delta-x = -1
* delta-y = -1
See Also
--------
Move
"""
def __init__(self):
super().__init__()
self.dx = -1
self.dy = -1
|
the-stack_0_10426 | import torch
import numpy as np
import networkx as nx
class CenterObjective():
def __init__(self, dist, dmax, temp, hardmax=False):
'''
dist: (num customers) * (num locations) matrix
dmax: maximum distance that can be suffered by any customer (e.g., if
no facilities are chosen)
temp: how hard to make the softmax over customers
'''
self.dmax = dmax
dist, order = torch.sort(dist, dim=1)
self.order = order
dmax_vec = dmax*torch.ones(dist.shape[0], 1)
off_one = torch.cat((dist[:, 1:], dmax_vec), dim=1)
self.m = dist - off_one
self.temp = temp
self.hardmax = hardmax
def __call__(self, x):
'''
Evaluates E_S[softmax_{customers} min_{i \in S} dist(customer, i)] where
the expectation is over the set of facility locations S. Every
location is included in S independently with probability x_i.
'''
x_sort = x[self.order]
probs = 1 - torch.cumprod(1 - x_sort, dim=1)
vals = self.dmax + (self.m*probs).sum(dim=1)
if self.hardmax:
return vals.max()
weights = torch.softmax(self.temp*vals, dim=0)
return torch.dot(vals, weights)
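# Illustrative helper (hedged sketch, not part of the original module; the numbers
# are made up): shows how CenterObjective is typically evaluated on a small
# fractional solution and differentiated with respect to it.
def _example_center_objective():
    toy_dist = torch.tensor([[1., 2., 4.],
                             [3., 1., 2.]])  # 2 customers x 3 candidate locations
    obj = CenterObjective(toy_dist, dmax=10., temp=5.)
    x = torch.tensor([0.5, 0.5, 0.0], requires_grad=True)  # inclusion probabilities
    loss = obj(x)    # soft estimate of the worst-case customer distance
    loss.backward()  # gradients flow back to the facility probabilities
    return loss, x.grad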
def gonzalez_kcenter(dist, K):
'''
Algorithm of Gonzalez (1985) which iteratively selects the point furthest
from the current solution
Gonzalez, Teofilo F. (1985). "Clustering to minimize the maximum intercluster
distance". Theoretical Computer Science.
'''
S = [np.random.choice(list(range(dist.shape[1])))]
while len(S) < K:
dist_to_S = dist[:, S].min(dim = 1)[0]
S.append(dist_to_S.argmax().item())
x = torch.zeros(dist.shape[1])
x[S] = 1
return x
def greedy_kcenter(dist, dmax, K):
'''
Greedily add locations to minimize the kcenter objective
'''
obj = CenterObjective(dist, dmax, None, True)
x = torch.zeros(dist.shape[1])
currval = obj(x)
for _ in range(K):
best_i = 0
for i in range(dist.shape[1]):
if x[i] < 0.5:
x[i] = 1
obj_val = obj(x)
if obj_val < currval:
currval = obj_val
best_i = i
x[i] = 0
x[best_i] = 1
return x
def make_all_dists(bin_adj, dmax, use_weights=False):
g = nx.from_numpy_array(bin_adj.cpu().detach().numpy())
if not use_weights:
lengths = nx.shortest_path_length(g)
else:
lengths = nx.shortest_path_length(g, weight='weight')
dist = torch.zeros_like(bin_adj)
for u, lens_u in lengths:
for v in range(bin_adj.shape[0]):
if v in lens_u:
dist[u,v] = lens_u[v]
else:
dist[u,v] = dmax
return dist
def make_dists_igraph(adj):
import igraph
adj = adj.detach().numpy()
    # random dense matrix is used only to enumerate every (row, col) index pair
    dense = np.random.rand(adj.shape[0], adj.shape[1])
e1 = dense.nonzero()[0]
e1 = e1.reshape(e1.shape[0], 1)
e2 = dense.nonzero()[1]
e2 = e2.reshape(e2.shape[0], 1)
stuff = np.concatenate((e1, e2), axis=1)
allstuff = np.concatenate((stuff, adj.flatten().reshape(stuff.shape[0], 1)), axis=1)
np.savetxt('tmp_twostage', allstuff, fmt = '%d %d %f')
g = igraph.Graph.Read_Ncol('tmp_twostage', weights=True, directed=True)
dists = g.shortest_paths(weights='weight')
dists = torch.tensor(np.array(dists))
return dists.float()
def rounding(x):
'''
Fast pipage rounding implementation for uniform matroid
'''
i = 0
j = 1
x = x.clone()
for t in range(len(x)-1):
if x[i] == 0 and x[j] == 0:
i = max((i,j)) + 1
elif x[i] + x[j] < 1:
if np.random.rand() < x[i]/(x[i] + x[j]):
x[i] = x[i] + x[j]
x[j] = 0
j = max((i,j)) + 1
else:
x[j] = x[i] + x[j]
x[i] = 0
i = max((i,j)) + 1
else:
if np.random.rand() < (1 - x[j])/(2 - x[i] - x[j]):
x[j] = x[i] + x[j] - 1
x[i] = 1
i = max((i,j)) + 1
else:
x[i] = x[i] + x[j] - 1
x[j] = 1
j = max((i,j)) + 1
return x
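# Illustrative end-to-end sketch (added for clarity, not in the original
# file): compare the integral Gonzalez / greedy solutions with a fractional
# vector rounded by pipage `rounding`. Sizes and the budget K are arbitrary.
def _example_kcenter_pipeline(K=3):
    torch.manual_seed(0)
    num_customers, num_locations = 20, 10
    dist = torch.rand(num_customers, num_locations)
    dmax = 2.0
    x_gonzalez = gonzalez_kcenter(dist, K)
    x_greedy = greedy_kcenter(dist, dmax, K)
    # A fractional solution opening K facilities in expectation ...
    x_frac = torch.full((num_locations,), K / num_locations)
    # ... rounded to a (near-)integral 0/1 vector with the same expected budget.
    x_rounded = rounding(x_frac)
    obj = CenterObjective(dist, dmax, temp=10.0, hardmax=True)
    return obj(x_gonzalez), obj(x_greedy), obj(x_rounded)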
|
the-stack_0_10427 | import scipy.stats
import numpy as np
from math import ceil
from .. import img_as_float
from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
from .._shared.utils import warn
import pywt
import skimage.color as color
import numbers
def denoise_bilateral(image, win_size=None, sigma_color=None, sigma_spatial=1,
bins=10000, mode='constant', cval=0, multichannel=False):
"""Denoise image using bilateral filter.
This is an edge-preserving, denoising filter. It averages pixels based on
their spatial closeness and radiometric similarity [1]_.
Spatial closeness is measured by the Gaussian function of the Euclidean
distance between two pixels and a certain standard deviation
(`sigma_spatial`).
Radiometric similarity is measured by the Gaussian function of the
Euclidean distance between two color values and a certain standard
deviation (`sigma_color`).
Parameters
----------
image : ndarray, shape (M, N[, 3])
Input image, 2D grayscale or RGB.
win_size : int
Window size for filtering.
If win_size is not specified, it is calculated as
``max(5, 2 * ceil(3 * sigma_spatial) + 1)``.
sigma_color : float
Standard deviation for grayvalue/color distance (radiometric
similarity). A larger value results in averaging of pixels with larger
radiometric differences. Note, that the image will be converted using
the `img_as_float` function and thus the standard deviation is in
respect to the range ``[0, 1]``. If the value is ``None`` the standard
deviation of the ``image`` will be used.
sigma_spatial : float
Standard deviation for range distance. A larger value results in
averaging of pixels with larger spatial differences.
bins : int
Number of discrete values for Gaussian weights of color filtering.
A larger value results in improved accuracy.
mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
How to handle values outside the image borders. See
`numpy.pad` for detail.
    cval : float
Used in conjunction with mode 'constant', the value outside
the image boundaries.
multichannel : bool
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
Returns
-------
denoised : ndarray
Denoised image.
References
----------
.. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf
Examples
--------
>>> from skimage import data, img_as_float
>>> astro = img_as_float(data.astronaut())
>>> astro = astro[220:300, 220:320]
>>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
>>> noisy = np.clip(noisy, 0, 1)
>>> denoised = denoise_bilateral(noisy, sigma_color=0.05, sigma_spatial=15,
... multichannel=True)
"""
if multichannel:
if image.ndim != 3:
if image.ndim == 2:
raise ValueError("Use ``multichannel=False`` for 2D grayscale "
"images. The last axis of the input image "
"must be multiple color channels not another "
"spatial dimension.")
else:
raise ValueError("Bilateral filter is only implemented for "
"2D grayscale images (image.ndim == 2) and "
"2D multichannel (image.ndim == 3) images, "
"but the input image has {0} dimensions. "
"".format(image.ndim))
elif image.shape[2] not in (3, 4):
if image.shape[2] > 4:
msg = ("The last axis of the input image is interpreted as "
"channels. Input image with shape {0} has {1} channels "
"in last axis. ``denoise_bilateral`` is implemented "
"for 2D grayscale and color images only")
warn(msg.format(image.shape, image.shape[2]))
else:
msg = "Input image must be grayscale, RGB, or RGBA; " \
"but has shape {0}."
warn(msg.format(image.shape))
else:
if image.ndim > 2:
raise ValueError("Bilateral filter is not implemented for "
"grayscale images of 3 or more dimensions, "
"but input image has {0} dimension. Use "
"``multichannel=True`` for 2-D RGB "
"images.".format(image.shape))
if win_size is None:
win_size = max(5, 2 * int(ceil(3 * sigma_spatial)) + 1)
return _denoise_bilateral(image, win_size, sigma_color, sigma_spatial,
bins, mode, cval)
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):
"""Perform total-variation denoising using split-Bregman optimization.
Total-variation denoising (also know as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter ([1]_, [2]_, [3]_, [4]_).
Parameters
----------
image : ndarray
        Input data to be denoised (converted using `img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
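# Illustrative usage sketch (added for clarity; it mirrors the docstring
# examples elsewhere in this module). The square test image and the noise
# level are arbitrary, and the example assumes the private Cython routine
# accepts a 2-D float image, as the public docstring above implies.
def _example_denoise_tv_bregman():
    rng = np.random.RandomState(0)
    clean = np.zeros((64, 64))
    clean[16:48, 16:48] = 1.0                       # piecewise-constant target
    noisy = np.clip(clean + 0.3 * rng.randn(64, 64), 0, 1)
    # Smaller ``weight`` -> stronger smoothing; larger -> closer to the input.
    return denoise_tv_bregman(noisy, weight=5.0, isotropic=True)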
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
image : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = image.ndim
p = np.zeros((image.ndim, ) + image.shape, dtype=image.dtype)
g = np.zeros_like(p)
d = np.zeros_like(image)
i = 0
while i < n_iter_max:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
slices_d = [slice(None), ] * ndim
slices_p = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax+1] = slice(0, -1)
slices_p[0] = ax
d[tuple(slices_d)] += p[tuple(slices_p)]
slices_d[ax] = slice(None)
slices_p[ax+1] = slice(None)
out = image + d
else:
out = image
E = (d ** 2).sum()
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
slices_g = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_g[ax+1] = slice(0, -1)
slices_g[0] = ax
g[tuple(slices_g)] = np.diff(out, axis=ax)
slices_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
E += weight * norm.sum()
tau = 1. / (2.*ndim)
norm *= tau / weight
norm += 1.
p -= tau * g
p /= norm
E /= float(image.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def denoise_tv_chambolle(image, weight=0.1, eps=2.e-4, n_iter_max=200,
multichannel=False):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
image : ndarray of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that
determines the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the channels dimension.
Returns
-------
out : ndarray
Denoised image.
Notes
-----
Make sure to set the multichannel parameter appropriately for color images.
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
The principle of total variation denoising is to minimize the
total variation of the image, which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce "cartoon-like" images, that is,
piecewise-constant images.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
--------
2D example on astronaut image:
>>> from skimage import color, data
>>> img = color.rgb2gray(data.astronaut())[:50, :50]
>>> img += 0.5 * img.std() * np.random.randn(*img.shape)
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
3D example on synthetic data:
>>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = denoise_tv_chambolle(mask, weight=100)
"""
im_type = image.dtype
if not im_type.kind == 'f':
image = img_as_float(image)
if multichannel:
out = np.zeros_like(image)
for c in range(image.shape[-1]):
out[..., c] = _denoise_tv_chambolle_nd(image[..., c], weight, eps,
n_iter_max)
else:
out = _denoise_tv_chambolle_nd(image, weight, eps, n_iter_max)
return out
def _bayes_thresh(details, var):
"""BayesShrink threshold for a zero-mean details coeff array."""
# Equivalent to: dvar = np.var(details) for 0-mean details array
dvar = np.mean(details*details)
eps = np.finfo(details.dtype).eps
thresh = var / np.sqrt(max(dvar - var, eps))
return thresh
def _universal_thresh(img, sigma):
""" Universal threshold used by the VisuShrink method """
return sigma*np.sqrt(2*np.log(img.size))
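# Illustrative worked example (added for clarity): compare the BayesShrink and
# VisuShrink thresholds on synthetic detail coefficients with a known noise
# level. The Laplacian "signal" and the sigma value are arbitrary choices.
def _example_shrinkage_thresholds():
    rng = np.random.RandomState(0)
    sigma = 0.1
    details = rng.laplace(scale=0.5, size=(128, 128)) + sigma * rng.randn(128, 128)
    bayes_t = _bayes_thresh(details, sigma ** 2)   # adaptive, per sub-band
    visu_t = _universal_thresh(details, sigma)     # sigma * sqrt(2 * log(N))
    return bayes_t, visu_t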
def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
"""Calculate the robust median estimator of the noise standard deviation.
Parameters
----------
detail_coeffs : ndarray
The detail coefficients corresponding to the discrete wavelet
transform of an image.
distribution : str
The underlying noise distribution.
Returns
-------
sigma : float
The estimated noise standard deviation (see section 4.2 of [1]_).
References
----------
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI:10.1093/biomet/81.3.425
"""
# Consider regions with detail coefficients exactly zero to be masked out
detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]
if distribution.lower() == 'gaussian':
# 75th quantile of the underlying, symmetric noise distribution
denom = scipy.stats.norm.ppf(0.75)
sigma = np.median(np.abs(detail_coeffs)) / denom
else:
raise ValueError("Only Gaussian noise estimation is currently "
"supported")
return sigma
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
sigma=None, mode='soft', wavelet_levels=None):
"""Perform wavelet thresholding.
Parameters
----------
image : ndarray (2d or 3d) of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
wavelet : string
The type of wavelet to perform. Can be any of the options
pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
db3, db4, haar}``.
method : {'BayesShrink', 'VisuShrink'}, optional
Thresholding method to be used. The currently supported methods are
"BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
user-specified ``threshold`` must be supplied instead.
threshold : float, optional
The thresholding value to apply during wavelet coefficient
thresholding. The default value (None) uses the selected ``method`` to
estimate appropriate threshold(s) for noise removal.
sigma : float, optional
The standard deviation of the noise. The noise is estimated when sigma
is None (the default) by the method in [2]_.
mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
best approximation of the original image.
wavelet_levels : int or None, optional
The number of wavelet decomposition levels to use. The default is
three less than the maximum number of possible decomposition levels
(see Notes below).
Returns
-------
out : ndarray
Denoised image.
References
----------
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
thresholding for image denoising and compression." Image Processing,
IEEE Transactions on 9.9 (2000): 1532-1546.
DOI: 10.1109/83.862633
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI: 10.1093/biomet/81.3.425
"""
wavelet = pywt.Wavelet(wavelet)
# original_extent is used to workaround PyWavelets issue #80
# odd-sized input results in an image with 1 extra sample after waverecn
original_extent = tuple(slice(s) for s in image.shape)
# Determine the number of wavelet decomposition levels
if wavelet_levels is None:
# Determine the maximum number of possible levels for image
dlen = wavelet.dec_len
wavelet_levels = np.min(
[pywt.dwt_max_level(s, dlen) for s in image.shape])
# Skip coarsest wavelet scales (see Notes in docstring).
wavelet_levels = max(wavelet_levels - 3, 1)
coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
# Detail coefficients at each decomposition level
dcoeffs = coeffs[1:]
if sigma is None:
# Estimate the noise via the method in [2]_
detail_coeffs = dcoeffs[-1]['d' * image.ndim]
sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
if method is not None and threshold is not None:
warn(("Thresholding method {} selected. The user-specified threshold "
"will be ignored.").format(method))
if threshold is None:
var = sigma**2
if method is None:
raise ValueError(
"If method is None, a threshold must be provided.")
elif method == "BayesShrink":
# The BayesShrink thresholds from [1]_ in docstring
threshold = [{key: _bayes_thresh(level[key], var) for key in level}
for level in dcoeffs]
elif method == "VisuShrink":
# The VisuShrink thresholds from [2]_ in docstring
threshold = _universal_thresh(image, sigma)
else:
raise ValueError("Unrecognized method: {}".format(method))
if np.isscalar(threshold):
# A single threshold for all coefficient arrays
denoised_detail = [{key: pywt.threshold(level[key],
value=threshold,
mode=mode) for key in level}
for level in dcoeffs]
else:
# Dict of unique threshold coefficients for each detail coeff. array
denoised_detail = [{key: pywt.threshold(level[key],
value=thresh[key],
mode=mode) for key in level}
for thresh, level in zip(threshold, dcoeffs)]
denoised_coeffs = [coeffs[0]] + denoised_detail
return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
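# Illustrative usage sketch (added for clarity): the private helper can also
# be driven with a single user-chosen threshold by leaving ``method`` unset.
# The noise image and the 0.2 threshold are arbitrary example values.
def _example_manual_wavelet_threshold():
    rng = np.random.RandomState(0)
    noisy = 0.5 + 0.1 * rng.randn(64, 64)
    # A scalar threshold is applied uniformly to every detail sub-band.
    return _wavelet_threshold(noisy, wavelet='db1', method=None,
                              threshold=0.2, mode='soft')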
def denoise_wavelet(image, sigma=None, wavelet='db1', mode='soft',
wavelet_levels=None, multichannel=False,
convert2ycbcr=False, method='BayesShrink'):
"""Perform wavelet denoising on an image.
Parameters
----------
image : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
sigma : float or list, optional
The noise standard deviation used when computing the wavelet detail
coefficient threshold(s). When None (default), the noise standard
deviation is estimated via the method in [2]_.
wavelet : string, optional
The type of wavelet to perform and can be any of the options
``pywt.wavelist`` outputs. The default is `'db1'`. For example,
``wavelet`` can be any of ``{'db2', 'haar', 'sym9'}`` and many more.
mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
best approximation of the original image.
wavelet_levels : int or None, optional
The number of wavelet decomposition levels to use. The default is
three less than the maximum number of possible decomposition levels.
multichannel : bool, optional
Apply wavelet denoising separately for each channel (where channels
correspond to the final axis of the array).
convert2ycbcr : bool, optional
If True and multichannel True, do the wavelet denoising in the YCbCr
colorspace instead of the RGB color space. This typically results in
better performance for RGB images.
method : {'BayesShrink', 'VisuShrink'}, optional
Thresholding method to be used. The currently supported methods are
"BayesShrink" [1]_ and "VisuShrink" [2]_. Defaults to "BayesShrink".
Returns
-------
out : ndarray
Denoised image.
Notes
-----
The wavelet domain is a sparse representation of the image, and can be
thought of similarly to the frequency domain of the Fourier transform.
Sparse representations have most values zero or near-zero and truly random
noise is (usually) represented by many small values in the wavelet domain.
Setting all values below some threshold to 0 reduces the noise in the
image, but larger thresholds also decrease the detail present in the image.
If the input is 3D, this function performs wavelet denoising on each color
    plane separately. The output image is clipped to either [-1, 1] or
    [0, 1], depending on the input image range.
When YCbCr conversion is done, every color channel is scaled between 0
and 1, and `sigma` values are applied to these scaled color channels.
Many wavelet coefficient thresholding approaches have been proposed. By
default, ``denoise_wavelet`` applies BayesShrink, which is an adaptive
thresholding method that computes separate thresholds for each wavelet
sub-band as described in [1]_.
If ``method == "VisuShrink"``, a single "universal threshold" is applied to
all wavelet detail coefficients as described in [2]_. This threshold
is designed to remove all Gaussian noise at a given ``sigma`` with high
probability, but tends to produce images that appear overly smooth.
References
----------
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
thresholding for image denoising and compression." Image Processing,
IEEE Transactions on 9.9 (2000): 1532-1546.
DOI: 10.1109/83.862633
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI: 10.1093/biomet/81.3.425
Examples
--------
>>> from skimage import color, data
>>> img = img_as_float(data.astronaut())
>>> img = color.rgb2gray(img)
>>> img += 0.1 * np.random.randn(*img.shape)
>>> img = np.clip(img, 0, 1)
>>> denoised_img = denoise_wavelet(img, sigma=0.1)
"""
if method not in ["BayesShrink", "VisuShrink"]:
raise ValueError(
('Invalid method: {}. The currently supported methods are '
'"BayesShrink" and "VisuShrink"').format(method))
image = img_as_float(image)
if multichannel:
if isinstance(sigma, numbers.Number) or sigma is None:
sigma = [sigma] * image.shape[-1]
if multichannel:
if convert2ycbcr:
out = color.rgb2ycbcr(image)
for i in range(3):
# renormalizing this color channel to live in [0, 1]
min, max = out[..., i].min(), out[..., i].max()
channel = out[..., i] - min
channel /= max - min
out[..., i] = denoise_wavelet(channel, wavelet=wavelet,
method=method, sigma=sigma[i],
mode=mode,
wavelet_levels=wavelet_levels)
out[..., i] = out[..., i] * (max - min)
out[..., i] += min
out = color.ycbcr2rgb(out)
else:
out = np.empty_like(image)
for c in range(image.shape[-1]):
out[..., c] = _wavelet_threshold(image[..., c],
wavelet=wavelet,
method=method,
sigma=sigma[c], mode=mode,
wavelet_levels=wavelet_levels)
else:
out = _wavelet_threshold(image, wavelet=wavelet, method=method,
sigma=sigma, mode=mode,
wavelet_levels=wavelet_levels)
clip_range = (-1, 1) if image.min() < 0 else (0, 1)
return np.clip(out, *clip_range)
def estimate_sigma(image, average_sigmas=False, multichannel=False):
"""
Robust wavelet-based estimator of the (Gaussian) noise standard deviation.
Parameters
----------
image : ndarray
Image for which to estimate the noise standard deviation.
average_sigmas : bool, optional
If true, average the channel estimates of `sigma`. Otherwise return
a list of sigmas corresponding to each channel.
multichannel : bool
Estimate sigma separately for each channel.
Returns
-------
sigma : float or list
Estimated noise standard deviation(s). If `multichannel` is True and
`average_sigmas` is False, a separate noise estimate for each channel
is returned. Otherwise, the average of the individual channel
estimates is returned.
Notes
-----
This function assumes the noise follows a Gaussian distribution. The
estimation algorithm is based on the median absolute deviation of the
wavelet detail coefficients as described in section 4.2 of [1]_.
References
----------
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI:10.1093/biomet/81.3.425
Examples
--------
>>> import skimage.data
>>> from skimage import img_as_float
>>> img = img_as_float(skimage.data.camera())
>>> sigma = 0.1
>>> img = img + sigma * np.random.standard_normal(img.shape)
>>> sigma_hat = estimate_sigma(img, multichannel=False)
"""
if multichannel:
nchannels = image.shape[-1]
sigmas = [estimate_sigma(
image[..., c], multichannel=False) for c in range(nchannels)]
if average_sigmas:
sigmas = np.mean(sigmas)
return sigmas
elif image.shape[-1] <= 4:
msg = ("image is size {0} on the last axis, but multichannel is "
"False. If this is a color image, please set multichannel "
"to True for proper noise estimation.")
warn(msg.format(image.shape[-1]))
coeffs = pywt.dwtn(image, wavelet='db2')
detail_coeffs = coeffs['d' * image.ndim]
return _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
|
the-stack_0_10428 | from recsys.preprocess import *
from sklearn import model_selection
import numpy as np
from recsys.utility import *
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
train = get_train()
target_playlist = get_target_playlists()
target_tracks = get_target_tracks()
# Uncomment if you want to test
# train, test, target_playlist, target_tracks = train_test_split(train, test_size=0.20)
most_popular = get_most_popular_tracks(train)
tracks_in_playlist = get_playlist_track_list2(train)
tracks_to_suggest = most_popular.index.values
predictions = pd.DataFrame(target_playlist)
predictions.index = target_playlist['playlist_id']
predictions['track_ids'] = [np.array([]) for i in range(len(predictions))]
for it,row in target_playlist.iterrows():
count = 0
i = 0
pred = []
while count < 5:
if tracks_to_suggest[i] not in tracks_in_playlist.loc[row['playlist_id']]['track_ids']:
# Predict track i
# IMPORTANT: should we check if the track to suggest is in target_tracks?
pred.append(tracks_to_suggest[i])
count += 1
i += 1
predictions.loc[row['playlist_id']] = predictions.loc[row['playlist_id']].set_value('track_ids', np.array(pred))
# To evaluate, just use:
# evaluate(recommendations=predictions, test=test)
# Make the dataframe friendly for output -> convert np.array in string
predictions['track_ids'] = predictions['track_ids'].apply(lambda x : ' '.join(map(str, x)))
predictions.to_csv('results.csv', index=False)
|
the-stack_0_10429 | """
Engines API
Allow clients to fetch Analytics through APIs. # noqa: E501
The version of the OpenAPI document: v3:[pa,spar,vault,pub,quant,fi,axp,afi,npo,bpm,fpo,others],v1:[fiab]
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.analyticsapi.engines.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class QuantFormula(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('source',): {
'SCREENINGEXPRESSION': "ScreeningExpression",
'FQLEXPRESSION': "FqlExpression",
'UNIVERSALSCREENPARAMETER': "UniversalScreenParameter",
'ALLUNIVERSALSCREENPARAMETERS': "AllUniversalScreenParameters",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'source': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'source': 'source', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, source, *args, **kwargs): # noqa: E501
"""QuantFormula - a model defined in OpenAPI
Args:
source (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.source = source
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
the-stack_0_10432 | import torch
from torch import nn
from torch.nn import functional as F
from models import infogan
class Encoder(nn.Module):
def __init__(self, latent_dim: int):
super().__init__()
self.h1_nchan = 64
self.conv1 = nn.Sequential(
nn.Conv2d(1, self.h1_nchan, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(.1, inplace=True)
)
self.h2_nchan = 128
self.conv2 = nn.Sequential(
nn.Conv2d(self.h1_nchan, self.h2_nchan, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(self.h2_nchan),
nn.LeakyReLU(.1, inplace=True)
)
self.h3_dim = 1024
self.fc1 = nn.Sequential(
nn.Linear(7 * 7 * self.h2_nchan, self.h3_dim),
nn.BatchNorm1d(self.h3_dim),
nn.LeakyReLU(.1, inplace=True)
)
self.fc2_mean = nn.Linear(self.h3_dim, latent_dim)
self.fc2_logvar = nn.Linear(self.h3_dim, latent_dim)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x).view(-1, 7 * 7 * self.h2_nchan)
x = self.fc1(x)
mean = self.fc2_mean(x)
logvar = self.fc2_logvar(x)
return mean, logvar
Decoder = infogan.Generator
def sample_noise(num, dim, device=None) -> torch.Tensor:
return torch.randn(num, dim, device=device)
class VAE(nn.Module):
def __init__(self, latent_dim: int):
super().__init__()
self.latent_dim = latent_dim
self.enc = Encoder(self.latent_dim)
self.dec = Decoder(self.latent_dim)
self.apply(_weights_init)
def sample_latent(self, num: int):
return sample_noise(num, self.latent_dim, self.device)
def sample_posterior(self, data, num: int = 1):
noise = torch.randn(data.shape[0], num, self.latent_dim, device=self.device)
mean, logvar = self.enc(data)
        latent = mean.unsqueeze(1) + (.5 * logvar).exp().unsqueeze(1) * noise
        return latent
def forward(self, data):
noise = self.sample_latent(data.shape[0])
mean, logvar = self.enc(data)
latent = mean + (.5 * logvar).exp() * noise
recon = self.dec(latent)
return mean, logvar, latent, recon
@property
def device(self):
return next(self.parameters()).device
def _weights_init(m):
classname = m.__class__.__name__
if 'Conv' in classname:
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0.)
elif 'BatchNorm' in classname:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.)
class Trainer(nn.Module):
def __init__(self, model: VAE, beta: float = 1., lr: float = 1e-3):
super().__init__()
self.model = model
self.beta = beta
params = list(self.model.enc.parameters()) + list(self.model.dec.parameters())
self.opt = torch.optim.Adam(params, lr=lr, betas=(.5, .99))
def step(self, real_data, verbose: bool = False):
mean, logvar, latent, fake_data = self.model(real_data)
rec_loss = F.binary_cross_entropy(fake_data, (real_data > .5).float(), size_average=False)
# rec_loss = F.binary_cross_entropy(fake_data, real_data, size_average=False)
kl_div = -.5 * (1. + logvar - mean ** 2 - logvar.exp()).sum()
self.opt.zero_grad()
(rec_loss + self.beta * kl_div).backward()
self.opt.step()
if verbose:
print(f"rec_loss = {rec_loss.item():6g}, KL_div = {kl_div.item():6g}, ")
def forward(self, real_data, verbose: bool = False):
self.step(real_data, verbose)
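# Illustrative training-loop sketch (added for clarity, not in the original
# module). Random binary "images" stand in for a real dataset, so only the
# wiring of VAE and Trainer is shown; it assumes the infogan Generator maps a
# latent vector to 1x28x28 outputs in [0, 1], as the BCE loss above implies.
def _example_vae_training(num_steps: int = 10, batch_size: int = 32):
    torch.manual_seed(0)
    model = VAE(latent_dim=16)
    trainer = Trainer(model, beta=1.0, lr=1e-3)
    for step in range(num_steps):
        # A real run would iterate batches from a DataLoader instead.
        real_data = (torch.rand(batch_size, 1, 28, 28) > 0.5).float()
        trainer(real_data, verbose=(step % 5 == 0))
    return model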
|
the-stack_0_10433 | import gym
__all__ = ['SkipWrapper']
def SkipWrapper(repeat_count):
class SkipWrapper(gym.Wrapper):
"""
Generic common frame skipping wrapper
Will perform action for `x` additional steps
"""
def __init__(self, env):
super(SkipWrapper, self).__init__(env)
self.repeat_count = repeat_count
self.stepcount = 0
def _step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < (self.repeat_count + 1) and not done:
self.stepcount += 1
obs, reward, done, info = self.env.step(action)
total_reward += reward
current_step += 1
if 'skip.stepcount' in info:
raise gym.error.Error('Key "skip.stepcount" already in info. Make sure you are not stacking ' \
'the SkipWrapper wrappers.')
info['skip.stepcount'] = self.stepcount
return obs, total_reward, done, info
def _reset(self, **kwargs):
self.stepcount = 0
return self.env.reset(**kwargs)
return SkipWrapper
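# Illustrative usage sketch (added for clarity): wrap an environment so every
# chosen action is repeated four extra steps. It targets the legacy gym
# wrapper API (`_step`/`_reset`) used above; the environment id is arbitrary.
def _example_skip_wrapper():
    env = SkipWrapper(repeat_count=4)(gym.make("CartPole-v0"))
    env.reset()
    obs, total_reward, done, info = env.step(env.action_space.sample())
    # info['skip.stepcount'] counts the underlying env steps taken so far.
    return total_reward, info["skip.stepcount"]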
|
the-stack_0_10435 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.VAR.GG.NN
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import numpy as np
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.model import get_optimizer
from utils.model import train_or_load_classifier
from utils.evaluation import evaluate_summary_computer
from utils.images import gather_images
from visual.misc import plot_params
from problem.gamma_gauss import GGConfig as Config
from problem.gamma_gauss import Generator
from problem.gamma_gauss import param_generator
from problem.gamma_gauss import GGNLL as NLLComputer
from model.neural_network import NeuralNetClassifier
from archi.classic import L4 as ARCHI
from ...my_argparser import NET_parse_args
DATA_NAME = 'GG'
BENCHMARK_NAME = 'VAR-'+DATA_NAME
N_ITER = 30
def build_model(args, i_cv):
args.net = ARCHI(n_in=1, n_out=2, n_unit=args.n_unit)
args.optimizer = get_optimizer(args)
model = get_model(args, NeuralNetClassifier)
model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
return model
# =====================================================================
# MAIN
# =====================================================================
def main():
# BASIC SETUP
logger = set_logger()
    args = NET_parse_args(main_description="Training launcher for NN on GG benchmark")
logger.info(args)
flush(logger)
# INFO
model = build_model(args, -1)
os.makedirs(model.results_directory, exist_ok=True)
# RUN
logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
results = [run(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
results = pd.concat(results, ignore_index=True)
# EVALUATION
results.to_csv(os.path.join(model.results_directory, 'threshold.csv'))
print(results)
print("DONE !")
def run(args, i_cv):
logger = logging.getLogger()
print_line()
logger.info('Running iter n°{}'.format(i_cv))
print_line()
# LOAD/GENERATE DATA
logger.info('Set up data generator')
config = Config()
seed = SEED + i_cv * 5
train_generator = Generator(seed)
valid_generator = Generator(seed+1)
test_generator = Generator(seed+2)
# SET MODEL
logger.info('Set up classifier')
model = build_model(args, i_cv)
os.makedirs(model.results_path, exist_ok=True)
flush(logger)
# TRAINING / LOADING
train_or_load_classifier(model, train_generator, config.CALIBRATED, config.N_TRAINING_SAMPLES, retrain=args.retrain)
# MEASUREMENT
result_row = {'i_cv': i_cv}
results = []
for test_config in config.iter_test_config():
logger.info(f"Running test set : {test_config.TRUE}, {test_config.N_TESTING_SAMPLES} samples")
for threshold in np.linspace(0, 1, 500):
result_row = {'i_cv': i_cv}
result_row['threshold'] = threshold
result_row.update(test_config.TRUE.to_dict(prefix='true_'))
result_row['n_test_samples'] = test_config.N_TESTING_SAMPLES
X, y, w = valid_generator.generate(*config.TRUE, n_samples=config.N_VALIDATION_SAMPLES)
proba = model.predict_proba(X)
decision = proba[:, 1]
selected = decision > threshold
beta = np.sum(y[selected] == 0)
gamma = np.sum(y[selected] == 1)
result_row['beta'] = beta
result_row['gamma'] = gamma
X, y, w = test_generator.generate(*config.TRUE, n_samples=config.N_VALIDATION_SAMPLES)
proba = model.predict_proba(X)
decision = proba[:, 1]
selected = decision > threshold
n_selected = np.sum(selected)
n_selected_bkg = np.sum(y[selected] == 0)
n_selected_sig = np.sum(y[selected] == 1)
result_row['n'] = n_selected
result_row['b'] = n_selected_bkg
result_row['s'] = n_selected_sig
result_row['s_sqrt_n'] = n_selected_sig / np.sqrt(n_selected)
            result_row['s_sqrt_b'] = n_selected_sig / np.sqrt(n_selected_bkg)
results.append(result_row.copy())
results = pd.DataFrame(results)
print(results)
return results
if __name__ == '__main__':
main()
|
the-stack_0_10437 | import argparse
from tdw.controller import Controller
from tdw.remote_build_launcher import RemoteBuildLauncher
class MinimalRemote(Controller):
"""
A minimal example of how to use the launch binaries daemon to
start and connect to a build on a remote node. Note: the remote
must be running binary_manager.py.
"""
def __init__(self):
args = self.parse_args()
build_info = RemoteBuildLauncher.launch_build(args.listening_port,
args.build_address,
args.controller_address)
super().__init__(port=build_info["build_port"])
def parse_args(self):
"""
        Helper function that parses command line arguments.
Returns parsed args.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--listening_port",
default="5556",
type=str,
help="Port on which binary_manager is listening",
)
parser.add_argument(
"--build_address",
default="node14-ccncluster.stanford.edu",
type=str,
help="IP/hostname on which to launch build",
)
parser.add_argument(
"--controller_address",
default="node05-ccncluster.stanford.edu",
type=str,
help="Address of controller",
)
args = parser.parse_args()
return args
def run(self):
# Create an empty environment.
self.communicate({"$type": "create_empty_environment"})
for i in range(100):
# Do nothing. Receive a response from the build.
resp = self.communicate([])
print(resp)
self.communicate({"$type": "terminate"})
if __name__ == "__main__":
MinimalRemote().run()
|
the-stack_0_10438 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test bulk issuetracker synchronization."""
# pylint: disable=too-many-lines,invalid-name
import unittest
from collections import OrderedDict
import ddt
import mock
from flask import g
from ggrc import settings
from ggrc import db
from ggrc import views
from ggrc.notifications import data_handlers
from ggrc.integrations import integrations_errors
from ggrc.integrations import issuetracker_bulk_sync
from ggrc.integrations import constants
from ggrc.integrations.synchronization_jobs import sync_utils
from ggrc.models import all_models, inflector
from ggrc.models.hooks.issue_tracker import issue_tracker_params_builder
from integration.ggrc import TestCase, generator
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
class TestBulkIssuesSync(TestCase):
"""Base class for bulk issuetracker synchronization tests."""
def setUp(self):
"""Set up for test methods."""
super(TestBulkIssuesSync, self).setUp()
self.api = Api()
self.gen = generator.ObjectGenerator()
self.role_people = {
"Audit Captains": factories.PersonFactory(email="[email protected]"),
"Creators": factories.PersonFactory(email="[email protected]"),
"Assignees": factories.PersonFactory(email="[email protected]"),
"Verifiers": factories.PersonFactory(email="[email protected]"),
}
self.issue_id = "42"
def setup_assessments(self, asmnt_count, issue_id=None, enabled=True):
"""Create Audit with couple of Assessments and linked IssueTrackerIssues.
Args:
asmnt_count: Count of Assessments in Audit.
Returns:
Tuple with Audit id and list of Assessment ids.
"""
with factories.single_commit():
audit = factories.AuditFactory()
audit.add_person_with_role_name(
self.role_people["Audit Captains"],
"Audit Captains",
)
factories.IssueTrackerIssueFactory(
enabled=enabled,
issue_tracked_obj=audit,
issue_id=issue_id,
issue_type=constants.DEFAULT_ISSUETRACKER_VALUES['issue_type'],
component_id=12345,
hotlist_id=12345,
issue_priority="P2",
issue_severity="S2",
)
assessment_ids = []
for _ in range(asmnt_count):
asmnt = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=asmnt)
for role_name in ["Creators", "Assignees", "Verifiers"]:
asmnt.add_person_with_role_name(
self.role_people[role_name],
role_name,
)
factories.IssueTrackerIssueFactory(
enabled=enabled,
issue_tracked_obj=asmnt,
issue_id=issue_id,
title=None,
)
assessment_ids.append(asmnt.id)
return audit.id, assessment_ids
@staticmethod
def setup_issues(issue_count, issue_id=None, enabled=True):
"""Create issues with enabled integration."""
with factories.single_commit():
issue_ids = []
for _ in range(issue_count):
issue = factories.IssueFactory()
factories.IssueTrackerIssueFactory(
enabled=enabled,
issue_tracked_obj=issue,
issue_id=issue_id,
title=None,
)
issue_ids.append(issue.id)
return issue_ids
def issuetracker_sync_mock(self, sync_func_name):
"""IssueTracker sync method mock."""
return mock.patch.object(
sync_utils,
sync_func_name,
return_value={"issueId": self.issue_id}
)
def generate_children_issues_for(self, parent_type, parent_id, child_type):
"""Generate IssueTracker issue for objects with provided type and ids.
Args:
obj_type: Type of objects. Now only 'Assessment' supported.
obj_ids: List with ids of objects.
Returns:
Response with result of issues generation.
"""
with self.issuetracker_sync_mock("create_issue"):
return self.api.send_request(
self.api.client.post,
api_link="/generate_children_issues",
data={
"parent": {"type": parent_type, "id": parent_id},
"child_type": child_type,
}
)
def generate_issues_for(self, object_info):
"""Generate IssueTracker issues for provided objects."""
with self.issuetracker_sync_mock("create_issue"):
return self.api.send_request(
self.api.client.post,
api_link="/generate_issues",
data={
"objects": [{
"type": type_,
"id": id_,
"hotlist_ids": hotlist_id,
"component_id": component_id,
} for type_, id_, hotlist_id, component_id in object_info],
}
)
def update_issues_for(self, object_info):
"""Update IssueTracker issues for provided objects."""
with self.issuetracker_sync_mock("update_issue"):
return self.api.send_request(
self.api.client.post,
api_link="/update_issues",
data={
"objects": [{
"type": type_,
"id": id_,
"hotlist_ids": hotlist_id,
"component_id": component_id,
} for type_, id_, hotlist_id, component_id in object_info],
}
)
def assert_obj_issues(self, issuetracker_info, assignee=None):
"""Check correctness of created IssueTracker issues."""
for type_, id_, hotlist_id, component_id in issuetracker_info:
obj = inflector.get_model(type_).query.get(id_)
issue = obj.issuetracker_issue
self.assertEqual(issue.enabled, 1)
self.assertEqual(issue.title, obj.title)
self.assertEqual(issue.component_id, component_id)
self.assertEqual(issue.hotlist_id, hotlist_id)
self.assertEqual(
issue.issue_type,
constants.DEFAULT_ISSUETRACKER_VALUES['issue_type']
)
self.assertEqual(issue.issue_priority, "P2")
self.assertEqual(issue.issue_severity, "S2")
self.assertEqual(issue.assignee, assignee)
self.assertEqual(issue.cc_list, "")
self.assertEqual(issue.issue_id, self.issue_id)
self.assertEqual(
issue.issue_url,
"http://issue/{}".format(self.issue_id)
)
def assert_children_asmnt_issues(self, asmnt_ids):
"""Check if Assessments IssueTracker issues inherit data from Audit."""
assessments = all_models.Assessment.query.filter(
all_models.Assessment.id.in_(asmnt_ids)
)
for asmnt in assessments:
issue = asmnt.issuetracker_issue
parent_issue = asmnt.audit.issuetracker_issue
self.assertEqual(issue.enabled, 1)
self.assertEqual(issue.title, asmnt.title)
self.assertEqual(issue.component_id, parent_issue.component_id)
self.assertEqual(issue.hotlist_id, parent_issue.hotlist_id)
self.assertEqual(issue.issue_type, parent_issue.issue_type)
self.assertEqual(issue.issue_priority, parent_issue.issue_priority)
self.assertEqual(issue.issue_severity, parent_issue.issue_severity)
self.assertEqual(issue.assignee, "[email protected]")
self.assertEqual(issue.cc_list, "")
self.assertEqual(issue.issue_id, self.issue_id)
self.assertEqual(
issue.issue_url,
"http://issue/{}".format(self.issue_id)
)
def assert_not_updated(self, object_type, object_ids):
"""Check if IssueTracker issues have empty fields.
Args:
object_type: Type of objects which issues should be checked.
object_ids: List with ids for objects which issues should be checked.
Raise:
AssertionError if relevant Issues have non-empty base fields.
"""
issues = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.object_type == object_type,
all_models.IssuetrackerIssue.object_id.in_(object_ids),
)
for issue in issues:
self.assertEqual(issue.issue_id, None)
self.assertEqual(issue.assignee, None)
self.assertEqual(issue.cc_list, "")
self.assertEqual(issue.title, None)
@ddt.ddt
class TestBulkIssuesGenerate(TestBulkIssuesSync):
"""Test bulk issues generation."""
@ddt.data("Assessment", "Issue")
def test_integration_disabled_on_bulk_create_error(self, model):
"""Test if {} integration was disabled if bulk creation failed"""
user = all_models.Person.query.first()
with factories.single_commit():
obj = factories.get_model_factory(model)(
modified_by=user
)
iti = factories.IssueTrackerIssueFactory(
issue_tracked_obj=obj,
enabled=True,
issue_id=None,
)
bulk_creator = issuetracker_bulk_sync.IssueTrackerBulkCreator()
objects = [issuetracker_bulk_sync.IssuetrackedObjInfo(obj)]
with mock.patch.object(bulk_creator, "sync_issue") as sync_mock:
sync_mock.side_effect = integrations_errors.HttpError("error")
bulk_creator.handle_issuetracker_sync(objects)
sync_mock.assert_called_once()
self.assertFalse(iti.enabled)
def test_get_objects_method_assmt(self):
"""Test get_issuetracked_objects() for not linked assessments."""
_, assessment_ids_enabled = self.setup_assessments(3)
_, assessment_ids_disabled = self.setup_assessments(2, enabled=False)
assessment_ids = assessment_ids_enabled + assessment_ids_disabled
creator = issuetracker_bulk_sync.IssueTrackerBulkCreator
result = creator.get_issuetracked_objects("Assessment", assessment_ids)
result_ids = [assmt.id for assmt in result]
self.assertEqual(set(assessment_ids_enabled), set(result_ids))
def test_get_objects_method_issue(self):
"""Test get_issuetracked_objects() for not linked issues."""
issue_ids_enabled = self.setup_issues(3)
issue_ids_disabled = self.setup_issues(2, enabled=False)
issue_ids = issue_ids_enabled + issue_ids_disabled
creator = issuetracker_bulk_sync.IssueTrackerBulkCreator
result = creator.get_issuetracked_objects("Issue", issue_ids)
result_ids = [issue.id for issue in result]
self.assertEqual(set(issue_ids_enabled), set(result_ids))
def test_issue_generate_call(self):
"""Test generate_issue call creates task for bulk generate."""
user = all_models.Person.query.filter_by(email="[email protected]").one()
setattr(g, '_current_user', user)
data = {
"revision_ids": [1, 2, 3],
}
result = views.background_update_issues(data)
self.assert200(result)
bg_task = all_models.BackgroundTask.query.one()
self.assertEqual(bg_task.status, "Success")
def test_asmnt_bulk_generate(self):
"""Test bulk generation of issues for Assessments."""
_, assessment_ids = self.setup_assessments(3)
asmnt_issuetracker_info = [
("Assessment", id_, "123", "321") for id_ in assessment_ids
]
response = self.generate_issues_for(asmnt_issuetracker_info)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
self.assert_obj_issues(asmnt_issuetracker_info, "[email protected]")
@unittest.skip("Not implemented.")
def test_permission_check(self):
"""Test generation if user has rights on part of objects."""
_, assessment_ids = self.setup_assessments(3)
with_rights_ids = assessment_ids[:2]
without_rights_ids = assessment_ids[2:]
_, assignee_user = self.gen.generate_person(user_role="Creator")
with factories.single_commit():
for id_ in with_rights_ids:
assessment = all_models.Assessment.query.get(id_)
assessment.add_person_with_role_name(assignee_user, "Creators")
self.api.set_user(assignee_user)
asmnt_issuetracker_info = [
("Assessment", id_, "123", "321") for id_ in assessment_ids
]
response = self.generate_issues_for(asmnt_issuetracker_info)
self.assert200(response)
forbidden_err = "403 Forbidden: You don't have the permission to access " \
"the requested resource. It is either read-protected or " \
"not readable by the server."
expected_errors = [
["Assessment", id_, forbidden_err] for id_ in without_rights_ids
]
self.assertEqual(response.json.get("errors"), expected_errors)
with_rights_info = [
("Assessment", id_, "123", "321") for id_ in with_rights_ids
]
self.assert_obj_issues(with_rights_info, "[email protected]")
self.assert_not_updated("Assessment", without_rights_ids)
def test_issue_bulk_generate(self):
"""Test bulk generation of issuetracker issues for Issue."""
issue_ids = []
with factories.single_commit():
person = factories.PersonFactory()
person_email = person.email
for _ in range(3):
issue = factories.IssueFactory(modified_by=person)
for role_name in ["Admin", "Primary Contacts"]:
issue.add_person_with_role_name(person, role_name)
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=issue,
issue_id=None,
title='',
component_id=12345,
hotlist_id=54321,
issue_priority="P2",
issue_severity="S2",
)
issue_ids.append(issue.id)
issue_issuetracker_info = [
("Issue", id_, None, None) for id_ in issue_ids
]
response = self.generate_issues_for(issue_issuetracker_info)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
issues = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.object_type == "Issue",
all_models.IssuetrackerIssue.object_id.in_(issue_ids)
).all()
for issue in issues:
parent_obj = issue.Issue_issue_tracked
self.assertEqual(issue.enabled, 1)
self.assertEqual(issue.title, parent_obj.title)
self.assertEqual(issue.component_id, "12345")
self.assertEqual(issue.hotlist_id, "54321")
self.assertEqual(issue.issue_priority, "P2")
self.assertEqual(issue.issue_severity, "S2")
self.assertEqual(issue.assignee, person_email)
self.assertEqual(issue.cc_list, "")
self.assertEqual(issue.issue_id, self.issue_id)
self.assertEqual(
issue.issue_url,
"http://issue/{}".format(self.issue_id)
)
def test_rate_limited_generate(self):
"""Test tickets generation when issuetracker raise 429 error."""
_, assessment_ids = self.setup_assessments(3)
error = integrations_errors.HttpError(data="Test Error", status=429)
with mock.patch(
"ggrc.integrations.issues.Client.create_issue",
side_effect=error
) as create_issue_mock:
with mock.patch("time.sleep"):
response = self.api.send_request(
self.api.client.post,
api_link="/generate_issues",
data={
"objects": [{
"type": "Assessment",
"id": id_
} for id_ in assessment_ids],
}
)
self.assert200(response)
expected_errors = [
["Assessment", id_, "429 Test Error"]
for id_ in assessment_ids
]
self.assertEqual(response.json.get("errors"), expected_errors)
# 3 times for each assessment
self.assertEqual(create_issue_mock.call_count, 9)
def test_exception_notification(self):
"""Test notification about failed bulk update."""
filename = "test.csv"
updater = issuetracker_bulk_sync.IssueTrackerBulkUpdater()
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
updater.send_notification(filename, "[email protected]", failed=True)
self.assertEqual(send_mock.call_count, 1)
(email, title, body), _ = send_mock.call_args_list[0]
self.assertEqual(title, updater.ISSUETRACKER_SYNC_TITLE)
self.assertEqual(email, "[email protected]")
self.assertIn(updater.ERROR_TITLE.format(filename=filename), body)
self.assertIn(updater.EXCEPTION_TEXT, body)
def test_succeeded_notification(self):
"""Test notification about succeeded bulk generation."""
creator = issuetracker_bulk_sync.IssueTrackerBulkCreator()
filename = "test_file.csv"
recipient = "[email protected]"
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
creator.send_notification(filename, recipient)
self.assertEqual(send_mock.call_count, 1)
(email, title, body), _ = send_mock.call_args_list[0]
self.assertEqual(title, creator.ISSUETRACKER_SYNC_TITLE)
self.assertEqual(email, recipient)
self.assertIn(creator.SUCCESS_TITLE.format(filename=filename), body)
self.assertIn(creator.SUCCESS_TEXT, body)
def test_error_notification(self):
"""Test notification about bulk generation with errors"""
creator = issuetracker_bulk_sync.IssueTrackerBulkCreator()
filename = "test_file.csv"
recipient = "[email protected]"
assmt = factories.AssessmentFactory()
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
creator.send_notification(filename, recipient, errors=[(assmt, "")])
self.assertEqual(send_mock.call_count, 1)
(email, title, body), _ = send_mock.call_args_list[0]
self.assertEqual(title, creator.ISSUETRACKER_SYNC_TITLE)
self.assertEqual(email, recipient)
self.assertIn(creator.ERROR_TITLE.format(filename=filename), body)
self.assertIn(assmt.slug, body)
self.assertIn(assmt.title, body)
self.assertIn(data_handlers.get_object_url(assmt), body)
@ddt.ddt
class TestBulkIssuesChildGenerate(TestBulkIssuesSync):
"""Test bulk issues generation for child objects."""
def test_get_objects_method_assmt(self):
"""Test get_issuetracked_objects() for linked assessments."""
_, assessment_ids_enabled = self.setup_assessments(3, issue_id=123)
_, assessment_ids_disabled = self.setup_assessments(2,
issue_id=123,
enabled=False)
assessment_ids = assessment_ids_enabled + assessment_ids_disabled
updater = issuetracker_bulk_sync.IssueTrackerBulkUpdater
result = updater.get_issuetracked_objects("Assessment", assessment_ids)
result_ids = [assmt.id for assmt in result]
self.assertEqual(set(assessment_ids_enabled), set(result_ids))
def test_get_objects_method_issue(self):
"""Test get_issuetracked_objects() for linked issues."""
issue_ids_enabled = self.setup_issues(3, issue_id=123)
issue_ids_disabled = self.setup_issues(2, issue_id=123, enabled=False)
issue_ids = issue_ids_enabled + issue_ids_disabled
updater = issuetracker_bulk_sync.IssueTrackerBulkUpdater
result = updater.get_issuetracked_objects("Issue", issue_ids)
result_ids = [issue.id for issue in result]
self.assertEqual(set(issue_ids_enabled), set(result_ids))
def test_asmnt_bulk_child_generate(self):
"""Test generation of issues for all Assessments in Audit."""
audit_id, assessment_ids = self.setup_assessments(3)
with mock.patch("ggrc.notifications.common.send_email"):
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
self.assert_children_asmnt_issues(assessment_ids)
def test_norights(self):
"""Test generation if user doesn't have rights on Audit and Assessment."""
audit_id, assessment_ids = self.setup_assessments(3)
_, side_user = self.gen.generate_person(user_role="Creator")
self.api.set_user(side_user)
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
self.assert_not_updated("Assessment", assessment_ids)
def test_partially_rights(self):
"""Test generation if user has rights on part of Assessments."""
audit_id, assessment_ids = self.setup_assessments(3)
changed_asmnt_id = assessment_ids[0]
norights_asmnt_ids = assessment_ids[1:]
_, assignee_user = self.gen.generate_person(user_role="Creator")
audit_role = factories.AccessControlRoleFactory(
name="Edit Role",
object_type="Audit",
update=True
)
with factories.single_commit():
assessment = all_models.Assessment.query.get(changed_asmnt_id)
assessment.add_person_with_role_name(assignee_user, "Creators")
acl = factories.AccessControlListFactory(
object_id=audit_id,
object_type="Audit",
ac_role_id=audit_role.id,
)
factories.AccessControlPersonFactory(
person=assignee_user,
ac_list=acl,
)
self.api.set_user(assignee_user)
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
self.assert_children_asmnt_issues([changed_asmnt_id])
self.assert_not_updated("Assessment", norights_asmnt_ids)
@ddt.data(
issuetracker_bulk_sync.WRONG_COMPONENT_ERR,
issuetracker_bulk_sync.WRONG_HOTLIST_ERR,
)
def test_invalid_component_id(self, error):
"""Test generation of issues if '{}' error raised."""
audit_id, assessment_ids = self.setup_assessments(3)
error = error.format("12345")
with mock.patch("ggrc.notifications.common.send_email"):
with mock.patch(
"ggrc.integrations.issues.Client.create_issue",
side_effect=integrations_errors.HttpError(error)
) as create_issue_mock:
response = self.api.send_request(
self.api.client.post,
api_link="/generate_children_issues",
data={
"parent": {"type": "Audit", "id": audit_id},
"child_type": "Assessment"
}
)
self.assert200(response)
self.assertEqual(
response.json.get("errors"),
[["Assessment", assessment_ids[0], "500 {}".format(error)]]
)
self.assertEqual(create_issue_mock.call_count, 1)
query = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.issue_id.isnot(None)
)
self.assertEqual(query.count(), 0)
def test_related_assessments(self):
"""Assessment with empty issuetracker_issue should be synced"""
with factories.single_commit():
audit = factories.AuditFactory()
factories.IssueTrackerIssueFactory(
issue_tracked_obj=audit,
issue_id=None,
component_id=12345,
hotlist_id=54321,
issue_priority="P2",
issue_severity="S2",
)
assess1 = factories.AssessmentFactory(audit=audit)
assess1_id = assess1.id
assess2 = factories.AssessmentFactory(audit=audit)
assess2_id = assess2.id
factories.IssueTrackerIssueFactory(
issue_tracked_obj=assess2,
issue_id=None,
component_id=9999,
hotlist_id=7777,
issue_priority="P1",
issue_severity="S1",
)
self.assertIsNone(assess1.issuetracker_issue)
with mock.patch("ggrc.notifications.common.send_email"):
response = self.generate_children_issues_for(
audit.type, audit.id, assess1.type
)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
assess1 = all_models.Assessment.query.get(assess1_id)
self.assertIsNotNone(
assess1.issuetracker_issue,
"issuetracker_issue was not created for assessment {}".format(
assess1.id
)
)
self.assertEqual("12345", assess1.issuetracker_issue.component_id)
self.assertEqual("54321", assess1.issuetracker_issue.hotlist_id)
self.assertEqual("P2", assess1.issuetracker_issue.issue_priority)
self.assertEqual("S2", assess1.issuetracker_issue.issue_severity)
assess2 = all_models.Assessment.query.get(assess2_id)
self.assertEqual("9999", assess2.issuetracker_issue.component_id)
self.assertEqual("7777", assess2.issuetracker_issue.hotlist_id)
self.assertEqual("P1", assess2.issuetracker_issue.issue_priority)
self.assertEqual("S1", assess2.issuetracker_issue.issue_severity)
def test_bg_operation_status(self):
"""Test background operation status endpoint."""
audit_id, _ = self.setup_assessments(3)
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
url = "background_task_status/{}/{}".format("audit", audit_id)
response = self.api.client.get(url)
self.assert200(response)
self.assertEqual(response.json.get("status"), "Success")
self.assertEqual(
response.json.get("operation"),
"generate_children_issues"
)
self.assertEqual(response.json.get("errors"), [])
def test_task_already_run_status(self):
"""Test if new task started when another is in progress."""
audit_id, _ = self.setup_assessments(1)
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
db.session.query(all_models.BackgroundTask).update({"status": "Running"})
db.session.commit()
with factories.single_commit():
asmnt = factories.AssessmentFactory(audit_id=audit_id)
audit = all_models.Audit.query.get(audit_id)
factories.RelationshipFactory(source=audit, destination=asmnt)
factories.IssueTrackerIssueFactory(
issue_tracked_obj=asmnt,
issue_id=None,
title=None,
)
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert400(response)
self.assertEqual(
response.json["message"],
"Task 'generate_children_issues' already run for Audit {}.".format(
audit_id
)
)
url = "background_task_status/{}/{}".format("audit", audit_id)
response = self.api.client.get(url)
self.assert200(response)
self.assertEqual(response.json.get("status"), "Running")
self.assertEqual(
response.json.get("operation"),
"generate_children_issues"
)
self.assertEqual(response.json.get("errors"), [])
def test_task_failed_status(self):
"""Test background task status if it failed."""
audit_id, _ = self.setup_assessments(2)
with mock.patch(
"ggrc.integrations.issuetracker_bulk_sync."
"IssueTrackerBulkChildCreator.sync_issuetracker",
side_effect=Exception("Test Error")
):
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
url = "background_task_status/{}/{}".format("audit", audit_id)
response = self.api.client.get(url)
self.assert200(response)
self.assertEqual(response.json.get("status"), "Failure")
self.assertEqual(
response.json.get("operation"),
"generate_children_issues"
)
self.assertEqual(response.json.get("errors"), [])
def test_errors_task_status(self):
"""Test background task status if it failed."""
audit_id, assessment_ids = self.setup_assessments(2)
with mock.patch(
"ggrc.integrations.issues.Client.create_issue",
side_effect=integrations_errors.HttpError("Test Error")
):
response = self.api.send_request(
self.api.client.post,
api_link="/generate_children_issues",
data={
"parent": {"type": "Audit", "id": audit_id},
"child_type": "Assessment"
}
)
self.assert200(response)
url = "background_task_status/{}/{}".format("audit", audit_id)
response = self.api.client.get(url)
self.assert200(response)
self.assertEqual(response.json.get("status"), "Success")
self.assertEqual(
response.json.get("errors"),
[["Assessment", id_, "500 Test Error"] for id_ in assessment_ids]
)
def test_child_err_notification(self):
"""Test notification about failed bulk child generation."""
audit_id, _ = self.setup_assessments(3)
_, side_user = self.gen.generate_person(user_role="Creator")
self.api.set_user(side_user)
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
self.assertEqual(send_mock.call_count, 1)
(email, title, body), _ = send_mock.call_args_list[0]
cur_user = all_models.Person.query.get(side_user.id)
child_creator = issuetracker_bulk_sync.IssueTrackerBulkChildCreator
self.assertEqual(email, cur_user.email)
self.assertEqual(title, child_creator.ISSUETRACKER_SYNC_TITLE)
self.assertIn("There were some errors in generating tickets", body)
def test_child_notification(self):
"""Test notification about succeeded bulk child generation."""
audit_id, _ = self.setup_assessments(3)
with mock.patch("ggrc.notifications.common.send_email") as send_mock:
response = self.generate_children_issues_for(
"Audit", audit_id, "Assessment"
)
self.assert200(response)
self.assertEqual(send_mock.call_count, 1)
(email, title, body), _ = send_mock.call_args_list[0]
child_creator = issuetracker_bulk_sync.IssueTrackerBulkChildCreator
self.assertEqual(email, "[email protected]")
self.assertEqual(title, child_creator.ISSUETRACKER_SYNC_TITLE)
title = all_models.Audit.query.get(audit_id).title
self.assertIn(
"Tickets generation for audit \"{}\" was completed".format(title),
body
)
def test_proper_revisions_creation(self):
"""Test all revisions are created for new IssuetrackerIssues"""
with factories.single_commit():
asmnt = factories.AssessmentFactory()
factories.IssueTrackerIssueFactory(issue_tracked_obj=asmnt.audit)
response = self.generate_children_issues_for(
"Audit", asmnt.audit.id, "Assessment"
)
self.assert200(response)
revisions = db.session.query(
all_models.Revision.action,
all_models.IssuetrackerIssue.object_type,
all_models.IssuetrackerIssue.object_id
).join(
all_models.IssuetrackerIssue,
all_models.Revision.resource_id == all_models.IssuetrackerIssue.id
).filter(
all_models.Revision.resource_type == 'IssuetrackerIssue',
all_models.IssuetrackerIssue.object_id.in_(
(asmnt.id, asmnt.audit.id)
)
).all()
expected_revisions = {
(u'created', u'Assessment', asmnt.id),
(u'modified', u'Assessment', asmnt.id),
(u'created', u'Audit', asmnt.audit.id)
}
self.assertEqual(set(revisions), expected_revisions)
@ddt.ddt
class TestBulkIssuesUpdate(TestBulkIssuesSync):
"""Test bulk issues update."""
def test_asmnt_bulk_update(self):
"""Test bulk update of issues for Assessments."""
_, assessment_ids = self.setup_assessments(3)
issues = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.object_type == "Assessment",
all_models.IssuetrackerIssue.object_id.in_(assessment_ids)
)
for issue in issues:
issue.enabled = 1
issue.title = ""
issue.component_id = "1"
issue.hotlist_id = "1"
issue.issue_type = constants.DEFAULT_ISSUETRACKER_VALUES['issue_type']
issue.issue_priority = "P2"
issue.issue_severity = "S2"
issue.assignee = "[email protected]"
issue.cc_list = ""
issue.issue_id = 123
issue.issue_url = "http://issue/{}".format(self.issue_id)
db.session.commit()
asmnt_issuetracker_info = [
("Assessment", id_, "123", "321") for id_ in assessment_ids
]
response = self.update_issues_for(asmnt_issuetracker_info)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
self.assert_obj_issues(asmnt_issuetracker_info)
def test_issue_bulk_generate(self):
"""Test bulk update of issues for Issues."""
issue_ids = []
with factories.single_commit():
for _ in range(3):
issue = factories.IssueFactory()
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=issue,
issue_id=self.issue_id,
title="",
component_id=12345,
hotlist_id=54321,
issue_priority="P2",
issue_severity="S2",
)
issue_ids.append(issue.id)
with factories.single_commit():
person = factories.PersonFactory()
for issue in all_models.Issue.query.all():
issue.modified_by = person
for role_name in ["Admin", "Primary Contacts"]:
issue.add_person_with_role_name(person, role_name)
# Verify that IssueTracker issues have not yet picked up data from their Issue objects
issues = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.object_type == "Issue",
all_models.IssuetrackerIssue.object_id.in_(issue_ids)
).all()
for issue in issues:
parent_obj = issue.Issue_issue_tracked
self.assertNotEqual(issue.title, parent_obj.title)
self.assertEqual(issue.assignee, None)
issue_issuetracker_info = [
("Issue", id_, None, None) for id_ in issue_ids
]
response = self.update_issues_for(issue_issuetracker_info)
self.assert200(response)
self.assertEqual(response.json.get("errors"), [])
# IssueTracker issues should be updated with proper values
issues = all_models.IssuetrackerIssue.query.filter(
all_models.IssuetrackerIssue.object_type == "Issue",
all_models.IssuetrackerIssue.object_id.in_(issue_ids)
).all()
for issue in issues:
parent_obj = issue.Issue_issue_tracked
self.assertEqual(issue.title, parent_obj.title)
self.assertEqual(issue.cc_list, "")
def test_rate_limited_update(self):
"""Test tickets update when issuetracker raise 429 error."""
_, assessment_ids = self.setup_assessments(3)
for issue in all_models.IssuetrackerIssue.query.all():
issue.issue_id = self.issue_id
db.session.commit()
error = integrations_errors.HttpError(data="Test Error", status=429)
with mock.patch(
"ggrc.integrations.issues.Client.update_issue",
side_effect=error
) as update_issue_mock:
with mock.patch("time.sleep"):
response = self.api.send_request(
self.api.client.post,
api_link="/update_issues",
data={
"objects": [{
"type": "Assessment",
"id": id_
} for id_ in assessment_ids],
}
)
self.assert200(response)
expected_errors = [
["Assessment", id_, "429 Test Error"]
for id_ in assessment_ids
]
self.assertEqual(response.json.get("errors"), expected_errors)
# 3 times for each assessment
self.assertEqual(update_issue_mock.call_count, 9)
@ddt.data("Issue", "Assessment")
def test_get_issue_json(self, model):
"""Test get_issue_json method issue's update"""
with factories.single_commit():
factory = factories.get_model_factory(model)
obj = factory()
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=obj,
title='title',
component_id=111,
hotlist_id=222,
issue_type="PROCESS",
issue_priority="P2",
issue_severity="S2",
)
expected_result = {
'component_id': 111,
'severity': u'S2',
'title': u'title',
'hotlist_ids': [222],
'priority': u'P2',
'type': u'PROCESS'
}
updater = issuetracker_bulk_sync.IssueTrackerBulkUpdater()
# pylint: disable=protected-access
result = updater._get_issue_json(obj)
self.assertEqual(expected_result, result)
@ddt.ddt
class TestBulkCommentUpdate(TestBulkIssuesSync):
"""Test adding comments to IssueTracker issues via bulk."""
@ddt.data(
("Issue", ["c1", "c2", "c3"]),
("Assessment", ["c1", "c2", "c3"]),
)
@ddt.unpack
@mock.patch("ggrc.integrations.issues.Client.update_issue")
def test_comment_bulk_update(self, model, comments, update_mock):
"""Test bulk comment's update requests are sent correctly"""
with factories.single_commit():
factory = factories.get_model_factory(model)
obj = factory()
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=obj,
issue_id=123,
)
request_data = {
"comments": [
{"type": obj.type, "id": obj.id, "comment_description": comment}
for comment in comments
],
"mail_data": {"user_email": "[email protected]"},
}
updater = issuetracker_bulk_sync.IssueTrackerCommentUpdater()
result = updater.sync_issuetracker(request_data)
builder = issue_tracker_params_builder.IssueParamsBuilder
template = builder.COMMENT_TMPL
url_builder = builder.get_ggrc_object_url
self.assert200(result)
# pylint: disable=consider-using-enumerate
for i in range(len(comments)):
self.assertEqual(update_mock.call_args_list[i][0][0], 123)
self.assertEqual(
update_mock.call_args_list[i][0][1]["comment"],
template.format(author="[email protected]",
model=model,
comment=comments[i],
link=url_builder(obj))
)
@ddt.data("Issue", "Assessment")
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
def test_comment_update_call(self, model):
"""Test bulk update calls appropriate methods"""
with factories.single_commit():
factory = factories.get_model_factory(model)
obj = factory()
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=obj,
issue_id=123,
)
comments = "c1;;c2;;c3"
with mock.patch.object(issuetracker_bulk_sync.IssueTrackerCommentUpdater,
"sync_issuetracker",
return_value=([], [])) as comment_mock:
with mock.patch.object(issuetracker_bulk_sync.IssueTrackerBulkCreator,
"sync_issuetracker",
return_value=([], [])) as create_mock:
with mock.patch.object(issuetracker_bulk_sync.IssueTrackerBulkUpdater,
"sync_issuetracker",
return_value=([], [])) as upd_mock:
response = self.import_data(OrderedDict([
("object_type", model),
("Code*", obj.slug),
("Comments", comments),
]))
expected_comments = [
{'comment_description': comment, 'type': model, 'id': obj.id}
for comment in comments.split(";;")
]
self._check_csv_response(response, {})
self.assertEqual(comment_mock.call_args[0][0]["comments"],
expected_comments)
upd_mock.assert_called_once()
create_mock.assert_not_called()
|
the-stack_0_10439 | import time
from collections import OrderedDict
import json
import os
import re
import praw
from dotenv import load_dotenv, find_dotenv
import requests
load_dotenv(find_dotenv())
client_id=os.environ['REDDIT_CLIENT_ID']
client_secret=os.environ['REDDIT_CLIENT_SECRET']
password=os.environ['REDDIT_PASSWORD']
username=os.environ['REDDIT_USERNAME']
subreddit=os.environ['SUBREDDIT']
user_agent='user-agent for /u/PhDComicsBot'
if __name__ == "__main__":
reddit = praw.Reddit(client_id=client_id,
client_secret=client_secret,
password=password,
username=username,
user_agent=user_agent)
while True:
print("Collecting all comics")
r = requests.get("http://phdcomics.com/comics/archive_list.php")
# Save for debugging if something goes wrong
with open('index.html', 'w') as outfile:
outfile.write(r.text)
comic_dict = OrderedDict()
# Regex to extract (comic id, date, title) triples from the archive listing HTML
regex = r'href\=.*?comicid=(\d+)>\s*<b>(.*?)</b>.*?<font.*?>(.*?)</font>'
BASE_URL = 'http://www.phdcomics.com/comics/archive.php?comicid='
# Some have newlines so use re.S to enable dot to match multilines
items = re.findall(regex, r.text, re.S)
for comic_id, date, title in items:
comic_url = BASE_URL + comic_id
comic_dict[comic_id] = {'id': comic_id, 'link': comic_url, 'date': date, 'title': title}
print("Saving it to data.json")
with open('data.json', 'w') as outfile:
json.dump(comic_dict, outfile)
last_comic_id = 0
try:
with open('last_comic.txt', 'r') as infile:
last_comic_id = int(infile.read())
except ValueError:
print("File is empty. Something wrong happened. Better exit the program")
exit(1)
except FileNotFoundError:
print("File not found so this must be the first run")
for comic_id in comic_dict:
if int(comic_id) <= last_comic_id:
continue
date = comic_dict[comic_id]['date']
title = comic_dict[comic_id]['title']
title = "{0} ({1})".format(title, date)
comic_url = BASE_URL + comic_id
print("Submitting {0} with title '{1}'".format(comic_url, title))
reddit.subreddit(subreddit).submit(title, url=comic_url)
print("Saving the latest comic id : {}".format(comic_id))
with open('last_comic.txt', 'w') as outfile:
outfile.write(comic_id)
break
time.sleep(24 * 60 * 60) # Sleep for a day
|
the-stack_0_10440 | from flask import Flask, request
import requests
import geopy
import re
# import geopy.distance
from geopy.geocoders import Nominatim
import json
from datetime import datetime
import constants
from twilio.twiml.messaging_response import MessagingResponse
# Create Flask app instance
app = Flask(__name__)
# Create geolocator object as an instance of geopy's Nominatim class
geolocator = Nominatim(user_agent="covid-bot", timeout=5)
# Base API URL
base_url = 'https://cdn-api.co-vin.in/api'
# The root endpoint
@app.route("/")
def hello():
return "Hello, World!"
# The /bot webhook endpoint
@app.route('/bot', methods=['POST'])
def bot():
# Get the incoming message request data
incoming_values = request.values
print("Incoming Values:\n", incoming_values)
# Get Geolocation sent by user
latitude = incoming_values.get('Latitude', '')
longitude = incoming_values.get('Longitude', '')
# Geopy geolocator API expects coordinates as a single comma separated string of latitude and longitude
geo_coordinates_string = ", ".join((latitude, longitude))
# Get the incoming message from incoming_values
incoming_msg = incoming_values.get('Body', '').lower()
if incoming_msg in constants.greeting_tokens:
# Return greeting message
return as_twilio_response(constants.welcome_message)
if 'help' in incoming_msg:
# Return help message
return as_twilio_response(constants.help_message)
if latitude:
geo_location_dict = get_reverse_geocode(geo_coordinates_string)
date_now = datetime.today().strftime('%d-%m-%Y')
# Get the location wise response
location_response = get_location_response(geo_location_dict, date_now)
return as_twilio_response(location_response)
m = re.match(r"^\d+$", incoming_msg)
if m:
date_now = datetime.today().strftime('%d-%m-%Y')
return as_twilio_response(get_location_response_by_pincode(m.string, date_now))
return as_twilio_response('Could not understand your message. Please type "help".')
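# Illustrative local test of this webhook (values are hypothetical; Twilio
# posts these fields as form data):
#   curl -X POST http://localhost:5000/bot -d "Body=help"
#   curl -X POST http://localhost:5000/bot -d "Body=560001"
#   curl -X POST http://localhost:5000/bot -d "Latitude=12.97" -d "Longitude=77.59" -d "Body="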
# Helper functions
def as_twilio_response(message: str) -> str:
resp = MessagingResponse()
msg = resp.message()
msg.body(message)
return str(resp)
def get_response(url):
response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0'})
return response.json()
# Get the address dict
def get_reverse_geocode(coordinates):
location = geolocator.reverse(coordinates, exactly_one=True)
address_dict = location.raw['address']
print("Address Dict:", address_dict)
return address_dict
def get_location_response_by_pincode(pincode, date_now):
appointment_api_by_pin = base_url + '/v2/appointment/sessions/public/findByPin?pincode={pincode}&date={date_now}'.format(pincode=pincode, date_now=date_now)
appointment_data = get_response(appointment_api_by_pin)
appointment_response = f'''
'''
sessions = appointment_data.get("sessions", [])
if sessions:
for idx, each in enumerate(sessions):
serial_number = idx + 1
name = each.get("name", "")
address = each.get("address", "")
district = each.get("district_name", "")
from_time = each.get("from", "")
to_time = each.get("to", "")
fee_type = each.get("fee_type", "")
fee = each.get("fee", 0)
available_capacity = each.get("available_capacity", 0)
min_age_limit = each.get("min_age_limit", 18)
vaccine = each.get("vaccine", "")
each_response = f'''
{serial_number}. {name}
{address}, {district}
Vaccine: {vaccine}, {fee_type}
Available: {available_capacity}
'''
appointment_response += each_response
else:
appointment_response = "0"
location_message = f'''
Your location pincode is {pincode}.
Available vaccine slots today: {appointment_response}
Visit www.cowin.gov.in to book your vaccination
'''
return location_message
def get_location_response(geo_location_dict, date_now):
pincode = geo_location_dict.get('postcode', '')
return get_location_response_by_pincode(pincode, date_now)
if __name__ == '__main__':
app.run()
|
the-stack_0_10442 | import unittest
from fds.analyticsapi.engines.api.linked_pa_templates_api import LinkedPATemplatesApi
from fds.analyticsapi.engines.model.linked_pa_template_parameters_root import LinkedPATemplateParametersRoot
from fds.analyticsapi.engines.model.linked_pa_template_parameters import LinkedPATemplateParameters
from fds.analyticsapi.engines.model.template_content_types import TemplateContentTypes
from fds.analyticsapi.engines.model.linked_pa_template_summary import LinkedPATemplateSummary
from fds.analyticsapi.engines.model.linked_pa_template_update_parameters import LinkedPATemplateUpdateParameters
from fds.analyticsapi.engines.model.linked_pa_template_update_parameters_root import LinkedPATemplateUpdateParametersRoot
from fds.analyticsapi.engines.model.linked_pa_template_root import LinkedPATemplateRoot
from fds.analyticsapi.engines.model.linked_pa_template import LinkedPATemplate
from common_functions import CommonFunctions
class TestLinkedPaTemplatesApi(unittest.TestCase):
def setUp(self):
self.linked_pa_templates_api = LinkedPATemplatesApi(CommonFunctions.build_api_client())
def test_a_create_linked_pa_template(self):
linked_pa_template_parameters = LinkedPATemplateParameters(
directory="Personal:SDKTests/DoNotModify/LinkedPATemplates/",
parent_component_id="801B800245E468A52AEBEC4BE31CFF5AF82F371DAEF5F158AC2E98C2FA324B46",
description="This is a linked PA template that only returns security level data",
content = TemplateContentTypes(
mandatory = ["accounts", "benchmarks"],
optional = ["groups", "columns"],
locked = ["componentdetail"]
)
)
linked_pa_template_parameters_root = LinkedPATemplateParametersRoot(
data = linked_pa_template_parameters
)
response = self.linked_pa_templates_api.create_linked_pa_templates(
linked_pa_template_parameters_root = linked_pa_template_parameters_root)
firsttemplate = list(response[0].data.keys())[0]
self.assertEqual(response[1], 201, "Response should be 201 - Success")
self.assertEqual(type(response[0].data), dict, "Response should be of Dictionary type.")
self.assertEqual(type(response[0].data[firsttemplate]),
LinkedPATemplateSummary, "Response should be of LinkedPATemplateSummary type.")
self.assertGreater(len(response[0].data), 0, "Response result should not be an empty list.")
def test_b_get_all_linked_pa_templates(self):
response = self.linked_pa_templates_api.get_linked_pa_templates(
directory = "Personal:SDKTests/DoNotModify/LinkedPATemplates/"
)
firsttemplate = list(response[0].data.keys())[0]
self.assertEqual(response[1], 200, "Response should be 200 - Success")
self.assertEqual(type(response[0].data), dict, "Response should be of Dictionary type.")
self.assertEqual(type(response[0].data[firsttemplate]),
LinkedPATemplateSummary, "Response should be of LinkedPATemplateSummary type.")
self.assertGreater(len(response[0].data), 0, "Response result should not be an empty list.")
def test_c_update_linked_pa_template(self):
templates = self.linked_pa_templates_api.get_linked_pa_templates(
directory = "Personal:SDKTests/DoNotModify/LinkedPATemplates/"
)
template_id = list(templates[0].data.keys())[0]
linked_pa_template_update_parameters = LinkedPATemplateUpdateParameters(
parent_component_id="801B800245E468A52AEBEC4BE31CFF5AF82F371DAEF5F158AC2E98C2FA324B46",
description="This is an updated linked PA template that only returns security level data",
content = TemplateContentTypes(
mandatory = ["accounts", "benchmarks"],
optional = ["groups", "columns"],
locked = ["componentdetail"]
)
)
linked_pa_template_update_parameters_root = LinkedPATemplateUpdateParametersRoot(
data = linked_pa_template_update_parameters
)
response = self.linked_pa_templates_api.update_linked_pa_templates(
id = template_id, linked_pa_template_update_parameters_root=linked_pa_template_update_parameters_root
)
self.assertEqual(response[1], 200, "Response should be 200 - Success")
self.assertEqual(type(response[0].data), dict, "Response should be of Dictionary type.")
self.assertEqual(type(response[0].data[template_id]),
LinkedPATemplateSummary, "Response should be of LinkedPATemplateSummary type.")
self.assertGreater(len(response[0].data), 0, "Response result should not be an empty list.")
def test_d_get_linked_pa_template_by_id(self):
templates = self.linked_pa_templates_api.get_linked_pa_templates(
directory = "Personal:SDKTests/DoNotModify/LinkedPATemplates/"
)
template_id = list(templates[0].data.keys())[0]
response = self.linked_pa_templates_api.get_linked_pa_templates_by_id(
id = template_id
)
self.assertEqual(response[1], 200, "Response should be 200 - Success")
self.assertEqual(type(response[0]), LinkedPATemplateRoot, "Response should be of LinkedPATemplateRoot type.")
self.assertEqual(type(response[0].data),
LinkedPATemplate, "Response should be of LinkedPATemplate type.")
def test_e_delete_linked_pa_template(self):
templates = self.linked_pa_templates_api.get_linked_pa_templates(
directory = "Personal:SDKTests/DoNotModify/LinkedPATemplates/"
)
template_id = list(templates[0].data.keys())[0]
response = self.linked_pa_templates_api.delete_linked_pa_templates(
id = template_id
)
self.assertEqual(response[1], 204, "Response should be 204 - Success")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_10443 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1D8STdsizybSqDGCDn19s8R8Fn6KcDW9xg(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1D8STdsizybSqDGCDn19s8R8Fn6KcDW9xg.json')
def test_storage_encoding_KT1D8STdsizybSqDGCDn19s8R8Fn6KcDW9xg(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1D8STdsizybSqDGCDn19s8R8Fn6KcDW9xg(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1D8STdsizybSqDGCDn19s8R8Fn6KcDW9xg(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
the-stack_0_10444 | import os
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
import argparse
import torch.utils.data as data
from data import AnnotationTransform, BaseTransform, VOCDetection, detection_collate, coco_detection_collate, seq_detection_collate, mb_cfg, dataset_training_cfg, COCOroot, COCODetection
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss, RefineMultiBoxLoss
from layers.functions import PriorBox
import numpy as np
import time
import logging
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def print_log(args):
logging.info('model_name: '+ args.model_name)
logging.info('ssd_dim: '+ str(args.ssd_dim))
logging.info('Backbone: '+ args.backbone)
logging.info('BN: '+ str(args.bn))
logging.info('Conv7 Channel: '+ str(args.c7_channel))
if 'RefineDet' in args.backbone:
logging.info('Refine: ' + str(args.refine))
logging.info('Deform: ' + str(args.deform))
logging.info('Multi-head: ' + str(args.multihead))
if args.resume:
logging.info('resume: '+ args.resume )
logging.info('start_iter: '+ str(args.start_iter))
elif args.resume_from_ssd:
logging.info('resume_from_ssd: '+ args.resume_from_ssd )
else:
logging.info('load pre-trained backbone: '+ args.basenet )
logging.info('lr: '+ str(args.lr))
logging.info('warm_epoch: '+ str(args.warm_epoch))
logging.info('gamma: '+ str(args.gamma))
logging.info('step_list: '+ str(args.step_list))
logging.info('save_interval: '+ str(args.save_interval))
logging.info('dataset_name: '+ args.dataset_name )
logging.info('set_file_name: '+ args.set_file_name )
logging.info('gpu_ids: '+ args.gpu_ids)
logging.info('augm_type: '+ args.augm_type)
logging.info('batch_size: '+ str(args.batch_size))
logging.info('loss weights: '+ str(args.loss_coe))
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
parser.add_argument('--basenet', default='vgg16bn_reducedfc.pth', help='pretrained base model')
parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')
parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')
parser.add_argument('--resume', default=None, type=str, help='Resume from checkpoint') #'./weights/tssd300_VID2017_b8s8_RSkipTBLstm_baseAugmDrop2Clip5_FixVggExtraPreLocConf/ssd300_seqVID2017_20000.pth'
parser.add_argument('--resume_from_ssd', default=None, type=str, help='Resume vgg and extras from ssd checkpoint')
parser.add_argument('--num_workers', default=8, type=int, help='Number of workers used in dataloading')
parser.add_argument('--start_iter', default=0, type=int, help='Begin counting iterations starting from this value (should be used with resume)')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
parser.add_argument('--log_iters', default=True, type=bool, help='Print the loss at each iteration')
parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')
parser.add_argument('--save_folder', default='./weights040/test', help='Location to save checkpoint models')
parser.add_argument('--dataset_name', default='VOC0712', help='VOC0712/VIDDET/seqVID2017/MOT17Det/seqMOT17Det')
parser.add_argument('--step_list', nargs='+', type=int, default=[30,50], help='step_list for learning rate')
parser.add_argument('--backbone', default='RefineDet_ResNet101', type=str, help='Backbone')
parser.add_argument('--c7_channel', default=1024, type=int, help='out_channel of Conv7 in VGG')
parser.add_argument('--refine', default=True, type=str2bool, help='Only work when backbone==RefineDet')
parser.add_argument('--deform', default=1, type=int, help='number of deform groups. 0: do not use deformable conv. Only works when backbone==RefineDet')
parser.add_argument('--multihead', default=True, type=str2bool, help='Multihead detection')
parser.add_argument('--drop', default=1.0, type=float, help='DropOut, Only work when backbone==RefineDet')
parser.add_argument('--model_name', default='ssd', type=str, help='which model selected')
parser.add_argument('--ssd_dim', default=320, type=int, help='ssd_dim 300, 320 or 512')
parser.add_argument('--gpu_ids', default='4,5', type=str, help='gpu number')
parser.add_argument('--augm_type', default='base', type=str, help='how to transform data')
parser.add_argument('--set_file_name', default='train', type=str, help='train_VID_DET/train_video_remove_no_object/train, MOT dataset does not use it')
parser.add_argument('--loss_coe', nargs='+', type=float, default=[1.0,1.0, 0.5], help='coefficients for loc, conf, att, asso')
parser.add_argument('--bn', default=False, type=str2bool, help='use BatchNorm layers in the backbone')
parser.add_argument('--save_interval', default=10, type=int, help='frequency of checkpoint saving')
parser.add_argument('--warm_epoch', default=0, type=int, help='warm epoch')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
current_time = time.strftime("%b_%d_%H:%M:%S_%Y", time.localtime())
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(args.save_folder, current_time+'.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
print_log(args)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
ssd_dim = args.ssd_dim
if args.dataset_name in ['MOT15', 'seqMOT15']:
prior = 'MOT_300'
cfg = mb_cfg[prior]
else:
prior = 'VOC_'+ str(args.ssd_dim)
if args.ssd_dim==300 and 'RFB' in args.backbone:
prior += '_RFB'
elif args.ssd_dim==512 and 'RefineDet' in args.backbone:
prior += '_RefineDet'
cfg = mb_cfg[prior]
train_sets, num_classes, data_root = dataset_training_cfg[args.dataset_name]
logging.info('train sets: ' + str(train_sets))
set_filename = args.set_file_name
if args.dataset_name[:3] == 'seq':
collate_fn = seq_detection_collate
elif args.dataset_name == 'COCO':
collate_fn = coco_detection_collate
else:
collate_fn = detection_collate
if args.dataset_name == 'UW':
means = (128, 128, 128)
else:
means = (104, 117, 123)
mean_np = np.array(means, dtype=np.int32)
batch_size = args.batch_size
weight_decay = args.weight_decay
max_epoch = args.step_list[-1]
gamma = 0.1
momentum = args.momentum
if args.visdom:
import visdom
viz = visdom.Visdom()
if 'RFB' in args.backbone:
from model.rfbnet_vgg import build_net
ssd_net = build_net('train', ssd_dim, num_classes, bn=args.bn)
elif 'RefineDet' in args.backbone:
if 'MobNet' in args.backbone:
if args.deform:
from model.dualrefinedet_mobilenet import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes,
def_groups=args.deform, multihead=args.multihead)
else:
from model.refinedet_mobilenet import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, use_refine=args.refine)
elif args.deform:
from model.dualrefinedet_vggbn import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel, def_groups=args.deform, bn=args.bn, multihead=args.multihead)
else:
from model.refinedet_vgg import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, use_refine=args.refine, c7_channel=args.c7_channel, bn=args.bn, multihead=args.multihead)
elif 'MobNet' in args.backbone:
from model.ssd4scale_mobile import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel)
elif '4s' in args.backbone:
from model.ssd4scale_vgg import build_net
ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel, bn=args.bn)
else:
ssd_net = None
net = ssd_net
if device==torch.device('cuda'):
net = torch.nn.DataParallel(ssd_net)
cudnn.benchmark = True
print(ssd_net)
net = net.to(device)
if args.resume:
logging.info('Resuming training, loading {}...'.format(args.resume))
ssd_net.load_weights(args.resume)
else:
backbone_weights = torch.load('../weights/'+ args.basenet)
logging.info('Loading base network...')
ssd_net.backbone.load_state_dict(backbone_weights)
if not args.resume:
from model.networks import net_init
net_init(ssd_net, args.backbone, logging, refine=args.refine, deform=args.deform, multihead=args.multihead)
if args.augm_type == 'ssd':
data_transform = SSDAugmentation
else:
data_transform = BaseTransform
optimizer = optim.SGD(net.parameters(),
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# criterion
if 'RefineDet' in args.backbone and args.refine:
use_refine = True
arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False, device=device, only_loc=True)
criterion = RefineMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False, device=device)
else:
use_refine = False
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False, device=device)
priorbox = PriorBox(cfg)
with torch.no_grad():
priors = priorbox.forward().to(device)
def train():
net.train()
epoch = args.start_iter
if args.dataset_name == 'COCO':
dataset = COCODetection(COCOroot, year='trainval2014', image_sets=train_sets, transform=data_transform(ssd_dim, means), phase='train')
else:
dataset = VOCDetection(data_root, train_sets, data_transform(ssd_dim, means),
AnnotationTransform(dataset_name=args.dataset_name),
dataset_name=args.dataset_name, set_file_name=set_filename)
epoch_size = len(dataset) // args.batch_size
drop_step = [s * epoch_size for s in args.step_list]
max_iter = max_epoch * epoch_size
logging.info('Loading Dataset:' + args.dataset_name + ' dataset size: ' +str(len(dataset)))
step_index = 0
if args.visdom:
# initialize visdom loss plot
y_dim = 3
legend = ['Loss', 'Loc Loss', 'Conf Loss',]
if use_refine:
y_dim += 1
legend += ['Arm Loc Loss',]
lot = viz.line(
X=torch.zeros((1,)),
Y=torch.zeros((1, y_dim)),
opts=dict(
xlabel='Iteration',
ylabel='Loss',
title=args.save_folder.split('/')[-1],
legend=legend,
)
)
batch_iterator = None
data_loader = data.DataLoader(dataset, batch_size, num_workers=args.num_workers, shuffle=True,
collate_fn=collate_fn,
pin_memory=True)
for iteration in range(epoch*epoch_size, max_iter + 10):
if (not batch_iterator) or (iteration % epoch_size == 0):
# create batch iterator
batch_iterator = iter(data_loader)
if epoch % args.save_interval == 0:
logging.info('Saving state, epoch: '+ str(epoch))
torch.save(ssd_net.state_dict(), os.path.join(args.save_folder, args.model_name + str(
ssd_dim) + '_' + args.dataset_name + '_' +repr(epoch) + '.pth'))
epoch += 1
t0 = time.time()
if iteration in drop_step:
step_index = drop_step.index(iteration) + 1
adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)
# adjust_learning_rate(optimizer, args.gamma)
collected_data = next(batch_iterator)
with torch.no_grad():
images, targets = collected_data[:2]
images = images.to(device)
targets = [anno.to(device) for anno in targets]
# forward
loss = torch.tensor(0., requires_grad=True).to(device)
out = net(images)
# backward
optimizer.zero_grad()
if use_refine:
loss_arm_l = arm_criterion(out[0], priors, targets)
loss_l, loss_c = criterion(out[2:], priors, targets, arm_data=out[:2])
loss += args.loss_coe[0] * loss_arm_l
else:
loss_l, loss_c = criterion(out, priors, targets)
loss += args.loss_coe[0] * loss_l + args.loss_coe[1] * loss_c
loss.backward()
optimizer.step()
t1 = time.time()
if iteration % 10 == 0:
if use_refine:
logging.info('Epoch:' + repr(epoch) + ', epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) + ', total_iter ' + repr(
iteration) + ' || loss: %.4f, Loss_l: %.4f, loss_c: %.4f, loss_arm_l: %.4f, lr: %.5f || Timer: %.4f sec.' % (
loss, loss_l, loss_c, loss_arm_l, optimizer.param_groups[0]['lr'], t1 - t0))
else:
logging.info('Epoch:' + repr(epoch) + ', epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) + ', total_iter ' + repr(
iteration) + ' || loss: %.4f, Loss_l: %.4f, loss_c: %.4f, lr: %.5f || Timer: %.4f sec.' % (loss, loss_l, loss_c, optimizer.param_groups[0]['lr'], t1 - t0))
if args.visdom:
y_dis = [loss.cpu(), args.loss_coe[0]*loss_l.cpu(), args.loss_coe[1]*loss_c.cpu()]
if iteration == 1000:
# initialize visdom loss plot
lot = viz.line(
X=torch.zeros((1,)),
Y=torch.zeros((1, y_dim)),
opts=dict(
xlabel='Iteration',
ylabel='Loss',
title=args.save_folder.split('/')[-1],
legend=legend,
)
)
if use_refine:
y_dis += [args.loss_coe[0]*loss_arm_l.cpu(),]
# update = 'append' if iteration
viz.line(
X=torch.ones((1, y_dim)) * iteration,
Y=torch.FloatTensor(y_dis).unsqueeze(0),
win=lot,
update='append',
opts=dict(
xlabel='Iteration',
ylabel='Loss',
title=args.save_folder.split('/')[-1],
legend=legend,)
)
torch.save(ssd_net.state_dict(),
os.path.join(args.save_folder, args.model_name + str(ssd_dim) + '_' + args.dataset_name + '_' +
repr(iteration) + '.pth'))
print('Completed training. Saving state, iter:', iteration)
# def adjust_learning_rate(optimizer, gamma):
# for param_group in optimizer.param_groups:
# param_group['lr'] *= gamma
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
if epoch <= args.warm_epoch:
lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * args.warm_epoch)
else:
lr = args.lr * (gamma ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# return lr
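# Worked example of what this schedule computes (assuming lr=1e-4, gamma=0.1,
# warm_epoch=1 and epoch_size=1000): while epoch <= 1 the returned rate ramps
# linearly from 1e-6 at iteration 0 towards 1e-4 at iteration 1000; afterwards
# it is lr * gamma**step_index, i.e. 1e-4, 1e-5 and 1e-6 for step_index 0, 1
# and 2. Note that train() only invokes this function at the iterations listed
# in drop_step.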
if __name__ == '__main__':
train()
|
the-stack_0_10445 | from __future__ import absolute_import
from django.db.models import Q
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission
)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.models import (
AuditLogEntryEvent, OrganizationAccessRequest,
OrganizationMember, OrganizationMemberTeam, Team
)
ERR_INSUFFICIENT_ROLE = 'You cannot modify a member other than yourself.'
class OrganizationMemberTeamSerializer(serializers.Serializer):
isActive = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
_allowed_scopes = [
'org:read', 'org:write', 'org:admin',
'member:read', 'member:write', 'member:admin',
]
scope_map = {
'GET': _allowed_scopes,
'POST': _allowed_scopes,
'PUT': _allowed_scopes,
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': _allowed_scopes,
}
class OrganizationMemberTeamDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _can_access(self, request, member):
# TODO(dcramer): ideally org owners/admins could perform these actions
if request.is_superuser():
return True
if not request.user.is_authenticated():
return False
if request.user.id == member.user_id:
return True
return False
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
user__is_active=True,
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def post(self, request, organization, member_id, team_slug):
"""
Join a team
Join or request access to a team.
If the user is already a member of the team, this will simply return
a 204.
If the user needs permission to join the team, an access request will
be generated and the returned status code will be 202.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
if not (request.access.has_scope('org:write') or organization.flags.allow_joinleave):
omt, created = OrganizationAccessRequest.objects.get_or_create(
team=team,
member=om,
)
if created:
omt.send_request_email()
return Response(status=202)
omt = OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
)
else:
return Response(status=204)
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(
team, request.user, TeamWithProjectsSerializer()), status=201)
def delete(self, request, organization, member_id, team_slug):
"""
Leave a team
Leave a team.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
pass
else:
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_LEAVE_TEAM,
data=omt.get_audit_log_data(),
)
omt.delete()
return Response(serialize(
team, request.user, TeamWithProjectsSerializer()), status=200)
|
the-stack_0_10446 | from schematic.models.metadata import MetadataModel
from schematic import CONFIG
config = CONFIG.load_config("schematic/config.yml")
inputMModelLocation = CONFIG["model"]["input"]["location"]
inputMModelLocationType = CONFIG["model"]["input"]["file_type"]
manifest_title = CONFIG["manifest"]["title"]
manifest_data_type = CONFIG["manifest"]["data_type"]
metadata_model = MetadataModel(inputMModelLocation, inputMModelLocationType)
|
the-stack_0_10447 | from __future__ import division, print_function
import argparse
import datetime
import json
import os
import os.path
import shlex
import subprocess
DATA_TABLE_NAME = "ncbi_taxonomy_sqlite"
def build_sqlite(taxonomy_dir, output_directory, name=None, description=None):
if not os.path.exists(output_directory):
os.mkdir(output_directory)
output_filename = os.path.join(output_directory, "tax.ncbitaxonomy.sqlite")
cmd_str = "taxonomy_util -d {} to_sqlite {}".format(output_filename, taxonomy_dir)
cmd = shlex.split(cmd_str)
subprocess.check_call(cmd)
today_str = datetime.date.today().strftime("%Y-%m-%d")
if name is None or name.strip() == "":
name = "ncbitaxonomy_build_" + today_str
if description is None or description.strip() == "":
description = "NCBI Taxonomy database (built on {})".format(today_str)
data = [dict(value=name, description=description, path=output_filename)]
return data
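# Shape of the data-table entry returned above (values are illustrative):
#   [{"value": "ncbitaxonomy_build_2021-05-01",
#     "description": "NCBI Taxonomy database (built on 2021-05-01)",
#     "path": "<output_directory>/tax.ncbitaxonomy.sqlite"}]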
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Build SQLite database from NCBI taxonomy"
)
parser.add_argument(
"--output_directory", default="tmp", help="Directory to write output to"
)
parser.add_argument(
"taxonomy_dir",
help="Path to directory containing NCBI Taxonomy nodes.dml and names.dmp file"
)
parser.add_argument(
"name",
help="Name to use for the entry in the data table"
)
parser.add_argument(
"description",
help="Description to use for the entry in the data table"
)
parser.add_argument(
"galaxy_datamanager_filename",
help="Galaxy JSON format file describing data manager inputs",
)
args = parser.parse_args()
config = json.load(open(args.galaxy_datamanager_filename))
output_directory = config.get("output_data", [{}])[0].get("extra_files_path", None)
if output_directory is None:
output_directory = args.output_directory
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
data_manager_dict = {}
data_manager_dict["data_tables"] = json.load(
open(args.galaxy_datamanager_filename)
).get("data_tables", {})
data_manager_dict["data_tables"] = data_manager_dict.get("data_tables", {})
data_manager_dict["data_tables"][DATA_TABLE_NAME] = data_manager_dict[
"data_tables"
].get(DATA_TABLE_NAME, [])
data = build_sqlite(args.taxonomy_dir, output_directory, args.name, args.description)
data_manager_dict["data_tables"][DATA_TABLE_NAME].extend(data)
print(json.dumps(data_manager_dict))
json.dump(data_manager_dict, open(args.galaxy_datamanager_filename, "w"))
|
the-stack_0_10448 | """Customized Django paginators."""
from __future__ import unicode_literals
from math import ceil
from django.core.paginator import (
EmptyPage,
Page,
PageNotAnInteger,
Paginator,
)
class CustomPage(Page):
"""Handle different number of items on the first page."""
def start_index(self):
"""Return the 1-based index of the first item on this page."""
paginator = self.paginator
# Special case, return zero if no items.
if paginator.count == 0:
return 0
elif self.number == 1:
return 1
return (
(self.number - 2) * paginator.per_page + paginator.first_page + 1)
def end_index(self):
"""Return the 1-based index of the last item on this page."""
paginator = self.paginator
# Special case for the last page because there can be orphans.
if self.number == paginator.num_pages:
return paginator.count
return (self.number - 1) * paginator.per_page + paginator.first_page
class BasePaginator(Paginator):
"""A base paginator class subclassed by the other real paginators.
Handle different number of items on the first page.
"""
def __init__(self, object_list, per_page, **kwargs):
self._num_pages = None
if 'first_page' in kwargs:
self.first_page = kwargs.pop('first_page')
else:
self.first_page = per_page
super(BasePaginator, self).__init__(object_list, per_page, **kwargs)
def get_current_per_page(self, number):
return self.first_page if number == 1 else self.per_page
class DefaultPaginator(BasePaginator):
"""The default paginator used by this application."""
def page(self, number):
number = self.validate_number(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + self.get_current_per_page(number)
if top + self.orphans >= self.count:
top = self.count
return CustomPage(self.object_list[bottom:top], number, self)
def _get_num_pages(self):
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(0, self.count - self.orphans - self.first_page)
try:
self._num_pages = int(ceil(hits / float(self.per_page))) + 1
except ZeroDivisionError:
self._num_pages = 0 # fallback to a safe value
return self._num_pages
num_pages = property(_get_num_pages)
class LazyPaginator(BasePaginator):
"""Implement lazy pagination."""
def validate_number(self, number):
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
number = self.validate_number(number)
current_per_page = self.get_current_per_page(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + current_per_page
# Retrieve more objects to check if there is a next page.
objects = list(self.object_list[bottom:top + self.orphans + 1])
objects_count = len(objects)
if objects_count > (current_per_page + self.orphans):
# If another page is found, increase the total number of pages.
self._num_pages = number + 1
# In any case, return only objects for this page.
objects = objects[:current_per_page]
elif (number != 1) and (objects_count <= self.orphans):
raise EmptyPage('That page contains no results')
else:
# This is the last page.
self._num_pages = number
return CustomPage(objects, number, self)
def _get_count(self):
raise NotImplementedError
count = property(_get_count)
def _get_num_pages(self):
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
raise NotImplementedError
page_range = property(_get_page_range)
|
the-stack_0_10449 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Convert graph partitioning instances into Pauli list
# Deal with Gset format. See https://web.stanford.edu/~yyye/yyye/Gset/
import logging
from collections import OrderedDict
import numpy as np
import numpy.random as rand
from qiskit.quantum_info import Pauli
from qiskit_aqua import Operator
logger = logging.getLogger(__name__)
def random_graph(n, weight_range=10, edge_prob=0.3, savefile=None, seed=None):
"""Generate random Erdos-Renyi graph.
Args:
n (int): number of nodes.
weight_range (int): weights will be smaller than this value,
in absolute value.
edge_prob (float): probability of edge appearing.
savefile (str or None): name of file where to save graph.
seed (int or None): random seed - if None, will not initialize.
Returns:
numpy.ndarray: adjacency matrix (with weights).
"""
assert(weight_range >= 0)
if seed:
rand.seed(seed)
w = np.zeros((n, n))
m = 0
for i in range(n):
for j in range(i+1, n):
if rand.rand() <= edge_prob:
w[i, j] = rand.randint(1, weight_range)
if rand.rand() >= 0.5:
w[i, j] *= -1
m += 1
w += w.T
if savefile:
with open(savefile, 'w') as outfile:
outfile.write('{} {}\n'.format(n, m))
for i in range(n):
for j in range(i+1, n):
if w[i, j] != 0:
outfile.write('{} {} {}\n'.format(i + 1, j + 1, w[i, j]))
return w
def get_graphpartition_qubitops(weight_matrix):
"""Generate Hamiltonian for the graph partitioning
Args:
weight_matrix (numpy.ndarray) : adjacency matrix.
Returns:
operator.Operator, float: operator for the Hamiltonian and a
constant shift for the obj function.
Goals:
1. separate the vertices into two sets of the same size;
2. minimize the number of edges between the two sets.
Hamiltonian:
H = H_A + H_B
H_A = sum_{(i,j) in E} (1 - Z_i Z_j) / 2
H_B = (sum_i Z_i)^2 = sum_i Z_i^2 + sum_{i != j} Z_i Z_j
H_A enforces goal 2 and H_B enforces goal 1.
"""
num_nodes = len(weight_matrix)
pauli_list = []
shift = 0
for i in range(num_nodes):
for j in range(i):
if weight_matrix[i, j] != 0:
xp = np.zeros(num_nodes, dtype=np.bool)
zp = np.zeros(num_nodes, dtype=np.bool)
zp[i] = True
zp[j] = True
pauli_list.append([-0.5, Pauli(zp, xp)])
shift += 0.5
for i in range(num_nodes):
for j in range(num_nodes):
if i != j:
xp = np.zeros(num_nodes, dtype=np.bool)
zp = np.zeros(num_nodes, dtype=np.bool)
zp[i] = True
zp[j] = True
pauli_list.append([1, Pauli(zp, xp)])
else:
shift += 1
return Operator(paulis=pauli_list), shift
def parse_gset_format(filename):
"""Read graph in Gset format from file.
Args:
filename (str): name of the file.
Returns:
numpy.ndarray: adjacency matrix as a 2D numpy array.
"""
n = -1
with open(filename) as infile:
header = True
m = -1
count = 0
for line in infile:
v = map(lambda e: int(e), line.split())
if header:
n, m = v
w = np.zeros((n, n))
header = False
else:
s, t, x = v
s -= 1 # adjust 1-index
t -= 1 # ditto
                w[s, t] = x  # store the edge weight (Gset lines are "s t x"), not the node index
count += 1
assert m == count
w += w.T
return w
def objective_value(x, w):
"""Compute the value of a cut.
Args:
x (numpy.ndarray): binary string as numpy array.
w (numpy.ndarray): adjacency matrix.
Returns:
float: value of the cut.
"""
X = np.outer(x, (1-x))
w_01 = np.where(w != 0, 1, 0)
return np.sum(w_01 * X)
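# A small worked illustration (added for clarity; not in the original file): for
# x = [0, 1] and a single unweighted edge between nodes 0 and 1, the outer
# product np.outer(x, 1 - x) marks exactly the (1, 0) pair, so the cut value
# counts that one crossing edge.
def _example_objective_value():
    x = np.array([0, 1])
    w = np.array([[0, 1], [1, 0]])
    return objective_value(x, w)  # evaluates to 1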
def get_graph_solution(x):
"""Get graph solution from binary string.
Args:
x (numpy.ndarray) : binary string as numpy array.
Returns:
numpy.ndarray: graph solution as binary numpy array.
"""
return 1 - x
def sample_most_likely(state_vector):
"""Compute the most likely binary string from state vector.
Args:
state_vector (numpy.ndarray or dict): state vector or counts.
Returns:
numpy.ndarray: binary string as numpy.ndarray of ints.
"""
if isinstance(state_vector, dict) or isinstance(state_vector, OrderedDict):
# get the binary string with the largest count
binary_string = sorted(state_vector.items(), key=lambda kv: kv[1])[-1][0]
x = np.asarray([int(y) for y in reversed(list(binary_string))])
return x
else:
n = int(np.log2(state_vector.shape[0]))
k = np.argmax(np.abs(state_vector))
x = np.zeros(n)
for i in range(n):
x[i] = k % 2
k >>= 1
return x
def get_gset_result(x):
"""Get graph solution in Gset format from binary string.
Args:
x (numpy.ndarray) : binary string as numpy array.
Returns:
Dict[int, int]: graph solution in Gset format.
"""
return {i + 1: 1 - x[i] for i in range(len(x))}
|
the-stack_0_10451 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch, Hazytrainer
from fastreid.utils.checkpoint import Checkpointer
from fastreid.engine import hooks
from fastreid.evaluation import ReidEvaluator
class H_Trainer(Hazytrainer):
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return ReidEvaluator(cfg, num_query)
class BaseTrainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return ReidEvaluator(cfg, num_query)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
logger = logging.getLogger("fastreid.trainer")
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = H_Trainer.build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model
if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model):
prebn_cfg = cfg.clone()
prebn_cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN
logger.info("Prepare precise BN dataset")
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
model,
# Build a new data loader to not affect training
H_Trainer.build_train_loader(prebn_cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
).update_stats()
res = H_Trainer.test(cfg, model)
return res
trainer = H_Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser()
args.add_argument("--info", type=str, default="test", help="information of parameters and losses")
args = args.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
the-stack_0_10452 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow.models.dag import DAG
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.operators.s3_bucket import S3CreateBucketOperator, S3DeleteBucketOperator
from airflow.utils.dates import days_ago
BUCKET_NAME = os.environ.get('BUCKET_NAME', 'test-airflow-12345')
def upload_keys():
"""This is a python callback to add keys into the s3 bucket"""
# add keys to bucket
s3_hook = S3Hook()
for i in range(0, 3):
s3_hook.load_string(
string_data="input",
key=f"path/data{i}",
bucket_name=BUCKET_NAME,
)
with DAG(
dag_id='s3_bucket_dag',
schedule_interval=None,
start_date=days_ago(2),
max_active_runs=1,
tags=['example'],
) as dag:
create_bucket = S3CreateBucketOperator(
task_id='s3_bucket_dag_create',
bucket_name=BUCKET_NAME,
region_name='us-east-1',
)
add_keys_to_bucket = PythonOperator(
task_id="s3_bucket_dag_add_keys_to_bucket", python_callable=upload_keys
)
delete_bucket = S3DeleteBucketOperator(
task_id='s3_bucket_dag_delete',
bucket_name=BUCKET_NAME,
force_delete=True,
)
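    # The bitshift chain below is Airflow's way of declaring task order:
    # create the bucket first, then upload the keys, then delete the bucket.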
create_bucket >> add_keys_to_bucket >> delete_bucket
|
the-stack_0_10455 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import logging
import signal
import uuid
import json
import datetime
from threading import Thread, current_thread
import mesos.interface
from mesos.interface import mesos_pb2
import mesos.native
logging.basicConfig(level=logging.DEBUG)
def run_driver(*args, **kwargs):
"""
    Starts the Mesos driver in a separate thread.
    Stops the driver when SIGINT is received in the main thread.
"""
driver = mesos.native.MesosSchedulerDriver(*args, **kwargs)
def run_driver_async():
status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1
driver.stop()
sys.exit(status)
framework_thread = Thread(target=run_driver_async)
framework_thread.daemon = True
framework_thread.start()
if current_thread().name == "MainThread":
signal.signal(signal.SIGINT, lambda signal, frame: driver.stop())
class FailureScheduler(mesos.interface.Scheduler):
"""
    Starts an executor for each failure config.
    Passes the config needed to run the failure scenario via task.data.
"""
def __init__(self, executor, logger, task_retry):
self.executor = executor
self.services_statuses = {}
self.logger = logger
self.task_retry = task_retry
def make_task_prototype(self, offer, cpus, mem, service_name):
"""
Creates task with needed resources
"""
task = mesos_pb2.TaskInfo()
task_id = uuid.uuid4().hex
task.task_id.value = task_id
task.slave_id.value = offer.slave_id.value
task.name = "pisaura-failure-runner-{}".format(service_name.replace(" ", "-"))
cpus_r = task.resources.add()
cpus_r.name = "cpus"
cpus_r.type = mesos_pb2.Value.SCALAR
cpus_r.scalar.value = cpus
mem_r = task.resources.add()
mem_r.name = "mem"
mem_r.type = mesos_pb2.Value.SCALAR
mem_r.scalar.value = mem
return task
def add_service(self, service):
self.services_statuses[service['service']] = {
'service': service,
'status': mesos_pb2.TASK_STAGING,
'tasks': [],
'logs': [],
'updated': str(datetime.datetime.utcnow()),
'created': str(datetime.datetime.utcnow())
}
def make_task(self, offer, service):
task = self.make_task_prototype(
offer, service['cpus'], service['mem'], service['service'])
task.data = json.dumps(service)
task.executor.MergeFrom(self.executor)
if service['service'] in self.services_statuses:
self.services_statuses[service['service']]['status'] = None
self.services_statuses[service['service']]['tasks'].append(task.task_id.value)
else:
self.services_statuses[service['service']] = {
'service': service,
'status': None,
'tasks': [task.task_id.value]
}
return task
def registered(self, driver, frameworkId, masterInfo):
self.logger.info("Registered with framework ID %s" % frameworkId.value)
def log_offer_stat(self, offer):
offerCpus = 0
offerMem = 0
for resource in offer.resources:
if resource.name == "cpus":
offerCpus += resource.scalar.value
elif resource.name == "mem":
offerMem += resource.scalar.value
self.logger.debug(
"Received offer %s with cpus: %s and mem: %s", offer.id.value,
offerCpus, offerMem)
def get_next_service(self):
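        """
        Return the next failure config to (re)schedule: a service whose last
        status allows a retry (error/failed/staging) and whose launch count is
        still below the task_retry limit.
        """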
retry_statuses = [mesos_pb2.TASK_ERROR, mesos_pb2.TASK_FAILED, mesos_pb2.TASK_STAGING]
for service_name in self.services_statuses:
self.logger.debug("Trying to commit %s as next service", service_name)
tasks_count = len(self.services_statuses[service_name]['tasks'])
status = self.services_statuses[service_name]['status']
if status not in retry_statuses:
continue
if status is None and tasks_count:
continue
if tasks_count < self.task_retry:
return self.services_statuses[service_name]['service']
else:
self.logger.debug(
"retry count exceeded for service %s", service_name)
def resourceOffers(self, driver, offers):
for offer in offers:
self.log_offer_stat(offer)
service = self.get_next_service()
self.logger.debug("Next service is %s", service)
if not service:
driver.declineOffer(offer.id)
return
task = self.make_task(offer, service)
self.logger.info("Launching task {task} "
"using offer {offer}.".format(task=task.task_id.value,
offer=offer.id.value))
tasks = [task]
driver.launchTasks(offer.id, tasks)
def statusUpdate(self, driver, update):
self.logger.debug(
"Task %s is in state %s, message: %s" % (
update.task_id.value, mesos_pb2.TaskState.Name(update.state), update.message))
for service_name in self.services_statuses:
if update.task_id.value in self.services_statuses[service_name]['tasks']:
self.logger.info(
"Move service %s to the state %s",
service_name, mesos_pb2.TaskState.Name(update.state))
self.services_statuses[service_name]['status'] = update.state
self.services_statuses[service_name]['logs'] = json.loads(update.data or "[]")
self.services_statuses[service_name]['updated'] = str(datetime.datetime.utcnow())
def frameworkMessage(self, driver, executor_id, slave_id, message):
self.logger.info("Received framework message %s", message)
def init_executor(app_config):
"""
Creates mesos executor using given config dict.
"""
uris = app_config['resources']
executor = mesos_pb2.ExecutorInfo()
executor.executor_id.value = "%s-executor" % app_config['framework_name']
executor.command.value = app_config['executor_command']
for uri in uris:
uri_proto = executor.command.uris.add()
uri_proto.value = uri
uri_proto.extract = False if not uri.endswith(".tar.gz") else True
executor.name = app_config['framework_name'].capitalize()
return executor
def run(application_config):
"""
Main function for setup and run FailureScheduler.
"""
executor = init_executor(application_config)
framework = mesos_pb2.FrameworkInfo()
framework.user = "" # Have Mesos fill in the current user.
framework.name = application_config['framework_name']
logger = logging.getLogger("pisaura.scheduler")
scheduler = FailureScheduler(
executor, logger, application_config['task_retry'])
run_driver(scheduler, framework, application_config['master'])
return scheduler
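# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The keys below are the ones actually read by init_executor()/run()/
# FailureScheduler above; the concrete values are placeholders only.
EXAMPLE_APPLICATION_CONFIG = {
    "framework_name": "pisaura",                           # executor id prefix and FrameworkInfo.name
    "executor_command": "python failure_executor.py",      # command the executor runs (placeholder)
    "resources": ["http://example.com/executor.tar.gz"],   # URIs fetched by Mesos (placeholder)
    "task_retry": 3,                                       # max task attempts per service
    "master": "zk://localhost:2181/mesos",                 # Mesos master address (placeholder)
}
# A scheduler could then be started with: scheduler = run(EXAMPLE_APPLICATION_CONFIG)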
|
the-stack_0_10456 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_sshd
short_description: Manage the SSHD settings of a BIG-IP
description:
- Manage the SSHD settings of a BIG-IP.
version_added: 2.2
options:
allow:
description:
- Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
- To specify all addresses, use the value C(all).
- IP address can be specified, such as 172.27.1.10.
      - IP ranges can be specified, such as 172.27.*.* or 172.27.0.0/255.255.0.0.
- To remove SSH access specify an empty list or an empty string.
type: list
banner:
description:
- Whether to enable the banner or not.
type: str
choices:
- enabled
- disabled
banner_text:
description:
- Specifies the text to include on the pre-login banner that displays
when a user attempts to login to the system using SSH.
type: str
inactivity_timeout:
description:
- Specifies the number of seconds before inactivity causes an SSH
session to log out.
type: int
log_level:
description:
- Specifies the minimum SSHD message level to include in the system log.
type: str
choices:
- debug
- debug1
- debug2
- debug3
- error
- fatal
- info
- quiet
- verbose
login:
description:
- Specifies, when checked C(enabled), that the system accepts SSH
communications.
type: str
choices:
- enabled
- disabled
port:
description:
- Port that you want the SSH daemon to run on.
type: int
notes:
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the banner for the SSHD service from a string
bigip_device_sshd:
banner: enabled
banner_text: banner text goes here
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the banner for the SSHD service from a file
bigip_device_sshd:
banner: enabled
banner_text: "{{ lookup('file', '/path/to/file') }}"
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the SSHD service to run on port 2222
bigip_device_sshd:
port: 2222
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
allow:
description:
- Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
returned: changed
type: list
sample: 192.0.2.*
banner:
description: Whether the banner is enabled or not.
returned: changed
type: str
sample: true
banner_text:
description:
- Specifies the text included on the pre-login banner that
displays when a user attempts to login to the system using SSH.
returned: changed and success
type: str
sample: This is a corporate device. Connecting to it without...
inactivity_timeout:
description:
- The number of seconds before inactivity causes an SSH
session to log out.
returned: changed
type: int
sample: 10
log_level:
description: The minimum SSHD message level to include in the system log.
returned: changed
type: str
sample: debug
login:
description: Specifies that the system accepts SSH communications or not.
returned: changed
type: bool
sample: true
port:
description: Port that you want the SSH daemon to run on.
returned: changed
type: int
sample: 22
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_empty_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_empty_list
class Parameters(AnsibleF5Parameters):
api_map = {
'bannerText': 'banner_text',
'inactivityTimeout': 'inactivity_timeout',
'logLevel': 'log_level',
}
api_attributes = [
'allow', 'banner', 'bannerText', 'inactivityTimeout',
'logLevel', 'login', 'port',
]
updatables = [
'allow', 'banner', 'banner_text', 'inactivity_timeout',
'log_level', 'login', 'port',
]
returnables = [
'allow', 'banner', 'banner_text', 'inactivity_timeout',
'log_level', 'login', 'port',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def inactivity_timeout(self):
if self._values['inactivity_timeout'] is None:
return None
return int(self._values['inactivity_timeout'])
@property
def port(self):
if self._values['port'] is None:
return None
return int(self._values['port'])
@property
def allow(self):
allow = self._values['allow']
if allow is None:
return None
if is_empty_list(allow):
return []
return allow
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def allow(self):
if self.want.allow is None:
return None
if not self.want.allow:
if self.have.allow is None:
return None
if self.have.allow is not None:
return self.want.allow
if self.have.allow is None:
return self.want.allow
if set(self.want.allow) != set(self.have.allow):
return self.want.allow
@property
def banner_text(self):
if self.want.banner_text is None:
return None
if self.want.banner_text == '' and self.have.banner_text is None:
return None
if self.want.banner_text != self.have.banner_text:
return self.want.banner_text
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
return self.update()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/sshd/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/sshd/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.choices = ['enabled', 'disabled']
self.levels = [
'debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info',
'quiet', 'verbose'
]
self.supports_check_mode = True
argument_spec = dict(
allow=dict(
type='list'
),
banner=dict(
choices=self.choices
),
banner_text=dict(),
inactivity_timeout=dict(
type='int'
),
log_level=dict(
choices=self.levels
),
login=dict(
choices=self.choices
),
port=dict(
type='int'
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
the-stack_0_10457 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from glob import glob
import nibabel as nib
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import monai
from monai.data import ImageDataset, create_test_image_3d, decollate_batch
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, RandRotate90, RandSpatialCrop, ScaleIntensity, EnsureType
from monai.visualize import plot_2d_or_3d_image
def main(tempdir):
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# create a temporary directory and 40 random image, mask pairs
print(f"generating synthetic data to {tempdir} (this may take a while)")
for i in range(40):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
# define transforms for image and segmentation
train_imtrans = Compose(
[
ScaleIntensity(),
AddChannel(),
RandSpatialCrop((96, 96, 96), random_size=False),
RandRotate90(prob=0.5, spatial_axes=(0, 2)),
EnsureType(),
]
)
train_segtrans = Compose(
[
AddChannel(),
RandSpatialCrop((96, 96, 96), random_size=False),
RandRotate90(prob=0.5, spatial_axes=(0, 2)),
EnsureType(),
]
)
val_imtrans = Compose([ScaleIntensity(), AddChannel(), EnsureType()])
val_segtrans = Compose([AddChannel(), EnsureType()])
# define image dataset, data loader
check_ds = ImageDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans)
check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
im, seg = monai.utils.misc.first(check_loader)
print(im.shape, seg.shape)
# create a training data loader
train_ds = ImageDataset(images[:20], segs[:20], transform=train_imtrans, seg_transform=train_segtrans)
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
# create a validation data loader
val_ds = ImageDataset(images[-20:], segs[-20:], transform=val_imtrans, seg_transform=val_segtrans)
val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, pin_memory=torch.cuda.is_available())
dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# create UNet, DiceLoss and Adam optimizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = monai.networks.nets.UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
loss_function = monai.losses.DiceLoss(sigmoid=True)
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
# start a typical PyTorch training
val_interval = 2
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = list()
metric_values = list()
writer = SummaryWriter()
for epoch in range(5):
print("-" * 10)
print(f"epoch {epoch + 1}/{5}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
val_images = None
val_labels = None
val_outputs = None
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
roi_size = (96, 96, 96)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
# aggregate the final mean dice result
metric = dice_metric.aggregate().item()
# reset the status for next validation round
dice_metric.reset()
metric_values.append(metric)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), "best_metric_model_segmentation3d_array.pth")
print("saved new best metric model")
print(
"current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
epoch + 1, metric, best_metric, best_metric_epoch
)
)
writer.add_scalar("val_mean_dice", metric, epoch + 1)
# plot the last model output as GIF image in TensorBoard with the corresponding image and label
plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
writer.close()
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
main(tempdir)
|
the-stack_0_10458 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import re
import textwrap
from typing import (
Any, Callable, NamedTuple, Optional, Dict, Sequence, Set, Type, TypeVar
)
import warnings
from jax._src.config import config
from jax._src import dtypes
from jax._src.lax import lax as lax_internal
from jax._src.numpy.ndarray import ndarray
from jax._src.util import safe_zip
from jax._src import api
from jax import core
from jax._src.lax import lax
import numpy as np
_T = TypeVar("_T")
_parameter_break = re.compile("\n(?=[A-Za-z_])")
_section_break = re.compile(r"\n(?=[^\n]{3,15}\n-{3,15})", re.MULTILINE)
_numpy_signature_re = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\([\w\W]*?\)$', re.MULTILINE)
_versionadded = re.compile(r'^\s+\.\.\s+versionadded::', re.MULTILINE)
_docreference = re.compile(r':doc:`(.*?)\s*<.*?>`')
class ParsedDoc(NamedTuple):
"""
docstr: full docstring
signature: signature from docstring.
summary: summary from docstring.
front_matter: front matter before sections.
sections: dictionary of section titles to section content.
"""
docstr: Optional[str]
signature: str = ""
summary: str = ""
front_matter: str = ""
sections: Dict[str, str] = {}
def _parse_numpydoc(docstr: Optional[str]) -> ParsedDoc:
"""Parse a standard numpy-style docstring.
Args:
docstr: the raw docstring from a function
Returns:
ParsedDoc: parsed version of the docstring
"""
if docstr is None or not docstr.strip():
return ParsedDoc(docstr)
# Remove any :doc: directives in the docstring to avoid sphinx errors
docstr = _docreference.sub(
lambda match: f"{match.groups()[0]}", docstr)
signature, body = "", docstr
match = _numpy_signature_re.match(body)
if match:
signature = match.group()
body = docstr[match.end():]
firstline, _, body = body.partition('\n')
body = textwrap.dedent(body.lstrip('\n'))
match = _numpy_signature_re.match(body)
if match:
signature = match.group()
body = body[match.end():]
summary = firstline
if not summary:
summary, _, body = body.lstrip('\n').partition('\n')
body = textwrap.dedent(body.lstrip('\n'))
front_matter = ""
body = "\n" + body
section_list = _section_break.split(body)
if not _section_break.match(section_list[0]):
front_matter, *section_list = section_list
sections = {section.split('\n', 1)[0]: section for section in section_list}
return ParsedDoc(docstr=docstr, signature=signature, summary=summary,
front_matter=front_matter, sections=sections)
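# A small illustrative sketch (added for clarity; not part of the original
# file): _parse_numpydoc splits a numpy-style docstring into a summary, front
# matter, and named sections keyed by their titles.
def _example_parse_numpydoc():
  doc = """Compute a thing.

  Some front matter.

  Parameters
  ----------
  x : int
      The input.
  """
  parsed = _parse_numpydoc(doc)
  # parsed.summary == "Compute a thing." and "Parameters" in parsed.sections
  return parsed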
def _parse_parameters(body: str) -> Dict[str, str]:
"""Parse the Parameters section of a docstring."""
title, underline, content = body.split('\n', 2)
assert title == 'Parameters'
assert underline and not underline.strip('-')
parameters = _parameter_break.split(content)
return {p.partition(' : ')[0].partition(', ')[0]: p for p in parameters}
def _parse_extra_params(extra_params: str) -> Dict[str, str]:
"""Parse the extra parameters passed to _wraps()"""
parameters = _parameter_break.split(extra_params.strip('\n'))
return {p.partition(' : ')[0].partition(', ')[0]: p for p in parameters}
def _wraps(
fun: Optional[Callable[..., Any]],
update_doc: bool = True,
lax_description: str = "",
sections: Sequence[str] = ('Parameters', 'Returns', 'References'),
skip_params: Sequence[str] = (),
extra_params: Optional[str] = None,
) -> Callable[[_T], _T]:
"""Specialized version of functools.wraps for wrapping numpy functions.
This produces a wrapped function with a modified docstring. In particular, if
`update_doc` is True, parameters listed in the wrapped function that are not
supported by the decorated function will be removed from the docstring. For
this reason, it is important that parameter names match those in the original
numpy function.
Args:
fun: The function being wrapped
update_doc: whether to transform the numpy docstring to remove references of
parameters that are supported by the numpy version but not the JAX version.
If False, include the numpy docstring verbatim.
lax_description: a string description that will be added to the beginning of
the docstring.
sections: a list of sections to include in the docstring. The default is
["Parameters", "returns", "References"]
skip_params: a list of strings containing names of parameters accepted by the
function that should be skipped in the parameter list.
extra_params: an optional string containing additional parameter descriptions.
When ``update_doc=True``, these will be added to the list of parameter
descriptions in the updated doc.
"""
def wrap(op):
docstr = getattr(fun, "__doc__", None)
try:
name = f"{fun.__module__}.{fun.__name__}"
except AttributeError:
name = getattr(fun, "__name__", getattr(op, "__name__", str(op)))
if docstr:
try:
parsed = _parse_numpydoc(docstr)
if update_doc and 'Parameters' in parsed.sections:
code = getattr(getattr(op, "__wrapped__", op), "__code__", None)
# Remove unrecognized parameter descriptions.
parameters = _parse_parameters(parsed.sections['Parameters'])
if extra_params:
parameters.update(_parse_extra_params(extra_params))
parsed.sections['Parameters'] = (
"Parameters\n"
"----------\n" +
"\n".join(_versionadded.split(desc)[0].rstrip() for p, desc in parameters.items()
if (code is None or p in code.co_varnames) and p not in skip_params)
)
docstr = parsed.summary.strip() + "\n" if parsed.summary else ""
docstr += f"\nLAX-backend implementation of :func:`{name}`.\n"
if lax_description:
docstr += "\n" + lax_description.strip() + "\n"
docstr += "\n*Original docstring below.*\n"
        # We remove signatures from the docstrings, because they are redundant at best and
        # misleading at worst: e.g. JAX wrappers don't implement all ufunc keyword arguments.
# if parsed.signature:
# docstr += "\n" + parsed.signature.strip() + "\n"
if parsed.front_matter:
docstr += "\n" + parsed.front_matter.strip() + "\n"
kept_sections = (content.strip() for section, content in parsed.sections.items()
if section in sections)
if kept_sections:
docstr += "\n" + "\n\n".join(kept_sections) + "\n"
except:
if config.jax_enable_checks:
raise
docstr = fun.__doc__
op.__doc__ = docstr
op.__np_wrapped__ = fun
for attr in ['__name__', '__qualname__']:
try:
value = getattr(fun, attr)
except AttributeError:
pass
else:
setattr(op, attr, value)
return op
return wrap
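# A minimal, hedged usage sketch (added for clarity; not part of the original
# file): _wraps is meant to be applied as a decorator so that a JAX
# implementation inherits a pruned version of the corresponding NumPy
# docstring. The wrapper shown is purely illustrative.
#
#   @_wraps(np.clip, skip_params=["out"])
#   def clip(a, a_min=None, a_max=None, out=None):
#     ...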
_dtype = partial(dtypes.dtype, canonicalize=True)
def _asarray(arr):
"""
Pared-down utility to convert object to a DeviceArray.
Note this will not correctly handle lists or tuples.
"""
_check_arraylike("_asarray", arr)
dtype, weak_type = dtypes._lattice_result_type(arr)
return lax_internal._convert_element_type(arr, dtype, weak_type)
def _promote_shapes(fun_name, *args):
"""Apply NumPy-style broadcasting, making args shape-compatible for lax.py."""
if len(args) < 2:
return args
else:
shapes = [np.shape(arg) for arg in args]
if all(len(shapes[0]) == len(s) for s in shapes[1:]):
return args # no need for rank promotion, so rely on lax promotion
nonscalar_ranks = {len(shp) for shp in shapes if shp}
if len(nonscalar_ranks) < 2:
return args
else:
if config.jax_numpy_rank_promotion != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
if config.jax_dynamic_shapes:
# With dynamic shapes we don't support singleton-dimension broadcasting;
# we instead broadcast out to the full shape as a temporary workaround.
res_shape = lax.broadcast_shapes(*shapes)
return [_broadcast_to(arg, res_shape) for arg, shp in zip(args, shapes)]
else:
result_rank = len(lax.broadcast_shapes(*shapes))
return [_broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)
for arg, shp in zip(args, shapes)]
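# A small, hedged illustration (added for clarity; not part of the original
# file): with rank promotion allowed, the lower-rank operand is padded with
# leading singleton dimensions, so a shape-(3,) array paired with a
# shape-(2, 3) array comes back with shapes (1, 3) and (2, 3).
def _example_promote_shapes():
  x, y = np.ones(3), np.ones((2, 3))
  xp, yp = _promote_shapes("example", x, y)
  return np.shape(xp), np.shape(yp)  # ((1, 3), (2, 3)) when promotion is allowed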
def _rank_promotion_warning_or_error(fun_name, shapes):
if config.jax_numpy_rank_promotion == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif config.jax_numpy_rank_promotion == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
def _promote_dtypes(*args):
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return args
else:
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype)
return [lax_internal._convert_element_type(x, to_dtype, weak_type) for x in args]
def _promote_dtypes_inexact(*args):
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype)
to_dtype_inexact = _to_inexact_dtype(to_dtype)
weak_type = (weak_type and to_dtype == to_dtype_inexact)
return [lax_internal._convert_element_type(x, to_dtype_inexact, weak_type)
for x in args]
def _to_inexact_dtype(dtype):
"""Promotes a dtype into an inexact dtype, if it is not already one."""
return dtype if dtypes.issubdtype(dtype, np.inexact) else dtypes.promote_types(dtype, dtypes.float_)
def _complex_elem_type(dtype):
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return np.abs(np.zeros((), dtype)).dtype
def _arraylike(x):
return (isinstance(x, np.ndarray) or isinstance(x, ndarray) or
hasattr(x, '__jax_array__') or np.isscalar(x))
def _stackable(*args):
return all(type(arg) in stackables for arg in args)
stackables: Set[Type] = set()
_register_stackable: Callable[[Type], None] = stackables.add
def _check_arraylike(fun_name, *args):
"""Check if all args fit JAX's definition of arraylike."""
assert isinstance(fun_name, str), f"fun_name must be a string. Got {fun_name}"
if any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = "{} requires ndarray or scalar arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def _check_no_float0s(fun_name, *args):
"""Check if none of the args have dtype float0."""
if any(dtypes.dtype(arg) is dtypes.float0 for arg in args):
raise TypeError(
f"Called {fun_name} with a float0 array. "
"float0s do not support any operations by design because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
def _promote_args(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion."""
_check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_inexact(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
_check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))
@partial(api.jit, inline=True)
def _broadcast_arrays(*args):
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [np.shape(arg) for arg in args]
if not shapes or all(core.symbolic_equal_shape(shapes[0], s) for s in shapes):
# TODO(mattjj): remove the array(arg) here
return [arg if isinstance(arg, ndarray) or np.isscalar(arg) else _asarray(arg)
for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [_broadcast_to(arg, result_shape) for arg in args]
def _broadcast_to(arr, shape):
if hasattr(arr, "broadcast_to"):
return arr.broadcast_to(shape)
_check_arraylike("broadcast_to", arr)
arr = arr if isinstance(arr, ndarray) else _asarray(arr)
if not isinstance(shape, tuple) and np.ndim(shape) == 0:
shape = (shape,)
shape = core.canonicalize_shape(shape) # check that shape is concrete
arr_shape = np.shape(arr)
if core.symbolic_equal_shape(arr_shape, shape):
return arr
else:
nlead = len(shape) - len(arr_shape)
shape_tail = shape[nlead:]
compatible = all(core.symbolic_equal_one_of_dim(arr_d, [1, shape_d])
for arr_d, shape_d in safe_zip(arr_shape, shape_tail))
if nlead < 0 or not compatible:
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
diff, = np.where(tuple(not core.symbolic_equal_dim(arr_d, shape_d)
for arr_d, shape_d in safe_zip(arr_shape, shape_tail)))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(lax.squeeze(arr, tuple(diff)), shape, kept_dims)
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@api.jit
def _where(condition, x=None, y=None):
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not np.issubdtype(_dtype(condition), np.bool_):
condition = lax.ne(condition, lax_internal._zero(condition))
x, y = _promote_dtypes(x, y)
condition, x, y = _broadcast_arrays(condition, x, y)
try: is_always_empty = core.is_empty_shape(np.shape(x))
except: is_always_empty = False # can fail with dynamic shapes
return lax.select(condition, x, y) if not is_always_empty else x
|
the-stack_0_10460 | __author__ = 'Sergei'
from model.contact import Contact
def test_contact_new(app):
old_contact = app.contact.get_contact_list()
    contacts = Contact(first_n="first", mid_n="middle", last_n="last", nick_n="kuk",
                       company="adda", address="575 oiweojdckjgsd,russia",
                       home_ph="12134519827", cell_ph="120092340980", email="[email protected]")
app.contact.create_c(contacts)
assert len(old_contact)+1 == app.contact.count_first()
new_contact = app.contact.get_contact_list()
old_contact.append(contacts)
assert sorted(old_contact, key=Contact.id_or_max) == sorted(new_contact, key=Contact.id_or_max)
|
the-stack_0_10462 | """
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019-2020 Abhishek Thakur(@abhiTronix) <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import os
import sys
import pytest
import platform
import logging as log
import tempfile
from vidgear.gears import VideoGear
from vidgear.gears.helper import logger_handler
# define test logger
logger = log.getLogger("Test_videogear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
# define machine os
_windows = True if os.name == "nt" else False
def return_testvideo_path():
"""
returns Test video path
"""
path = "{}/Downloads/Test_videos/BigBuckBunny_4sec.mp4".format(
tempfile.gettempdir()
)
return os.path.abspath(path)
@pytest.mark.skipif((platform.system() != "Linux"), reason="Not Implemented")
def test_PiGear_import():
"""
Testing VideoGear Import -> assign to fail when PiGear class is imported
"""
# cleanup environment
try:
del sys.modules["picamera"]
del sys.modules["picamera.array"]
except KeyError:
pass
try:
stream = VideoGear(enablePiCamera=True, logging=True).start()
stream.stop()
except Exception as e:
if isinstance(e, ImportError):
pytest.xfail(str(e))
else:
pytest.fail(str(e))
# Video credit: http://www.liushuaicheng.org/CVPR2014/index.html
test_data = [
(
"https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/example4_train_input.mp4",
{
"SMOOTHING_RADIUS": 5,
"BORDER_SIZE": 10,
"BORDER_TYPE": "replicate",
"CROP_N_ZOOM": True,
},
),
(
"https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/example_empty_train_input.mp4",
{
"SMOOTHING_RADIUS": 5,
"BORDER_SIZE": 15,
"BORDER_TYPE": "reflect",
},
),
(
"https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/example4_train_input.mp4",
{
"SMOOTHING_RADIUS": "5",
"BORDER_SIZE": "15",
"BORDER_TYPE": ["reflect"],
"CROP_N_ZOOM": "yes",
},
),
(return_testvideo_path(), {"BORDER_TYPE": "im_wrong"}),
]
@pytest.mark.parametrize("source, options", test_data)
def test_video_stablization(source, options):
"""
Testing VideoGear's Video Stablization playback capabilities
"""
try:
# open stream
stab_stream = VideoGear(
source=source, stabilize=True, logging=True, **options
).start()
framerate = stab_stream.framerate
# playback
while True:
frame = stab_stream.read() # read stablized frames
if frame is None:
break
# clean resources
stab_stream.stop()
logger.debug("Input Framerate: {}".format(framerate))
assert framerate > 0
except Exception as e:
pytest.fail(str(e))
|
the-stack_0_10465 | import unittest
from pathlib import Path
from taskcat import Config
from taskcat.testing._unit_test import UnitTest
from taskcat.testing.base_test import BaseTest
class TestUnitTest(unittest.TestCase):
BaseTest.__abstractmethods__ = set()
@classmethod
def setUpClass(cls):
input_file = ".taskcat.yml"
project_root_path = Path(__file__).parent / "../data/nested-fail"
input_file_path = project_root_path / input_file
cls.base_config = Config.create(
project_root=project_root_path, project_config_path=input_file_path,
)
def test_methods(self):
test = UnitTest(self.base_config)
with self.assertRaises(NotImplementedError):
test.run()
with self.assertRaises(NotImplementedError):
test.clean_up()
def test_inheritance(self):
test = UnitTest(self.base_config)
self.assertIsInstance(test, BaseTest)
|
the-stack_0_10466 | """
This file offers the methods to automatically retrieve the graph Eubacterium sp. AB3007.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def EubacteriumSpAb3007(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Eubacterium sp. AB3007 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Eubacterium sp. AB3007 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="EubacteriumSpAb3007",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
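# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Calling the retriever with its defaults downloads and caches the STRING data
# on first use and returns an ensmallen Graph object, e.g.:
#
#     graph = EubacteriumSpAb3007(directed=False, version="links.v11.5")
#     print(graph)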
|
the-stack_0_10467 | #! /usr/bin/env python
try:
from io import StringIO
import builtins
except ImportError: # Python 3
from io import StringIO
import builtins as __builtin__
import glob
import os
import shutil
import sys
sys.path.append(os.path.dirname(sys.path[0]))
import AUTOutil
import bifDiag
import parseB
import parseBandS
import parseC
import parseH
import parseS
SIMPLE=0
EXPERT=1
import AUTOExceptions
#############################################
# commands
#############################################
def command(f,*args,**kw):
# This is a class factory that produces a class that can be used
# to make macros of commands.
class cmd(object):
if len(args) == 2:
type = args[0]
shortName = args[1]
alias = kw.get("alias",[])
fun = staticmethod(f)
__doc__ = f.__doc__
def __init__(self,*args,**kw):
self.args = args
self.kw = kw
# The call function must return something that you
# can call the "print" function on
def __call__(self):
return self.fun(*self.args,**self.kw)
def undo(self):
raise Exception("Undo undefined for this command")
return cmd
##############################################
# Generic Commands
##############################################
def macro(command_list):
for command in command_list:
command()
commandMacro = command(macro)
# info messages: override this function or sys.stdout to redirect
def info(s):
sys.stdout.write(s)
# interact with a .exe file
def interact(command,*args):
if not os.path.exists(command):
command = command + '.exe'
fullcmd = " ".join([command]+list(args))
if os.spawnv(os.P_WAIT,command, (os.path.basename(command),) + args) != 0:
raise AUTOExceptions.AUTORuntimeError("Error running %s" % fullcmd)
info("Finished running: " + fullcmd + "\n")
##############################################
# Script based commands from $AUTO_DIR/97/cmds
##############################################
def clean():
"""Clean the current directory.
Type FUNC() to clean the current directory. This command will
delete all files of the form fort.*, *.*~, *.o, and *.exe.
"""
toclean = (glob.glob("fort.*") + glob.glob("*.o") + glob.glob("*.exe")+
glob.glob("*.*~"))
# remove duplicates
files = []
for f in toclean:
if f not in files:
files.append(f)
for f in files:
os.remove(f)
info("Deleting fort.* *.o *.exe *.*~ ... done\n")
commandClean = command(clean,alias=['cl'])
def copydemo(name):
"""Copy a demo into the current directory.
Type FUNC('xxx') to copy all files from auto/07p/demos/xxx to the
current user directory. Here 'xxx' denotes a demo name; e.g.,
'abc'. To avoid the overwriting of existing
files, always run demos in a clean work directory.
"""
demodir = os.path.join(os.environ["AUTO_DIR"],"demos",name)
for f in glob.glob(os.path.join(demodir,"*")):
if os.path.isdir(f):
subdir = f[len(demodir)+len(os.sep):]
try:
os.remove(subdir)
except OSError:
pass
try:
os.mkdir(subdir)
except OSError:
pass
for f2 in glob.glob(os.path.join(f,"*")):
try:
shutil.copy(f2, subdir)
except IOError:
pass
try:
shutil.copy(f, ".")
except IOError:
pass
if (os.path.exists(os.path.join(demodir,"c.%s.1"%name)) and
not os.path.exists(os.path.join(demodir,"c.%s"%name))):
shutil.copy("c.%s.1"%name,"c.%s"%name)
info("Copying demo %s ... done\n"%name)
commandCopyDemo = command(copydemo,SIMPLE,"demo")
def demo(name,runner=None):
"""Copy a demo into the current directory and load it.
Type FUNC('xxx') to copy all files from auto/07p/demos/xxx to the
current user directory. Here 'xxx' denotes a demo name; e.g.,
'abc'. To avoid the overwriting of existing
files, always run demos in a clean work directory. NOTE: This
command automatically performs the load command as well.
"""
runner = withrunner(runner)
lst = [commandCopyDemo(name)]
slash = name.rfind("/")
if slash != -1:
name = name[slash+1:]
lst.append(commandRunnerLoadName(name,runner))
return macro(lst)
commandCopyAndLoadDemo = command(demo,alias=['dm'])
def df():
"""Clear the current directory of fort files.
Type FUNC() to clean the current directory. This command will
delete all files of the form fort.*.
"""
toclean = glob.glob("fort.*")
for f in toclean:
os.remove(f)
info("Deleting fort.* ... done\n")
commandDeleteFortFiles = command(df,alias=['deletefort'])
def us(name,templates=None):
"""Convert user-supplied data files.
Type FUNC('xxx') to convert a user-supplied data file 'xxx.dat' to
AUTO format. The converted file is called 's.dat'. The original
file is left unchanged. AUTO automatically sets the period in
PAR(11). Other parameter values must be set in 'STPNT'. (When
necessary, PAR(11) may also be redefined there.) The
constants-file file 'c.xxx' must be present, as the AUTO-constants
'NTST' and 'NCOL' are used to define the new mesh. For examples
of using the 'userData' command see demos 'lor' and 'pen' (where
it has the old name 'fc').
Note: this technique has been obsoleted by the 'dat' AUTO constant.
"""
info("Starting conversion of %s.dat : \n"%name)
if glob.glob("%s.f90"%name) == []:
if glob.glob("%s.f"%name) == []:
equation_file="%s.c"%name
else:
equation_file="%s.f"%name
else:
equation_file="%s.f90"%name
cfile = applyTemplate(name,"constants",templates)
datfile = "%s.dat"%name
info("(Required files : %s, %s, %s)\n"%(equation_file,cfile,
datfile))
from . import runAUTO
fconrun = runAUTO.runAUTO(makefile="$AUTO_DIR/cmds/cmds.make fcon")
fconrun.config(e=name)
fconrun.runMakefile(name)
if os.path.exists(cfile):
shutil.copy(cfile,"fort.2")
if os.path.exists(datfile):
shutil.copy(datfile,"fort.3")
interact("./fcon")
sfile = applyTemplate("dat","solution",templates)
if os.path.exists("fort.8"):
if os.path.exists(sfile):
os.remove(sfile)
os.rename("fort.8",sfile)
info("Conversion done : converted file saved as %s\n"%sfile)
files = glob.glob("fcon*") + ["fort.2", "fort.3"]
for f in files:
os.remove(f)
commandUserData = command(us,alias=['userdata'])
##############################################
# Commands which use the filename templates
##############################################
def applyTemplate(text,template,templates=None):
if templates is None:
templates = {}
templates["equation"] = "EQUATION_NAME=%s"
templates["constants"] = "c.%s"
templates["bifurcationDiagram"] = "b.%s"
templates["solution"] = "s.%s"
templates["diagnostics"] = "d.%s"
templates["homcont"] = "h.%s"
if text is None:
return None
elif type(text) in [type(""), type(1), type(1.0)]:
rval = templates[template]%text
tmp = glob.glob(rval)
if len(tmp) > 0:
rval = ""
for x in tmp:
rval = rval + x + " "
rval = rval.strip()
return rval
else:
return text
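# A short illustrative note (added for clarity; not in the original script):
# with the default templates, applyTemplate maps a plain name onto the AUTO
# file-name conventions, e.g. applyTemplate("abc", "constants") yields "c.abc"
# and applyTemplate("abc", "solution") yields "s.abc" (or the names of matching
# files already present in the working directory).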
def filenameTemplate(name=None,templates=None):
name1={}
name1["constants"] = applyTemplate(name,"constants",templates)
name1["bifurcationDiagram"] = applyTemplate(name,"bifurcationDiagram",templates)
name1["solution"] = applyTemplate(name,"solution",templates)
name1["diagnostics"] = applyTemplate(name,"diagnostics",templates)
return name1
def relabel(name1=None,name2=None,templates=None):
"""Relabel data files.
Type y=FUNC(x) to return the python object x, with the solution
labels sequentially relabelled starting at 1, as a new object y.
Type FUNC('xxx') to relabel s.xxx and b.xxx. Backups of the
original files are saved.
Type FUNC('xxx','yyy') to relabel the existing data-files s.xxx and b.xxx,
and save them to s.yyy and b.yyy; d.xxx is copied to d.yyy.
"""
typen = type(name1)
if type(name1) == type(""):
name1 = filenameTemplate(name1,templates)
name2 = filenameTemplate(name2,templates)
if typen != type("") and typen != type(None):
data = name1.relabel()
info("Relabeling done\n")
return data
n1b = name1["bifurcationDiagram"]
n1s = name1["solution"]
n1d = name1["diagnostics"]
if n1b is None and n1s is None and n1d is None:
n1b, n1s, n1d = "fort.7", "fort.8", "fort.9"
if name2["bifurcationDiagram"] is None:
n2b = n1b+'~~'
n2s = n1s+'~~'
n2d = n1d+'~~'
else:
n2b = name2["bifurcationDiagram"]
n2s = name2["solution"]
n2d = name2["diagnostics"]
from . import relabel
relabel.relabel(n1b,n1s,n2b,n2s)
if os.access(n2b,os.F_OK):
if name2["bifurcationDiagram"] is None:
# Save backups
if os.access(n1b+'~',os.F_OK):
os.remove(n1b+'~')
os.rename(n1b,n1b+'~')
os.rename(n2b,n1b)
if os.access(n1s+'~',os.F_OK):
os.remove(n1s+'~')
os.rename(n1s,n1s+'~')
os.rename(n2s,n1s)
elif os.path.exists(n1d):
shutil.copy(n1d, n2d)
info("Relabeling succeeded\n")
info("Relabeling done\n")
commandRelabel = command(relabel,SIMPLE,"relabel",alias=['rl'])
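# Illustrative usage sketch ('xxx' and 'yyy' are hypothetical base names):
#   relabel('xxx')          # relabel b.xxx/s.xxx in place, keeping backups
#   relabel('xxx', 'yyy')   # write the relabelled data to b.yyy/s.yyy
#   y = relabel(x)          # return a relabelled copy of a Python object x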
def merge(name1=None,name2=None,templates=None):
"""Merge branches in data files.
Type y=FUNC(x) to return the python object x, with its branches
merged into continuous curves, as a new object y.
Type FUNC('xxx') to merge branches in s.xxx, b.xxx, and d.xxx. Backups
of the original files are saved.
Type FUNC('xxx','yyy') to merge branches in the existing data-files
s.xxx, b.xxx, and d.xxx and save them to s.yyy, b.yyy, and d.yyy.
"""
ntype = type(name1)
if type(name1) == type(""):
name1 = filenameTemplate(name1,templates)
name2 = filenameTemplate(name2,templates)
if ntype != type("") and ntype != type(None):
data = name1.merge()
info("Merge done\n")
return data
n1b = name1["bifurcationDiagram"]
n1s = name1["solution"]
n1d = name1["diagnostics"]
if n1b is None and n1s is None and n1d is None:
n1b, n1s, n1d = "fort.7", "fort.8", "fort.9"
bd = bifDiag.bifDiag(n1b,n1s,n1d)
bd = bd.merge()
if name2["bifurcationDiagram"] is None:
n2b = n1b+'~~'
n2s = n1s+'~~'
n2d = n1d+'~~'
else:
n2b = name2["bifurcationDiagram"]
n2s = name2["solution"]
n2d = name2["diagnostics"]
bd.writeFilename(n2b,n2s,n2d)
if os.access(n2b,os.F_OK):
if name2["bifurcationDiagram"] is None:
# Save backups
for [n1,n2] in [[n1b,n2b],[n1s,n2s],[n1d,n2d]]:
if os.access(n1+'~',os.F_OK):
os.remove(n1+'~')
os.rename(n1,n1+'~')
os.rename(n2,n1)
info("Merging succeeded\n")
info("Merging done\n")
commandMergeBranches = command(merge,SIMPLE,"merge",alias=['mb'])
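# Illustrative usage sketch ('xxx' is a hypothetical base name):
#   merge('xxx')   # merge branches in b.xxx/s.xxx/d.xxx, keeping backups
#   y = merge(x)   # return a merged copy of the bifurcation diagram object x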
def subtract(name1,name2,col,branch=1,point=1,templates=None):
"""Subtract branches in data files.
Type z=FUNC(x,y,ref) to return the python object x, where,
using interpolation, the first branch in y is subtracted from all
branches in x, as a new object z.
Use 'ref' (e.g., 'PAR(1)') as the reference column in y
(only the first monotonically increasing or decreasing part is used).
Type FUNC('xxx','yyy','ref') to subtract, using interpolation, the first
branch in b.yyy from all branches in b.xxx, and save the result in b.xxx.
    A backup of the original file is saved.
Use optional arguments branch=m, and point=n, to denote the branch and
first point on that branch within y or 'b.yyy', where m,n are in
{1,2,3,...}.
"""
ntype = type(name1)
if type(name1) == type(""):
name1 = filenameTemplate(name1,templates)
name2 = filenameTemplate(name2,templates)
if ntype != type(""):
sub = name1.subtract(name2[branch-1],col,point)
info("Subtracting done\n")
return sub
else:
n1b = name1["bifurcationDiagram"]
bd1 = bifDiag.bifDiag(n1b)
n2b = name2["bifurcationDiagram"]
if n1b == n2b:
bd2 = bd1
else:
bd2 = bifDiag.bifDiag(n2b)
sub = bd1.subtract(bd2[branch-1],col,point)
shutil.copy(n1b,n1b+'~')
sub.writeFilename(n1b,'')
info("Subtracting done\n")
commandSubtractBranches = command(subtract,SIMPLE,"subtract",alias=['sb'])
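# Illustrative usage sketch; 'PAR(1)' is the example reference column from the
# docstring, 'xxx' and 'yyy' are hypothetical base names:
#   z = subtract(x, y, 'PAR(1)')       # subtract y's first branch from x
#   subtract('xxx', 'yyy', 'PAR(1)')   # same on files; result overwrites b.xxx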
def append(name1,name2=None,templates=None):
"""Append data files.
Type FUNC(x,'xxx') to append bifurcation diagram x
to the data-files b.xxx, s.xxx, and d.xxx. This is equivalent to
the command
save(x+load('xxx'),'xxx')
Type FUNC('xxx',x) to append existing data-files s.xxx, b.xxx,
and d.xxx to bifurcation diagram x. This is equivalent to
the command
x=load('xxx')+x
Type FUNC('xxx') to append the output-files fort.7, fort.8,
fort.9, to existing data-files s.xxx, b.xxx, and d.xxx.
Type FUNC('xxx','yyy') to append existing data-files s.xxx, b.xxx,
and d.xxx to data-files s.yyy, b.yyy, and d.yyy.
"""
parsed1=None
parsed2=None
if isinstance(name1, bifDiag.bifDiag):
parsed1=name1
name1=name2
name2=None
if isinstance(name1, bifDiag.bifDiag):
parsed2=name1
else:
name1 = filenameTemplate(name1,templates)
name2 = filenameTemplate(name2,templates)
if parsed1 or parsed2:
n = None
if not parsed1 or not parsed2:
nb = name1["bifurcationDiagram"]
ns = name1["solution"]
nd = name1["diagnostics"]
if parsed2: #append to parsed2
if not parsed1:
parsed1 = bifDiag.bifDiag(nb,ns,nd)
info("Appending from %s, %s and %s ... done\n"%(nb,ns,nd))
parsed2.extend(parsed1)
return
if parsed1: #append from parsed1 to file
parsed1.writeFilename(nb,ns,nd,append=True)
info("Appending to %s, %s and %s ... done\n"%(nb,ns,nd))
return
i = 7
for s in ["bifurcationDiagram","solution","diagnostics"]:
n1 = name1[s]
n2 = name2[s]
if n2 is None:
n2 = n1
n1 = "fort."+str(i)
i = i+1
try:
f1 = open(n1,"rb")
f2 = open(n2,"ab")
while 1:
buf = f1.read(1024*1024)
if len(buf) == 0:
break
f2.write(buf)
f1.close()
f2.close()
info("Appending %s to %s ... done\n"%(n1,n2))
except IOError:
info("Appending %s to %s: %s\n"%(n1,n2,sys.exc_info()[1]))
commandAppend = command(append,SIMPLE,"append",alias=['ap'])
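# Illustrative usage sketch ('xxx' and 'yyy' are hypothetical base names):
#   append('xxx')          # append fort.7/fort.8/fort.9 to b.xxx, s.xxx, d.xxx
#   append('xxx', 'yyy')   # append the ?.xxx data files to ?.yyy
#   append(x, 'xxx')       # append a bifurcation diagram object x to ?.xxx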
def dirfilenames(name1,name2,name3,name4):
"""Convert arguments to directories and names for copy() and move()"""
dir1 = ""
dir2 = ""
if os.path.isdir(name1):
dir1 = name1
name1 = name2
if name4 is not None:
dir2 = name3
name2 = name4
elif name3 is not None:
name2 = name3
elif os.path.isdir(name2):
dir2 = name2
if name3 is not None:
name2 = name3
else:
name2 = name1
return dir1,name1,dir2,name2
def copy(name1,name2,name3=None,name4=None,templates=None):
"""Copy data files.
Type FUNC(name1,name2) or
FUNC(name1,name2,name3) or
FUNC(name1,name2,name3,name4).
Copy the data-files dir1/c.xxx, dir1/b.xxx, dir1/s.xxx, and dir1/d.xxx
to dir2/c.yyy, dir2/b.yyy, dir2/s.yyy, and dir2/d.yyy.
The values of dir1/?.xxx and dir2/?.yyy are as follows, depending on
whether name1 is a directory or name2 is a directory:
FUNC(name1,name2)
no directory names: ./?.name1 and ./?.name2
name1 is a directory: name1/?.name2 and ./?.name2
name2 is a directory: ./?.name1 and name2/?.name1
FUNC(name1,name2,name3)
name1 is a directory: name1/?.name2 and ./?.name3
name2 is a directory: ./?.name1 and name2/?.name3
FUNC(name1,name2,name3,name4)
name1/?.name2 and name3/?.name4
"""
dir1, name1, dir2, name2 = dirfilenames(name1,name2,name3,name4)
names1 = filenameTemplate(name1,templates)
names2 = filenameTemplate(name2,templates)
done = False
for s in ["bifurcationDiagram","solution","diagnostics","constants"]:
n1 = os.path.join(dir1,names1[s])
n2 = os.path.join(dir2,names2[s])
if os.path.exists(n1):
shutil.copy(n1,n2)
info("Copying %s to %s ... done\n"%(n1,n2))
done = True
if not done:
raise AUTOExceptions.AUTORuntimeError(
"Copying: no files found for %s and %s"%(
os.path.join(dir1,"[bsdc]."+name1),
os.path.join(dir2,"[bsdc]."+name2)))
commandCopyDataFiles = command(copy,alias=['cp'])
def save(name1,name2=None,templates=None):
"""Save data files.
Type FUNC(x,'xxx') to save bifurcation diagram x
to the files b.xxx, s.xxx, d.xxx.
Existing files with these names will be overwritten.
If x is a solution, a list of solutions, or does not contain any
bifurcation diagram or diagnostics data, then only the file s.xxx
is saved to.
Type FUNC('xxx') to save the output-files fort.7, fort.8, fort.9,
to b.xxx, s.xxx, d.xxx. Existing files with these names will be
overwritten.
"""
parsed = None
if not name2 is None:
parsed = name1
name1 = name2
name1 = filenameTemplate(name1,templates)
for s in ["bifurcationDiagram","solution","diagnostics"]:
n1 = name1[s]
if os.path.exists(n1):
shutil.copy(n1,n1+'~')
if parsed:
n1b = name1["bifurcationDiagram"]
n1s = name1["solution"]
n1d = name1["diagnostics"]
if (type(parsed) == type([]) and
isinstance(parsed[0], parseB.AUTOBranch)):
parsed = bifDiag.bifDiag(parsed)
if (isinstance(parsed,bifDiag.bifDiag) and
len(parsed) > 0 and len(parsed[0]) > 0):
parsed.writeFilename(n1b,n1s,n1d)
msg = "Saving to %s and %s ... done\n"%(n1b,n1s)
for d in parsed:
if hasattr(d,"diagnostics"):
msg = "Saving to %s, %s, and %s ... done\n"%(n1b,n1s,n1d)
break
else:
if (type(parsed) == type([]) and
isinstance(parsed[0], parseS.AUTOSolution)):
parsed = parseS.parseS(parsed)
parsed.writeFilename(n1s)
msg = "Saving to %s ... done\n"%(n1s)
info(msg)
return
i = 7
for s in ["bifurcationDiagram","solution","diagnostics"]:
n1 = name1[s]
forti = "fort." + str(i)
i = i + 1
if os.path.exists(forti):
shutil.copy(forti,n1)
info("Saving %s as %s ... done\n"%(forti,n1))
commandCopyFortFiles = command(save,SIMPLE,"save",alias=['sv'])
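# Illustrative usage sketch ('xxx' is a hypothetical base name):
#   save('xxx')      # copy fort.7/fort.8/fort.9 to b.xxx, s.xxx, d.xxx
#   save(x, 'xxx')   # write the bifurcation diagram object x to those files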
def delete(name,templates=None):
"""Delete data files.
Type FUNC('xxx') to delete the data-files d.xxx, b.xxx, and s.xxx.
"""
name = filenameTemplate(name,templates)
n1b = name["bifurcationDiagram"]
n1s = name["solution"]
n1d = name["diagnostics"]
if os.path.exists(n1b):
os.remove(n1b)
info("Deleting %s ... done\n"%n1b)
if os.path.exists(n1s):
os.remove(n1s)
info("Deleting %s ... done\n"%n1s)
if os.path.exists(n1d):
os.remove(n1d)
info("Deleting %s ... done\n"%n1d)
commandDeleteDataFiles = command(delete,alias=['dl'])
def deleteLabel(codes=None,name1=None,name2=None,templates=None,
keepTY=0,keep=0):
if hasattr(codes,'deleteLabel'):
origlen=len(codes())
new = codes.deleteLabel(name1,keepTY=keepTY,keep=keep,copy=1)
newlen=len(new())
info("Deleted %d labels, and kept %d.\n"%(origlen-newlen, newlen))
return new
name1 = filenameTemplate(name1,templates)
if name1["solution"] is None:
changedb='fort.7'
changeds='fort.8'
else:
changedb=name1["bifurcationDiagram"]
changeds=name1["solution"]
bs=bifDiag.bifDiag(changedb,changeds)
origlen=len(bs())
bs.deleteLabel(codes,keepTY=keepTY,keep=keep)
newlen=len(bs())
if name2 is None:
origb=changedb+'~'
origs=changeds+'~'
try:
os.remove(origb)
except:
pass
try:
os.remove(origs)
except:
pass
os.rename(changedb,origb)
os.rename(changeds,origs)
bs.writeFilename(changedb,changeds)
else:
name2 = filenameTemplate(name2,templates)
bs.writeFilename(name2["bifurcationDiagram"],name2["solution"])
info("Deleted %d labels, and kept %d.\n"%(origlen-newlen, newlen))
def dsp(typenames=None,name1=None,name2=None,templates=None):
"""Delete special points.
Type FUNC(x,list) to delete the special points in list from
the Python object x, which must be a solution list or a bifurcation diagram.
Type FUNC(list,'xxx') to delete from the data-files b.xxx, and s.xxx.
Type FUNC(list,'xxx','yyy') to save to b.yyy and s.yyy instead of ?.xxx.
Type FUNC(list) to delete from fort.7 and fort.8.
list is a label number or type name code, or a list of those,
such as 1, or [2,3], or 'UZ' or ['BP','LP'], or it can be None or
omitted to mean the special points ['BP','LP','HB','PD','TR','EP','MX']
Alternatively a boolean user-defined function f that takes a solution
can be specified for list, such as
def f(s):
return s["PAR(9)"]<0
where all solutions are deleted that satisfy the given condition, or
def f(s1,s2):
return abs(s1["L2-NORM"] - s2["L2-NORM"]) < 1e-4
where all solutions are compared with each other and s2 is deleted if
the given condition is satisfied, which causes pruning of solutions
that are close to each other.
Type information is NOT kept in the bifurcation diagram.
"""
return deleteLabel(typenames,name1,name2,templates)
commandDeleteSpecialPoints = command(dsp)
def ksp(typenames=None,name1=None,name2=None,templates=None):
"""Keep special points.
Type FUNC(x,list) to only keep the special points in list in
the Python object x, which must be a solution list or a bifurcation diagram.
Type FUNC(list,'xxx') to keep them in the data-files b.xxx and s.xxx.
Type FUNC(list,'xxx','yyy') to save to b.yyy and s.yyy instead of ?.xxx.
Type FUNC(list) to keep them in fort.7 and fort.8.
list is a label number or type name code, or a list of those,
such as 1, or [2,3], or 'UZ' or ['BP','LP'], or it can be None or
omitted to mean ['BP','LP','HB','PD','TR','EP','MX'], deleting 'UZ' and
regular points.
Alternatively a boolean user-defined function f that takes a solution
can be specified for list, such as
def f(s):
return s["PAR(9)"]<0
where only solutions are kept that satisfy the given condition.
Type information is NOT kept in the bifurcation diagram.
"""
return deleteLabel(typenames,name1,name2,templates,keep=1)
commandKeepSpecialPoints = command(ksp)
def dlb(typenames=None,name1=None,name2=None,templates=None):
"""Delete special labels.
Type FUNC(x,list) to delete the special points in list from
the Python object x, which must be a solution list or a bifurcation diagram.
Type FUNC(list,'xxx') to delete from the data-files b.xxx and s.xxx.
Type FUNC(list,'xxx','yyy') to save to b.yyy and s.yyy instead of ?.xxx.
Type FUNC(list) to delete from fort.7 and fort.8.
list is a label number or type name code, or a list of those,
such as 1, or [2,3], or 'UZ' or ['BP','LP'], or it can be None or
omitted to mean the special points ['BP','LP','HB','PD','TR','EP','MX']
Alternatively a boolean user-defined function f that takes a solution
can be specified for list, such as
def f(s):
return s["PAR(9)"] < 0
where all solutions are deleted that satisfy the given condition, or
def f(s1,s2):
return abs(s1["L2-NORM"] - s2["L2-NORM"]) < 1e-4
where all solutions are compared with each other and s2 is deleted if
the given condition is satisfied, which causes pruning of solutions
that are close to each other.
Type information is kept in the bifurcation diagram for plotting.
"""
return deleteLabel(typenames,name1,name2,templates,keepTY=1)
commandDeleteLabels = command(dlb)
def klb(typenames=None,name1=None,name2=None,templates=None):
"""Keep special labels.
Type FUNC(x,list) to only keep the special points in list in
the Python object x, which must be a solution list or a bifurcation diagram.
Type FUNC(list,'xxx') to keep them in the data-files b.xxx and s.xxx.
Type FUNC(list,'xxx','yyy') to save to b.yyy and s.yyy instead of ?.xxx.
Type FUNC(list) to keep them in fort.7 and fort.8.
list is a label number or type name code, or a list of those,
such as 1, or [2,3], or 'UZ' or ['BP','LP'], or it can be None or
omitted to mean ['BP','LP','HB','PD','TR','EP','MX'], deleting 'UZ' and
regular points.
Alternatively a boolean user-defined function f that takes a solution
can be specified for list, such as
def f(s):
return s["PAR(9)"]<0
where only solutions are kept that satisfy the given condition.
Type information is kept in the bifurcation diagram for plotting.
"""
return deleteLabel(typenames,name1,name2,templates,keepTY=1,keep=1)
commandKeepLabels = command(klb)
def expandData(cmd,name=None,templates=None):
name = filenameTemplate(name,templates)
n1b = name["bifurcationDiagram"]
n1s = name["solution"]
if n1s is None:
n1s = "fort.8"
n1b = "fort.7"
if os.path.exists(n1b):
shutil.copy(n1b,n1b+'~')
if os.path.exists(n1s):
shutil.copy(n1s,"fort.28")
if os.path.exists(n1s+'~'):
os.remove(n1s+'~')
os.rename(n1s,n1s+'~')
interact(os.path.expandvars("$AUTO_DIR/bin/%s"%cmd))
os.rename("fort.38",n1s)
if os.path.exists("fort.28"):
os.remove("fort.28")
if cmd == "double":
info("Solution doubling done.\n")
else:
info("Solution tripling done.\n")
def double(name=None,templates=None):
"""Double a solution.
Type FUNC() to double the solution in 'fort.7' and 'fort.8'.
Type FUNC('xxx') to double the solution in b.xxx and s.xxx.
"""
expandData("double",name,templates)
commandDouble = command(double,alias=['db'])
def move(name1,name2,name3=None,name4=None,templates=None):
"""Move data-files to a new name.
Type FUNC(name1,name2) or
FUNC(name1,name2,name3) or
FUNC(name1,name2,name3,name4)
Move the data-files dir1/b.xxx, dir1/s.xxx, and dir1/d.xxx,
to dir2/b.yyy, dir2/s.yyy, and dir2/d.yyy, and copy the constants
file dir1/c.xxx to dir2/c.yyy.
The values of dir1/?.xxx and dir2/?.yyy are as follows, depending on
whether name1 is a directory or name2 is a directory:
FUNC(name1,name2)
no directory names: ./?.name1 and ./?.name2
name1 is a directory: name1/?.name2 and ./?.name2
name2 is a directory: ./?.name1 and name2/?.name1
FUNC(name1,name2,name3)
name1 is a directory: name1/?.name2 and ./?.name3
name2 is a directory: ./?.name1 and name2/?.name3
FUNC(name1,name2,name3,name4)
name1/?.name2 and name3/?.name4
"""
dir1, name1, dir2, name2 = dirfilenames(name1,name2,name3,name4)
names1 = filenameTemplate(name1,templates)
names2 = filenameTemplate(name2,templates)
done = False
for s in ["bifurcationDiagram","solution","diagnostics","constants"]:
n1 = os.path.join(dir1,names1[s])
n2 = os.path.join(dir2,names2[s])
if s == "constants":
try:
shutil.copy(n1,n2)
info("Copying %s to %s ... done\n"%(n1,n2))
done = True
except IOError:
pass
continue
if os.path.exists(n1):
if os.path.exists(n2):
os.remove(n2)
os.rename(n1,n2)
info("Renaming %s as %s ... done\n"%(n1,n2))
done = True
if not done:
raise AUTOExceptions.AUTORuntimeError(
"Renaming: no files found for %s and %s"%(
os.path.join(dir1,"[bsdc]."+name1),
os.path.join(dir2,"[bsdc]."+name2)))
commandMoveFiles = command(move,alias=['mv'])
def cn(name,templates=None):
"""Get the current continuation constants.
Type FUNC('xxx') to get a parsed version of the constants file
c.xxx.
This is equivalent to the command
loadbd('xxx').c
"""
name = filenameTemplate(name,templates)
data = parseC.parseC(name["constants"])
info("Parsed file: %s\n"%name["constants"])
return data
commandParseConstantsFile = command(cn,alias=['constantsget'])
def hcn(name,templates=None):
"""Get the current HomCont continuation constants.
Type FUNC('xxx') to get a parsed version of the HomCont file
h.xxx.
"""
name = filenameTemplate(name,templates)
data = parseH.parseH(name["homcont"])
info("Parsed file: %s\n"%name["homcont"])
return data
commandParseHomcontFile = command(hcn)
def sl(name=None,templates=None):
"""Parse solution file:
Type FUNC('xxx') to get a parsed version of the solution file
s.xxx.
This is equivalent to the command
loadbd('xxx')()
"""
name = filenameTemplate(name,templates)
n1s = name["solution"] or "fort.8"
try:
data = parseS.parseS(n1s)
except IOError:
raise AUTOExceptions.AUTORuntimeError(sys.exc_info()[1])
if isinstance(n1s, str):
info("Parsed file: %s\n"%n1s)
return data
commandParseSolutionFile = command(sl,alias=['solutionget'])
def dg(name=None,templates=None):
"""Parse a bifurcation diagram.
Type FUNC('xxx') to get a parsed version of the diagram file b.xxx.
This is equivalent to the command loadbd('xxx') but without the
solutions in s.xxx and without the diagnostics in d.xxx.
"""
name = filenameTemplate(name,templates)
n1b = name["bifurcationDiagram"]
if n1b is None:
n1b = "fort.7"
try:
data = parseB.parseB(n1b)
except IOError:
raise AUTOExceptions.AUTORuntimeError(sys.exc_info()[1])
info("Parsed file: %s\n"%n1b)
return data
commandParseDiagramFile = command(dg,alias=['diagramget'])
def bt(name=None,templates=None):
"""Parse both bifurcation diagram and solution.
Type FUNC('xxx') to get a parsed version of the diagram file b.xxx
and solution file s.xxx.
This is equivalent to the command loadbd('xxx') but without the
diagnostics in d.xxx.
"""
name = filenameTemplate(name,templates)
n1b = name["bifurcationDiagram"]
n1s = name["solution"]
if n1b is None:
n1b = "fort.7"
n1s = "fort.8"
data = parseBandS.parseBandS(n1b,n1s)
output_names = n1b + " and " + n1s
info("Parsed files: %s\n"%output_names)
return data
commandParseDiagramAndSolutionFile = command(bt,alias=['diagramandsolutionget'])
def queryDiagnostic(diagnostic,name=None,templates=None):
name = filenameTemplate(name,templates)
n1d = name["diagnostics"]
if n1d is None:
n1d = "fort.9"
try:
f = open(n1d)
except TypeError:
for branch in n1d:
if hasattr(branch,"diagnostics"):
for s in str(branch.diagnostics).splitlines():
if diagnostic in s:
info(s+"\n")
info("\n")
return
for s in f:
if diagnostic in s:
info(s)
f.close()
info("\n")
commandQueryDiagnostic = command(queryDiagnostic,alias=None)
def branchpoint(name=None,templates=None):
"""Print the ``branch-point function''.
Type FUNC(x) to list the value of the ``branch-point function''
in the diagnostics of the bifurcation diagram object x.
This function vanishes at a branch point.
Type FUNC() to list the value of the ``branch-point function''
in the output-file fort.9.
Type FUNC('xxx') to list the value of the ``branch-point function''
in the info file 'd.xxx'.
"""
queryDiagnostic("BP",name,templates)
commandQueryBranchPoint = command(branchpoint,alias=['bp','br'])
def eigenvalue(name=None,templates=None):
"""Print eigenvalues of Jacobian (algebraic case).
Type FUNC(x) to list the eigenvalues of the Jacobian
in the diagnostics of the bifurcation diagram object x.
(Algebraic problems.)
Type FUNC() to list the eigenvalues of the Jacobian
in fort.9.
Type FUNC('xxx') to list the eigenvalues of the Jacobian
in the info file 'd.xxx'.
"""
queryDiagnostic("Eigenvalue",name,templates)
commandQueryEigenvalue = command(eigenvalue,alias=['ev','eg'])
def floquet(name=None,templates=None):
"""Print the Floquet multipliers.
Type FUNC(x) to list the Floquet multipliers
in the diagnostics of the bifurcation diagram object x.
(Differential equations.)
Type FUNC() to list the Floquet multipliers
in the output-file fort.9.
Type FUNC('xxx') to list the Floquet multipliers
in the info file 'd.xxx'.
"""
queryDiagnostic("Mult",name,templates)
commandQueryFloquet = command(floquet,alias=['fl'])
def hopf(name=None,templates=None):
"""Print the value of the ``Hopf function''.
Type FUNC(x) to list the value of the ``Hopf function''
in the diagnostics of the bifurcation diagram object x.
This function vanishes at a Hopf bifurcation point.
Type FUNC() to list the value of the ``Hopf function''
in the output-file fort.9.
Type FUNC('xxx') to list the value of the ``Hopf function''
in the info file 'd.xxx'.
"""
queryDiagnostic("Hopf",name,templates)
commandQueryHopf = command(hopf,alias=['hp','hb'])
def iterations(name=None,templates=None):
"""Print the number of Newton interations.
Type FUNC(x) to list the number of Newton iterations per
continuation step in the diagnostics of the bifurcation diagram
object x.
Type FUNC() to list the number of Newton iterations per
continuation step in fort.9.
Type FUNC('xxx') to list the number of Newton iterations per
continuation step in the info file 'd.xxx'.
"""
queryDiagnostic("Iterations",name,templates)
commandQueryIterations = command(iterations,alias=['it'])
def limitpoint(name=None,templates=None):
"""Print the value of the ``limit point function''.
Type FUNC(x) to list the value of the ``limit point function''
in the diagnostics of the bifurcation diagram object x.
This function vanishes at a limit point (fold).
Type FUNC() to list the value of the ``limit point function''
in the output-file fort.9.
Type FUNC('xxx') to list the value of the ``limit point function''
in the info file 'd.xxx'.
"""
queryDiagnostic("Fold",name,templates)
commandQueryLimitpoint = command(limitpoint,alias=['lm','lp'])
def note(name=None,templates=None):
"""Print notes in info file.
Type FUNC(x) to show any notes
in the diagnostics of the bifurcation diagram
object x.
Type FUNC() to show any notes
in the output-file fort.9.
Type FUNC('xxx') to show any notes
in the info file 'd.xxx'.
"""
queryDiagnostic("NOTE",name,templates)
commandQueryNote = command(note,alias=['nt'])
def secondaryperiod(name=None,templates=None):
"""Print value of ``secondary-periodic bif. fcn''.
Type FUNC(x) to list the value of the
``secondary-periodic bifurcation function''
in the diagnostics of the bifurcation diagram object x.
This function vanishes at period-doubling and torus bifurcations.
Type FUNC() to list the value of the
``secondary-periodic bifurcation function''
    in the output-file 'fort.9'.
Type FUNC('xxx') to list the value of the
``secondary-periodic bifurcation function''
in the info file 'd.xxx'.
"""
queryDiagnostic("SPB",name,templates)
commandQuerySecondaryPeriod = command(secondaryperiod,alias=['sp','sc'])
def stepsize(name=None,templates=None):
"""Print continuation step sizes.
Type FUNC(x) to list the continuation step size for each
continuation step in the diagnostics of the bifurcation diagram
object x.
Type FUNC() to list the continuation step size for each
continuation step in 'fort.9'.
Type FUNC('xxx') to list the continuation step size for each
continuation step in the info file 'd.xxx'.
"""
queryDiagnostic("Step",name,templates)
commandQueryStepsize = command(stepsize,alias=['ss','st'])
def triple(name=None,templates=None):
"""Triple a solution.
Type FUNC() to triple the solution in 'fort.8'.
Type FUNC('xxx') to triple the solution in s.xxx.
"""
return expandData("triple",name,templates)
commandTriple = command(triple,alias=['tr'])
############################################
# System Commands
############################################
def ls(dir=None):
"""List the current directory.
Type 'FUNC' to run the system 'ls' command in the current directory. This
command will accept whatever arguments are accepted by the Unix command
'ls'.
"""
cmd = "ls"
if os.name in ["nt", "dos"]:
path = os.environ["PATH"].split(os.pathsep)
cmd = "dir"
for s in path:
if os.path.exists(os.path.join(s,"ls.exe")):
cmd = "ls"
break
if dir is not None:
cmd = "%s %s"%(cmd,dir)
if sys.stdout is sys.__stdout__:
sys.stdout.flush()
os.system(cmd)
else:
info(AUTOutil.getstatusoutput(cmd, shell=True)[1]+'\n')
commandLs = command(ls)
def quit():
"""Quit the AUTO CLUI."""
if isinstance(builtins.quit,str):
sys.exit()
else:
builtins.quit()
commandQuit = command(quit,alias=['q'])
def shell(cmd):
"""Run a shell command.
Type FUNC('xxx') to run the command 'xxx' in the Unix shell and display
the results in the AUTO command line user interface.
"""
sys.stdout.flush()
os.system(cmd)
commandShell = command(shell)
def wait():
"""Wait for the user to enter a key.
Type 'FUNC()' to have the AUTO interface wait
until the user hits any key (mainly used in scripts).
"""
print("Hit <return> to continue")
input()
commandWait = command(wait)
def cat(f=None):
"""Print the contents of a file
Type 'FUNC xxx' to list the contents of the file 'xxx'.
"""
if f is not None:
f = open(f,"r")
for line in f:
info(line)
f.close()
else:
line = sys.stdin.readline()
while line != "":
info(line)
line = sys.stdin.readline()
commandCat = command(cat)
############################################
# Commands which use runAUTO
############################################
# This function is overridden in AUTOclui.py, so the AUTOSimpleFunctions
# instance's runner can be used.
def withrunner(runner=None):
return runner
def cd(dir=None,runner=None):
"""Change directories.
Type 'FUNC xxx' to change to the directory 'xxx'. This command
understands both shell variables and home directory expansion.
"""
runner = withrunner(runner)
if dir is None or dir == '':
dir = os.path.expanduser("~")
try:
dir = os.path.expanduser(dir)
dir = os.path.expandvars(dir)
os.chdir(dir)
except:
print(("Directory '%s' not found"%(dir,)))
runner.config(dir=os.getcwd())
commandCd = command(cd)
def configure(runner=None,templates=None,data=None,**kw):
"""Load files into the AUTO runner or return modified solution data.
Type result=FUNC([options]) to modify the AUTO runner.
The type of the result is a solution object.
There are many possible options:
\\begin{verbatim}
Long name Short name Description
-------------------------------------------
equation e The equations file
constants c The AUTO constants file
homcont h The Homcont parameter file
solution s The restart solution file
NDIM,IPS,etc AUTO constants.
BR,PT,TY,LAB Solution constants.
\\end{verbatim}
Options which are not explicitly set retain their previous value.
For example one may type: s=FUNC(e='ab',c='ab.1') to use 'ab.c' as
the equations file and c.ab.1 as the constants file.
You can also specify AUTO Constants, e.g., DS=0.05, or IRS=2.
Special values for DS are '+' (forwards) and '-' (backwards).
Example: s = FUNC(s,DS='-') changes s.c['DS'] to -s.c['DS'].
"""
def applyRunnerConfigResolveAbbreviation(**kw):
abbrev = {}
for key in ["equation", "constants", "solution", "homcont"]:
abbrev[key[0]] = key
abbrev[key] = key
for key in list(kw):
# remove long duplicates
if (key in abbrev and key != abbrev[key] and
abbrev[key] in kw):
del kw[abbrev[key]]
for key,value in list(kw.items()):
if key in abbrev:
# change the abbreviation to the long version
del kw[key]
if AUTOutil.isiterable(value):
kw[abbrev[key]] = value
else:
if key[0] == 'e':
kw['e'] = value
kw[abbrev[key]] = applyTemplate(value,abbrev[key],templates)
return kw
def applyRunnerConfigResolveFilenames(**kw):
exception = None
objectdict = {"constants": parseC.parseC,
"homcont": parseH.parseH,
"solution": parseS.parseS}
for key in ["constants", "homcont", "solution"]:
if key in kw:
value = kw[key]
elif data is not None:
value = applyTemplate(data,key,templates)
else:
value = None
if value is not None and not AUTOutil.isiterable(value):
try:
kw[key] = objectdict[key](value)
except IOError:
if key in kw:
# for solution only raise exception later if IRS!=0
exception = sys.exc_info()[1]
if key != "solution":
raise AUTOExceptions.AUTORuntimeError(exception)
# ignore error, but erase runner data for load("xxx")
kw[key] = None
if data is not None and "e" not in kw and not AUTOutil.isiterable(data):
kw["e"] = data
kw["equation"] = applyTemplate(data,"equation",templates)
if "e" in kw:
eq = kw["e"]
for ext in [".f90",".f",".c"]:
if os.path.exists(eq+ext):
return kw, exception
raise AUTOExceptions.AUTORuntimeError(
"No equations file found for: '%s'"%eq)
return kw, exception
runner = withrunner(runner)
if "info" in kw:
info = kw["info"]
del kw["info"]
else:
info = globals()["info"]
kw = applyRunnerConfigResolveAbbreviation(**kw)
kw, exception = applyRunnerConfigResolveFilenames(**kw)
if data is not None and AUTOutil.isiterable(data):
if hasattr(data,"load"):
# for load(object,...)
if "equation" in kw:
del kw["equation"]
solution = data.load(**kw)
c = solution.c
kw = {"equation": applyTemplate(c.get("e", ""), "equation",
templates),
"solution": solution, "constants": c,
"homcont": c.get("homcont")}
else:
# for load(array,...)
kw["solution"] = data
solution = runner.load(**kw)
if exception is not None and runner.options["constants"]["IRS"]:
raise AUTOExceptions.AUTORuntimeError(exception)
info("Runner configured\n")
return solution
commandRunnerConfig = command(configure,alias=None)
def load(data=None,runner=None,templates=None,**kw):
"""Load files into the AUTO runner or return modified solution data.
Type result=FUNC([options]) to modify the AUTO runner.
Type result=FUNC(data,[options]) to return possibly
modified solution data.
The type of the result is a solution object.
FUNC(data,[options]) returns a solution in the following way for
different types of data:
* A solution: load returns the solution data, with AUTO constants
modified by options.
* A bifurcation diagram or a solution list:
returns the solution specified by
the AUTO constant IRS, or if IRS is not specified, the last solution
in s.
* A string: AUTO uses the solution in the file 's.s' together with the
constants in the files 'c.s', and 'h.s'. Not all of these
files need to be present.
* A Python list array or a numpy array representing a solution,
returns a solution with the given contents. Such an array must be given
column-wise, as [[t0, ..., tn], [x0, ..., xn], [y0, ..., yn], ...],
or for a point solution as [x, y, z, ...].
There are many possible options:
\\begin{verbatim}
Long name Short name Description
-------------------------------------------
equation e The equations file
constants c The AUTO constants file
homcont h The Homcont parameter file
solution s The restart solution file
NDIM,IPS,etc AUTO constants.
BR,PT,TY,LAB Solution constants.
\\end{verbatim}
If data is not specified or data is a string then options which
are not explicitly set retain their previous value.
For example one may type: s=FUNC(e='ab',c='ab.1') to use 'ab.c' as
the equations file and c.ab.1 as the constants file.
Type s=FUNC('name') to load all files with base 'name'.
This does the same thing as running
    s=FUNC(e='name',c='name',h='name',s='name').
You can also specify AUTO Constants, e.g., DS=0.05, or IRS=2.
Special values for DS are '+' (forwards) and '-' (backwards).
Example: s = FUNC(s,DS='-') changes s.c['DS'] to -s.c['DS'].
"""
runner = withrunner(runner)
return configure(runner,templates,data,**kw)
commandRunnerLoadName = command(load,SIMPLE,"loadname",alias=['ld'])
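# Illustrative usage sketch; 'ab' is a hypothetical problem name and the DS
# example is taken from the docstring above:
#   s = load('ab')        # load ab.f90 (or ab.f/ab.c), c.ab, h.ab and s.ab if present
#   s = load(s, DS='-')   # return s with the sign of its DS constant flipped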
def loadbd(name=None,templates=None,**kw):
"""Load bifurcation diagram files.
Type b=FUNC([options]) to load output files or output data.
There are three possible options:
\\begin{verbatim}
Long name Short name Description
-------------------------------------------
bifurcationdiagram b The bifurcation diagram file
solution s The solution file or list of solutions
diagnostics d The diagnostics file
\\end{verbatim}
Type FUNC('name') to load all files with base 'name'.
This does the same thing as running
    FUNC(b='name',s='name',d='name').
plot(b) will then plot the 'b' and 's' components.
Returns a bifurcation diagram object representing the files in b.
"""
def __applyBsdConfigResolveAbbreviation(**kw):
abbrev = {}
for key in ["bifurcationDiagram", "solution", "diagnostics"]:
abbrev[key[0]] = key
abbrev[key] = key
for key in list(kw):
# remove long duplicates
if (key in abbrev and key != abbrev[key] and
abbrev[key] in kw):
del kw[abbrev[key]]
for key,value in list(kw.items()):
if key in abbrev:
# change the abbreviation to the long version
del kw[key]
if type(value) in [type(""),type(1),type(1.0)]:
kw[abbrev[key]] = applyTemplate(value,abbrev[key],templates)
else:
kw[abbrev[key]] = value
return kw
if name is not None:
if AUTOutil.isiterable(name):
lst = ["bifurcationDiagram"]
else:
lst = ["bifurcationDiagram", "solution", "diagnostics"]
for key in lst:
if key not in kw:
kw[key] = name
if name is None and kw == {}:
bname, sname, dname = "fort.7", "fort.8", "fort.9"
else:
dict = __applyBsdConfigResolveAbbreviation(**kw)
bname = dict.get("bifurcationDiagram")
sname = dict.get("solution")
dname = dict.get("diagnostics")
data = bifDiag.bifDiag(bname,sname,dname)
info("Parsed output data\n")
return data
commandParseOutputFiles = command(loadbd,SIMPLE,"loadbd",alias=['bd'])
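# Illustrative usage sketch ('xxx' is a hypothetical base name):
#   b = loadbd('xxx')   # parse b.xxx, s.xxx and d.xxx into one diagram object
#   b = loadbd()        # parse fort.7, fort.8 and fort.9 instead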
def pr(parameter=None,runner=None):
"""Print continuation parameters.
Type FUNC() to print all the parameters.
Type FUNC('xxx') to return the parameter 'xxx'.
These commands are equivalent to the commands
print s.c
print s.c['xxx']
where s is a solution.
"""
runner = withrunner(runner)
if parameter is None:
info(str(runner.options["constants"]))
else:
return runner.options["constants"][parameter]
commandRunnerPrintFort2 = command(pr,alias=['printconstant','pc'])
def hpr(parameter=None,runner=None):
"""Print HomCont continuation parameters.
Type FUNC() to print all the HomCont parameters.
Type FUNC('xxx') to return the HomCont parameter 'xxx'.
These commands are equivalent to the commands
print s.c
print s.c['xxx']
where s is a solution.
"""
runner = withrunner(runner)
if parameter is None:
info(str(runner.options["homcont"]))
else:
return runner.options["homcont"][parameter]
commandRunnerPrintFort12 = command(hpr)
def ch(entry=None,value=None,runner=None,**kw):
"""Modify continuation constants.
Type FUNC('xxx',yyy) to change the constant 'xxx' to have
value yyy.
This is equivalent to the command
s=load(s,xxx=yyy)
where s is a solution.
"""
runner = withrunner(runner)
if entry is not None:
runner.options["constants"][entry] = value
info("%s changed to %s\n"%(entry,value))
else:
configure(runner,None,info=lambda s:None,**kw)
info(str(kw)+'\n')
commandRunnerConfigFort2 = command(ch,SIMPLE,"changeConstants",
alias=['changeconstant','cc'])
def hch(entry=None,value=None,runner=None,**kw):
"""Modify HomCont continuation constants.
Type FUNC('xxx',yyy) to change the HomCont constant 'xxx' to have
value yyy.
This is equivalent to the command
s=load(s,xxx=yyy)
where s is a solution.
"""
runner = withrunner(runner)
if entry is not None:
runner.options["homcont"][entry] = value
info("%s changed to %s\n"%(entry,value))
else:
configure(runner,None,info=lambda s:None,**kw)
info(str(kw)+'\n')
commandRunnerConfigFort12 = command(hch,SIMPLE,"changeConstantsHomCont")
def run(data=None,sv=None,ap=None,runner=None,templates=None,**kw):
"""Run AUTO.
Type r=FUNC([data],[options]) to run AUTO from solution data with the given
AUTO constants or file keyword options.
The results are stored in the bifurcation diagram r which you can
later print with ``print r'', obtain branches from via r[0], r[1], ...,
and obtain solutions from via r(3), r(5), r('LP2'), where 3 and 5
are label numbers, and 'LP2' refers to the second LP label.
FUNC(data) runs AUTO in the following way for different types of data:
* A solution: AUTO starts from solution data, with AUTO constants data.c.
* A bifurcation diagram: AUTO start from the solution specified by
the AUTO constant IRS, or if IRS is not specified, the last solution
in data, data()[-1], with AUTO constants data()[-1].c.
* A string: AUTO uses the solution in the file 's.data' together with the
constants in the files 'c.data', and 'h.data'. Not all of these
files need to be present.
If no solution data is specified, then the global values from the
'load' command are used instead, where
options which are not explicitly set retain their previous value.
Keyword argument options can be AUTO constants, such as DS=0.05,
or ISW=-1, or specify a constant or solution file. These override
the constants in s.c, where applicable. See ``load'':
FUNC(s,options) is equivalent to FUNC(load(s,options))
Example: given a bifurcation diagram bd, with a branch point
solution, switch branches and stop at the first Hopf bifurcation:
hb = FUNC(bd('BP1'),ISW=-1,STOP='HB1')
Special keyword arguments are 'sv' and 'ap'; 'sv' is also an AUTO
constant:
FUNC(bd('BP1'),ISW=-1,STOP='HB1',sv='hb',ap='all')
saves to the files b.hb, s.hb and d.hb, and appends to b.all,
s.all, and d.all.
"""
runner = withrunner(runner)
if sv is not None:
kw['sv'] = sv
load(data,runner,templates,info=lambda msg:None,**kw)
res = runner.run()
sv = runner.options["constants"].get("sv")
runner.options["constants"]['sv'] = None
if sv is not None and sv != '':
name = filenameTemplate(sv,templates)
bname = name["bifurcationDiagram"]
sname = name["solution"]
dname = name["diagnostics"]
info("Saving to %s, %s, and %s ... done\n"%(bname,sname,dname))
if ap is not None:
append(sv,ap)
elif ap is not None:
append(ap)
return res
commandRun = command(run,SIMPLE,"run",alias=['r','rn'])
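# Illustrative usage sketch; 'ab' is a hypothetical problem name, and the
# branch-switching example is the one given in the docstring above:
#   bd = run('ab')                                     # start from c.ab/s.ab
#   hb = run(bd('BP1'), ISW=-1, STOP='HB1', sv='hb')   # switch branches, save to ?.hb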
def rundemo(demo,equation="all",runner=None):
runner = withrunner(runner)
runner.config(equation=equation)
runner.runDemo(demo)
commandRunDemo = command(rundemo,alias=None)
def runMakefileWithSetup(equation=None,fort2=None,fort3=None,runner=None):
runner = withrunner(runner)
if fort2 is not None:
runner.config(fort2=fort2)
if fort3 is not None:
runner.config(fort3=fort3)
# Before this is called runner needs to have the fort2 and fort3
# options set. Otherwise this will raise an exception.
runner.runMakefileWithSetup(equation)
commandRunMakefileWithSetup = command(runMakefileWithSetup,alias=None)
def runMakefile(equation=None,runner=None):
runner = withrunner(runner)
runner.runMakefile(equation)
commandRunMakefile = command(runMakefile,alias=None)
def runExecutableWithSetup(executable=None,fort2=None,fort3=None,runner=None):
runner = withrunner(runner)
if fort2 is not None:
runner.config(fort2=fort2)
if fort3 is not None:
runner.config(fort3=fort3)
# Before this is called runner needs to have the fort2 and fort3
# options set. Otherwise this will raise an exception.
runner.runExecutableWithSetup(executable)
commandRunExecutableWithSetup = command(runExecutableWithSetup,alias=None)
def runExecutable(executable=None,fort2=None,fort3=None,runner=None):
runner = withrunner(runner)
runner.runExecutable(executable)
commandRunExecutable = command(runExecutable,alias=None)
def runCommandWithSetup(command=None,fort2=None,fort3=None,runner=None):
runner = withrunner(runner)
if fort2 is not None:
runner.config(fort2=fort2)
if fort3 is not None:
runner.config(fort3=fort3)
# Before this is called runner needs to have the fort2 and fort3
# options set. Otherwise this will raise an exception.
runner.runCommandWithSetup(command)
commandRunCommandWithSetup = command(runCommandWithSetup,alias=None)
def runCommand(command=None,runner=None):
    runner = withrunner(runner)
runner.runCommand(command)
commandRunCommand = command(runCommand,alias=None)
def plot3(name=None,r3b=False):
"""3D plotting of data.
Type FUNC(x) to run the graphics program PLAUT04 for the graphical
inspection of bifurcation diagram or solution data in x.
Type FUNC('xxx') to run the graphics program PLAUT04 for the graphical
inspection of the data-files b.xxx and s.xxx.
Type FUNC() to run the graphics program PLAUT04 for the graphical
inspection of the output-files 'fort.7' and 'fort.8'.
Type FUNC(...,r3b=True) to run PLAUT04 in restricted three body
problem mode.
"""
cmd = os.path.join(os.path.expandvars("$AUTO_DIR"),"bin")
cmd = os.path.join(cmd, "plaut04")
arg = []
if r3b:
arg = ["-r3b"]
if name is not None:
if type(name) == type(""):
arg.append(name)
else:
d = name
for f in ["fort.7","fort.8","fort.9"]:
if os.path.exists(f):
os.remove(f)
if isinstance(d,bifDiag.bifDiag):
d.writeFilename("fort.7","fort.8","fort.9")
elif isinstance(d,parseBandS.parseBandS):
d.writeFilename("fort.7","fort.8")
elif isinstance(d,parseB.parseB):
d.writeFilename("fort.7")
elif isinstance(d,parseS.parseS):
d.writeFilename("fort.8")
elif isinstance(d,parseB.AUTOBranch):
d.writeFilename("fort.7")
elif isinstance(d,parseS.AUTOSolution):
d.writeFilename("fort.8")
sys.stdout.flush()
if not os.path.exists(cmd):
cmd = cmd + '.exe'
if sys.stdout is sys.__stdout__:
os.spawnv(os.P_NOWAIT,cmd,[os.path.basename(cmd)] + arg)
else:
# when testing, change directories so plaut04 does not keep
# cwd open on Windows and it can be deleted
cwd = os.getcwd()
os.chdir(os.path.dirname(cmd))
os.spawnv(os.P_NOWAIT,cmd,[os.path.basename(cmd), cwd] + arg)
# and wait a little bit
os.chdir(cwd)
import time
time.sleep(2)
commandPlotter3D = command(plot3,alias=['p3'])
try:
try:
        from Tkinter import Tk # Python 2
except ImportError:
from tkinter import Tk # Python 3
plotterimported = False
try:
import readline
import atexit
except:
pass
import select
# this polling loop is here so that Cygwin Python does not "hang" the
# plot window while Python waits for a user input
def handleevents():
while select.select([sys.stdin],[],[],0.02) == ([], [], []):
_root.dooneevent()
#####################################################
# Plotting commands
#####################################################
def plot(name=None,templates=None,**kw):
"""Plotting of data.
Type FUNC(x) to run the graphics program PyPLAUT for the graphical
inspection of bifurcation diagram or solution data in x.
Type FUNC('xxx') to run the graphics program PyPLAUT for the graphical
inspection of the data-files b.xxx and s.xxx.
Type FUNC() to run the graphics program for the graphical
inspection of the output-files 'fort.7' and 'fort.8'.
Values also present in the file autorc, such as
color_list="black green red blue orange" can be provided as
keyword arguments, as well as hide=True which hides the
on-screen plot.
The return value, for instance, p for p=plot(x) will be the handle
for the graphics window.
It has p.config() and p.savefig() methods that allow you to configure
and save the plot. When plotting, see help(p.config) and help(p.savefig)
for details.
"""
options = kw
if type(name) == type("") or name is None:
name = filenameTemplate(name,templates)
parsed = None
else:
parsed = name
# delay importing plotting modules until we actually plot...
global plotterimported, windowPlotter
if not plotterimported:
#from ..graphics import windowPlotter
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from graphics import windowPlotter
plotterimported = True
# root has to be here since I am passing options in
    # a dictionary. Otherwise the default arguments
# get messed up
# NOTE: options set here go to the MegaToplevel!, while
# the return value of this function is the underlying
# grapher. So we add 'grapher_' to all options that don't
    # already have that prefix
for k in list(options):
if k[:8] != 'grapher_':
v = options[k]
del options[k]
options['grapher_'+k] = v
# Get rid of the initial window
if options.get('grapher_hide'):
root=None
else:
root=Tk()
root.withdraw()
if sys.platform == "cygwin":
try:
readline.set_pre_input_hook(handleevents)
global _root
_root=root
except:
pass
if parsed:
nb, ns = None, None
if isinstance(parsed,bifDiag.bifDiag):
nb = parsed
ns = parsed()
elif isinstance(parsed,parseBandS.parseBandS):
nb = parsed.diagram.branches
ns = parsed.solution
elif isinstance(parsed,parseB.parseB):
nb = parsed.branches
elif isinstance(parsed,parseS.parseS):
ns = parsed
elif isinstance(parsed,parseB.AUTOBranch):
nb = parseB.parseBR([parsed])
elif isinstance(parsed,parseS.AUTOSolution):
ns = parseS.parseS([parsed])
if nb:
options["grapher_bifurcation_diagram"] = nb
if ns:
options["grapher_solution"] = ns
else:
n1b = name["bifurcationDiagram"]
n1s = name["solution"]
if n1b is None:
n1b = "fort.7"
n1s = "fort.8"
try:
n1b = parseB.parseBR(n1b)
n1b = bifDiag.bifDiag(n1b,n1s,constants=n1b[0].c)
except IOError:
n1b = bifDiag.bifDiag(n1b,n1s)
options["grapher_bifurcation_diagram"] = n1b
options["grapher_solution"] = n1b()
handle = windowPlotter.WindowPlotter2D(root,**options)
if (not options.get('grapher_hide') or
'graphics.grapher_mpl' not in sys.modules):
handle.update()
try:
def plotterquit():
try:
handle.destroy()
except KeyError:
pass
atexit.register(plotterquit)
except:
pass
info("Created plot\n")
return handle
except:
print("\n-------------------------------------------------------------")
print("Could not import plotting modules, plotting will be disabled.")
print("This is probably because Tkinter is not enabled in your Python installation.")
print("-------------------------------------------------------------\n")
def plot(name=None,templates=None,**kw):
"""2D plotting of data.
Plotting of data has been disabled in the AUTO-07P CLUI.
This is probably because the Python interpretor cannot
load the Tkinter module.
"""
info("2D plotting has been disabled\n")
commandPlotter = command(plot,SIMPLE,"plot",alias=['pl','p2'])
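# Illustrative usage sketch ('xxx' is a hypothetical base name; the exact
# savefig() arguments depend on the grapher and are assumed here):
#   p = plot('xxx', hide=True)   # plot b.xxx/s.xxx without showing a window
#   p.savefig('xxx.png')         # save the figure through the returned handle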
##################################################
# CLUI commands
##################################################
def autohelp(command_string=""):
outputString = ""
# Read in the aliases.
_aliases = {}
parser = AUTOutil.getAUTORC()
if parser.has_section("AUTO_command_aliases"):
for option in parser.options("AUTO_command_aliases"):
cmd = parser.get("AUTO_command_aliases",option)
if cmd not in _aliases:
_aliases[cmd] = []
_aliases[cmd].append(option)
from . import AUTOCommands
if _aliases == {}:
# Now we copy the commands from the module
for key in AUTOCommands.__dict__:
cmd = getattr(AUTOCommands,key)
# Check to see if it is a command
if hasattr(cmd,"fun") and cmd.alias is not None:
_aliases[key] = [cmd.fun.__name__] + cmd.alias
command_list = []
# Here we get a list of the names of all of the commands in AUTOCommands
for key in AUTOCommands.__dict__:
if key in _aliases:
command_list.append(key)
return_value = {}
if not isinstance(command_string, str):
try:
outputString += command_string.__doc__+'\n'
except TypeError:
pass
info(outputString)
return return_value
if len(command_string) == 0:
# If we were created with the empty string return a formatted
# quick reference of all commands as the string and a
# dictionary of all commands as the data. The dictionary
# has an entry for each command which is a dictionary
# with two entries:
# "aliases" a list of the aliases of the command
# "description" a one line description of the command
command_list.sort()
outputString += " ALIASES DESCRIPTION\n"
for cmd in command_list:
return_value[cmd] = {}
return_value[cmd]["aliases"] = []
aliases = ""
for key in _aliases[cmd]:
aliases = aliases + key + " "
return_value[cmd]["aliases"].append(key)
doc = getattr(AUTOCommands,cmd).__doc__
if doc is not None:
outputString += " %-25s"%aliases
doc = doc.splitlines()
return_value[cmd]["description"] = doc[0]
outputString += doc[0]
outputString += "\n"
from . import interactiveBindings
execlist = [{'name' : 'auto', 'alias' : 'ex',
'fn' : interactiveBindings.AUTOInteractiveConsole.ex},
{'name' : 'demofile', 'alias' : 'dmf',
'fn' : interactiveBindings.AUTOInteractiveConsole.dmf}]
for cmdprop in execlist:
cmd = cmdprop['name']
return_value[cmd] = {}
return_value[cmd]["aliases"] = [cmd,cmdprop['alias']]
aliases = cmd + " " + cmdprop['alias']
doc = cmdprop["fn"].__doc__
outputString += " %-25s"%aliases
doc = doc.splitlines()
return_value[cmd]["description"] = doc[0]
outputString += doc[0]
outputString += "\n"
outputString += "\n"
else:
# If we were created with the nonempty string return a formatted
# reference for the given command as the string and a
# dictionary containing information about the command as the data.
# The dictionary has 3 entries:
# "name" the full name of the command
# "aliases" a list of all of the aliases of the command
# "description" a long description of the command
try:
doc = getattr(AUTOCommands,command_string).__doc__
return_value["name"] = command_string
except:
doc = getattr(AUTOCommands,_aliases[command_string]).__doc__
return_value["name"] = _aliases[command_string]
doc = doc.replace("FUNC",command_string)
return_value["short description"] = doc.splitlines()[0]
return_value["long description"] = "\n".join(doc.split("\n")[1:])
# Get rid of the LaTeX stuff from the string that gets returned, but
# NOT from the data portion
doc = doc.replace("\\begin{verbatim}","")
doc = doc.replace("\\end{verbatim}","")
doc = doc + "\n"
        if command_string not in command_list:
# This means help was asked for an alias
for cmd in _aliases:
if command_string in _aliases[cmd]:
command_string = cmd
break
doc = doc + "Command name: "+command_string+"\n"
return_value["aliases"] = []
doc = doc + "Aliases: "
if command_string in _aliases:
for key in _aliases[command_string]:
doc = doc + key + " "
return_value["aliases"].append(key)
outputString += doc+"\n"
info(outputString)
return return_value
commandHelp = command(autohelp)
# This is just a little wrapper around commandHelp which discards the
# data portion of the return. This is because, for the
# interactive command line we don't want it to print out.
def man(command_string=""):
"""Get help on the AUTO commands.
Type 'FUNC' to list all commands with a online help.
Type 'FUNC xxx' to get help for command 'xxx'.
"""
autohelp(command_string)
commandInteractiveHelp = command(man)
##################################################
# GUI commands
##################################################
def printFunc(printFnc,text):
printFnc(text)
info(text)
commandPrintFunc = command(printFunc)
# FIXME: This is not done!!
def gui(type="simple"):
"""Show AUTOs graphical user interface.
Type FUNC() to start AUTOs graphical user interface.
NOTE: This command is not implemented yet.
"""
try:
        from Tkinter import Tk # Python 2
except ImportError:
from tkinter import Tk # Python 3
from .graphics import AUTOgui
# Get rid of the initial window
root = Tk()
root.withdraw()
guic = AUTOgui.AUTOgui(type)
info("GUI created\n")
return guic
commandCreateGUI = command(gui)
# Not ready yet
## def commandRunGeneralGUI(runner):
## tkSimple
## first = commandSetupGeneralRun(eq_name,saved_data,parameter_name)
## second = commandRunnerConfig(runner,makefile="$AUTO_DIR/cmds/cmds.make")
## third = commandRunMakefile(runner,"EQUATION_NAME=%s"%(eq_name))
## return commandMacro((first,second,third))
## commandRunGeneralGUI = command(generalGUI)
############################################
# High level functions
############################################
def splabs(s,typename,templates=None):
"""Return special labels
Type FUNC('xxx',typename) to get a list of labels with the specified
typename, where typename can be one of
'EP', 'MX', 'BP', 'LP', 'UZ', 'HB', 'PD', 'TR', or 'RG'.
This is equivalent to the command
load('xxx')(typename)
which gives a list of the solutions themselves;
load('xxx')(typename).getLabels()
returns the list of labels.
Or use FUNC(s,typename) where s is a parsed solution from sl().
This is equivalent to the command
s(typename).getLabels()
"""
labels = []
for solution in sl(s,templates=templates):
if solution['Type name'] == typename:
labels.append(solution['Label'])
return labels
commandSpecialPointLabels = command(splabs)
############################################
# Testing stuff
############################################
def test():
from . import runAUTO
import sys
def printfunc(text):
stdout.write(text+"\n")
stdout = sys.stdout
f = StringIO()
def getinfo(s):
f.write(s)
def noinfo(s):
pass
global info
runner = runAUTO.runAUTO(
makefile="",
demos_dir=os.path.join(os.environ["AUTO_DIR"],"python"))
clean = commandRunDemo("wav","clean",runner)
first = commandRunDemo("wav","first",runner)
second = commandRunDemo("wav","second",runner)
tmacro = commandMacro((clean,first,first))
printer = commandPrintFunc(printfunc,"Hello World")
quiet = commandRunnerConfig(runner,log=f)
verbose = commandRunnerConfig(runner,log=None)
changedir = commandCd("wav",runner)
constants = commandParseConstantsFile("wav")
changeup = commandCd("..",runner)
verbose()
clean()
first()
tmacro()
quiet()
second()
stdout.write(f.getvalue()+"\n")
printer()
verbose()
clean()
changedir()
constants()
changeup()
if __name__ == "__main__":
test()
|
the-stack_0_10468 | '''
Created on 5/9/2014
@author: victor
'''
import unittest
from pyproct.data.handler.dataHandler import DataHandler
from pyproct.data.handler.test.TestDataLoader import FakeFileLoader
class DataHandlerMock(DataHandler):
def get_loader(self, data_type):
return FakeFileLoader
class FakeSourceGenerator:
def __init__(self, source_list):
self.source_list = source_list
class TestDataHandler(unittest.TestCase):
def test_data_handler(self):
dh = DataHandlerMock({
"type":"any", # As our loader is hardcoded it must not check the data type
# availability
"files":[(1,5), (6,9)]
}, source_generator_class = FakeSourceGenerator)
# We check we have all the elements
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8],
list(dh.get_all_elements()))
# Then we check we can get their sources
self.assertTupleEqual( dh.get_source_of_element(3), (1, 5)) # Element 3 is datum(3) == 4
self.assertTupleEqual( dh.get_source_of_element(4), (1, 5)) # Element 4 is datum(4) == 5
self.assertTupleEqual( dh.get_source_of_element(5), (6, 9)) # Element 5 is datum(5) == 6
self.assertTupleEqual( dh.get_source_of_element(7), (6, 9)) # Element 7 is datum(7) == 8
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() |
the-stack_0_10470 | import string
from time import sleep
from loguru import logger
from tacticalrmm.celery import app
from django.conf import settings
from agents.models import Agent
from .models import ChocoSoftware, ChocoLog, InstalledSoftware
logger.configure(**settings.LOG_CONFIG)
@app.task()
def install_chocolatey(pk, wait=False):
if wait:
sleep(15)
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=120, func="chocolatey.bootstrap", arg="force=True")
if r == "timeout" or r == "error":
logger.error(f"failed to install choco on {agent.salt_id}")
return
try:
output = r.lower()
except Exception as e:
logger.error(f"failed to install choco on {agent.salt_id}: {e}")
return
success = ["chocolatey", "is", "now", "ready"]
if all(x in output for x in success):
agent.choco_installed = True
agent.save(update_fields=["choco_installed"])
logger.info(f"Installed chocolatey on {agent.salt_id}")
return "ok"
else:
logger.error(f"failed to install choco on {agent.salt_id}")
return
@app.task
def update_chocos():
agents = Agent.objects.only("pk")
online = [x for x in agents if x.status == "online" and x.choco_installed]
    while True:
for agent in online:
r = agent.salt_api_cmd(timeout=10, func="test.ping")
if r == "timeout" or r == "error" or (isinstance(r, bool) and not r):
continue
if isinstance(r, bool) and r:
ret = agent.salt_api_cmd(timeout=200, func="chocolatey.list")
if ret == "timeout" or ret == "error":
continue
try:
chocos = [{"name": k, "version": v[0]} for k, v in ret.items()]
except AttributeError:
continue
else:
                    # sometimes chocolatey api is down or buggy and doesn't return the full list of software
if len(chocos) < 4000:
continue
else:
logger.info(f"Chocos were updated using {agent.salt_id}")
ChocoSoftware(chocos=chocos).save()
break
break
return "ok"
@app.task
def get_installed_software(pk):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=30, func="pkg.list_pkgs")
if r == "timeout" or r == "error":
logger.error(f"Timed out trying to get installed software on {agent.salt_id}")
return
printable = set(string.printable)
try:
software = [
{
"name": "".join(filter(lambda x: x in printable, k)),
"version": "".join(filter(lambda x: x in printable, v)),
}
for k, v in r.items()
]
except Exception as e:
logger.error(f"Unable to get installed software on {agent.salt_id}: {e}")
return
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=software).save()
else:
s = agent.installedsoftware_set.get()
s.software = software
s.save(update_fields=["software"])
return "ok"
@app.task
def install_program(pk, name, version):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(
timeout=900, func="chocolatey.install", arg=[name, f"version={version}"],
)
if r == "timeout" or r == "error":
logger.error(f"Failed to install {name} {version} on {agent.salt_id}: timeout")
return
try:
output = r.lower()
except Exception as e:
logger.error(f"Failed to install {name} {version} on {agent.salt_id}: {e}")
return
success = [
"install",
"of",
name.lower(),
"was",
"successful",
"installed",
]
duplicate = [name.lower(), "already", "installed", "--force", "reinstall"]
installed = False
if all(x in output for x in success):
installed = True
logger.info(f"Successfully installed {name} {version} on {agent.salt_id}")
elif all(x in output for x in duplicate):
logger.warning(f"Already installed: {name} {version} on {agent.salt_id}")
else:
logger.error(f"Something went wrong - {name} {version} on {agent.salt_id}")
ChocoLog(
agent=agent, name=name, version=version, message=output, installed=installed
).save()
get_installed_software.delay(agent.pk)
return "ok"
|
the-stack_0_10472 | from mythic_c2_container.C2ProfileBase import *
import sys
# request is a dictionary: {"action": func_name, "message": "the input", "task_id": task id num}
# must return an RPCResponse() object and set .status to an instance of RPCStatus and response to str of message
async def test(request):
response = RPCResponse()
response.status = RPCStatus.Success
response.response = "hello"
#resp = await MythicCallbackRPC.MythicCallbackRPC().add_event_message(message="got a POST message")
return response
# The opsec function is called when a payload is created as a check to see if the parameters supplied are good
# The input for "request" is a dictionary of:
# {
# "action": "opsec",
# "parameters": {
# "param_name": "param_value",
# "param_name2: "param_value2",
# }
# }
# This function should return one of two things:
# For success: {"status": "success", "message": "your success message here" }
# For error: {"status": "error", "error": "your error message here" }
async def opsec(request):
return {"status": "success", "message": "No OPSEC Check Performed"} |
the-stack_0_10476 | import os.path
import sys
import tqdm
import pathlib
import cv2 as cv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import xml.etree.ElementTree as ET
from torchvision import ops
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
POS_ANCHOR_LABEL = 1
NEG_ANCHOR_LABEL = 0
INVALID_ANCHOR_LABEL = -1
class RegionProposalNetwork(nn.Module):
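    """RPN head on top of a frozen MobileNetV2 feature extractor.

    For every feature-map location it predicts, per anchor, 4 box-regression
    offsets and 2 objectness logits, returned with shapes
    (batch, n_locations * n_anchors, 4) and (batch, n_locations * n_anchors, 2).
    """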
def __init__(self, n_anchors, n_inter_channels=256):
super().__init__()
pretrained_model = models.mobilenet_v2(pretrained=True)
for param in pretrained_model.parameters():
param.requires_grad = False
self.backbone = nn.Sequential(*pretrained_model.features[0:7])
self.conv_inter = nn.Conv2d(32, n_inter_channels, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
self.conv_reg = nn.Conv2d(n_inter_channels, 4 * n_anchors, 1)
self.conv_cls = nn.Conv2d(n_inter_channels, 2 * n_anchors, 1)
def forward(self, x):
x = self.backbone(x)
x = self.conv_inter(x)
x = self.relu(x)
pred_reg = self.conv_reg(x)
pred_cls = self.conv_cls(x)
batch_size = x.shape[0]
pred_reg = pred_reg.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
pred_cls = pred_cls.permute(0, 2, 3, 1).reshape(batch_size, -1, 2)
return pred_reg, pred_cls
def _get_rand_sub_selector(selector, indices, n_vals):
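    # Randomly keep at most n_vals of the given indices and return a boolean mask
    # shaped like `selector` that is True only at the kept positions.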
rand_perm = torch.randperm(len(indices))[:n_vals]
subset_indices = indices[rand_perm]
subset_selector = torch.full_like(selector, False)
subset_selector[subset_indices] = True
return subset_selector
def _rpn_cls_loss(pred_cls, cls_labels, pos_selector, neg_selector):
pred_cls_pos = pred_cls[pos_selector]
pred_cls_neg = pred_cls[neg_selector]
cls_loss_pos = F.cross_entropy(
pred_cls_pos, cls_labels[pos_selector], reduction='sum')
cls_loss_neg = F.cross_entropy(
pred_cls_neg, cls_labels[neg_selector], reduction='sum')
n_valid = pred_cls_pos.numel() + pred_cls_neg.numel()
cls_loss = (cls_loss_pos + cls_loss_neg) / n_valid
return cls_loss
def _rpn_reg_loss(pred_reg, reg_targets, pos_selector):
pred_reg_valid = pred_reg[pos_selector]
if len(pred_reg_valid) == 0:
return 0
reg_targets_valid = reg_targets[pos_selector]
reg_loss = F.smooth_l1_loss(pred_reg_valid, reg_targets_valid)
return reg_loss
class RPNClsAndRegLoss(nn.Module):
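    """Balanced RPN loss: cross-entropy over a random subsample of positive and
    negative anchors plus smooth-L1 box regression on the sampled positives."""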
def __init__(self, n_pos_samples=32, n_neg_samples=32, reg_balance=0.5):
super().__init__()
self.n_pos_samples = n_pos_samples
self.n_neg_samples = n_neg_samples
assert 0 < reg_balance < 1
self.reg_balance = reg_balance
def forward(self, pred_reg, pred_cls, reg_targets, cls_labels):
pos_selector = (cls_labels == POS_ANCHOR_LABEL)
neg_selector = (cls_labels == NEG_ANCHOR_LABEL)
pos_indices = torch.where(pos_selector)[0]
neg_indices = torch.where(neg_selector)[0]
n_pos_found = len(pos_indices)
n_neg_found = len(neg_indices)
n_pos_subset = min(self.n_pos_samples, n_pos_found)
n_pos_missing = max(self.n_pos_samples - n_pos_found, 0)
n_neg_subset = min(self.n_neg_samples + n_pos_missing, n_neg_found)
pos_subset_selector = _get_rand_sub_selector(
pos_selector, pos_indices, n_pos_subset)
neg_subset_selector = _get_rand_sub_selector(
neg_selector, neg_indices, n_neg_subset)
cls_loss = _rpn_cls_loss(
pred_cls, cls_labels, pos_subset_selector, neg_subset_selector)
reg_loss = _rpn_reg_loss(pred_reg, reg_targets, pos_subset_selector)
loss = (self.reg_balance * reg_loss +
(1 - self.reg_balance) * cls_loss)
return loss
def generate_anchors(scales, ratios, response_size, total_stride):
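    """Return every anchor as a (cx, cy, w, h) row: one box per scale/ratio pair,
    centred on each cell of the response_size x response_size feature map."""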
n_response_cells = response_size ** 2
n_anchors = len(scales) * len(ratios)
feature_cell_size = total_stride * total_stride
widths = torch.sqrt(feature_cell_size * ratios)
heights = widths / ratios
widths_scaled = torch.outer(scales, widths).flatten()
heights_scaled = torch.outer(scales, heights).flatten()
widths_scaled = torch.tile(widths_scaled, (n_response_cells,))
heights_scaled = torch.tile(heights_scaled, (n_response_cells,))
coords = torch.arange(response_size) * total_stride
centers = coords + (total_stride / 2)
xs, ys = torch.meshgrid(centers, centers)
xs = torch.tile(xs.flatten(), (n_anchors,))
ys = torch.tile(ys.flatten(), (n_anchors,))
anchors = torch.vstack((xs, ys, widths_scaled, heights_scaled)).squeeze().T
return anchors
class RPNTargetBuilder:
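    """Turn ground-truth boxes into per-anchor regression targets and
    positive/negative/ignore classification labels using IoU thresholds."""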
def __init__(
self, anchors, img_width, img_height, pos_thresh=0.5,
neg_thresh=0.2):
self.pos_thresh = pos_thresh
self.neg_thresh = neg_thresh
self.anchors = anchors
self.anchors_xyxy = ops.box_convert(anchors, 'cxcywh', 'xyxy') # [A, 4]
self.valid_anchors_selector = (
(self.anchors_xyxy[:, 0] >= 0) &
(self.anchors_xyxy[:, 1] >= 0) &
(self.anchors_xyxy[:, 2] < img_width) &
(self.anchors_xyxy[:, 3] < img_height))
def build_reg_and_cls_targets(self, boxes):
boxes_xyxy = ops.box_convert(boxes, 'cxcywh', 'xyxy') # [B, 4]
iou_dist = ops.box_iou(self.anchors_xyxy, boxes_xyxy) # [A, B]
closest_box_indices = torch.argmax(iou_dist, dim=1) # [A, 1]
target_boxes = boxes[closest_box_indices] # [A, 4]
# Both [A, 2]
xy_targets = (
(target_boxes[..., :2] - self.anchors[..., :2]) /
self.anchors[..., 2:])
wh_targets = torch.log(target_boxes[..., 2:] / self.anchors[..., 2:])
reg_target = torch.hstack((xy_targets, wh_targets)) # [A, 4]
pos_selector = torch.any(iou_dist > self.pos_thresh, dim=1) # [A,]
neg_selector = torch.all(iou_dist < self.neg_thresh, dim=1) # [A,]
valid_pos_selector = pos_selector & self.valid_anchors_selector # [A,]
valid_neg_selector = neg_selector & self.valid_anchors_selector # [A,]
cls_target = torch.full(
(len(self.anchors),), INVALID_ANCHOR_LABEL,
device=boxes.device) # [A,]
cls_target[valid_pos_selector] = POS_ANCHOR_LABEL # [A,]
cls_target[valid_neg_selector] = NEG_ANCHOR_LABEL # [A,]
return reg_target, cls_target
def read_img_with_boxes(imgs_dir, anno_file):
tree = ET.parse(str(anno_file))
root = tree.getroot()
img_file_name = root.find("filename").text
img = Image.open(str(imgs_dir / img_file_name))
if img.mode in ('L', 'RGBA'):
img = img.convert('RGB')
box_elems = ("xmin", "ymin", "xmax", "ymax")
boxes = []
for box in root.findall(".//bndbox"):
box = [int(box.find(elem).text) for elem in box_elems]
boxes.append(box)
boxes = ops.box_convert(torch.tensor(boxes), 'xyxy', 'cxcywh')
return img, boxes
class ImgAndBoxesSyncResize:
def __init__(self, target_width, target_height, resample=Image.BICUBIC):
self.target_width = target_width
self.target_height = target_height
self.resample = resample
def __call__(self, img, boxes):
img_resized = img.resize(
(self.target_width, self.target_height), self.resample)
width_scale = self.target_width / img.size[0]
height_scale = self.target_height / img.size[1]
scale = torch.tensor(
((width_scale, height_scale, width_scale, height_scale),))
boxes_resized = boxes * scale
return img_resized, boxes_resized
class RoadSignDetectionDataset(Dataset):
def __init__(
self, root_dir_path, img_transforms=None,
img_boxes_transforms=None):
self.img_transforms = img_transforms
self.img_boxes_transforms = img_boxes_transforms
self.imgs = []
self.boxes = []
self._read_dataset(root_dir_path)
def __getitem__(self, item):
img = self.imgs[item]
boxes = self.boxes[item]
if self.img_boxes_transforms:
img, boxes = self.img_boxes_transforms(img, boxes)
if self.img_transforms:
img = self.img_transforms(img)
return img, boxes
def __len__(self):
return len(self.imgs)
def _read_dataset(self, root_dir_path):
root_dir = pathlib.Path(root_dir_path)
annos_dir = root_dir / "annotations"
imgs_dir = root_dir / "images"
for anno_file in annos_dir.rglob("road*.xml"):
img, boxes = read_img_with_boxes(imgs_dir, anno_file)
self.imgs.append(img)
self.boxes.append(boxes)
if __name__ == '__main__':
device = torch.device('cuda')
pin_memory = True
root_dir_path = "../../../datasets/road_sign_detection"
response_size = 28
total_stride = 8
img_size = 224
scales = torch.tensor((4.0, 8.0, 16.0))
ratios = torch.tensor((0.5, 1.0, 2.0))
n_anchors = len(scales) * len(ratios)
n_epochs = 100
anchors = generate_anchors(
scales, ratios, response_size, total_stride).to(device)
rpn = RegionProposalNetwork(n_anchors).to(device)
criterion = RPNClsAndRegLoss().to(device)
target_builder = RPNTargetBuilder(anchors, img_size, img_size)
optimizer = torch.optim.Adam(rpn.parameters(), lr=1.e-3)
img = torch.zeros((1, 3, 224, 224)).to(device)
rpn(img)
normalize = transforms.Normalize(
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
img_transforms = transforms.Compose((
transforms.ToTensor(), normalize,))
img_boxes_transforms = ImgAndBoxesSyncResize(img_size, img_size)
dataset = RoadSignDetectionDataset(
root_dir_path, img_transforms, img_boxes_transforms)
n_workers = 1
train_loader = DataLoader(
dataset, batch_size=1, shuffle=True, num_workers=n_workers,
pin_memory=pin_memory)
def run_epoch(epoch, device, backward=True):
rpn.train(backward)
losses_sum = 0.0
n_batches = len(dataset)
mode_text = "train" if backward else "valid"
epoch_text = f"[{mode_text}] epoch: {epoch:3d}/{n_epochs}"
tqdm_bar = tqdm.tqdm(total=n_batches, file=sys.stdout)
with torch.set_grad_enabled(backward), tqdm_bar as pbar:
for batch, (img, boxes) in enumerate(train_loader, start=1):
img = img.to(device)
boxes = boxes.to(device).squeeze(dim=0) # Remove batch dim.
pred_reg, pred_cls = rpn(img)
pred_reg = pred_reg.squeeze()
pred_cls = pred_cls.squeeze()
reg_target, cls_target =\
target_builder.build_reg_and_cls_targets(boxes)
loss = criterion(pred_reg, pred_cls, reg_target, cls_target)
if backward:
optimizer.zero_grad()
loss.backward()
optimizer.step()
curr_loss = loss.item()
losses_sum += curr_loss
curr_batch_loss = losses_sum / batch
loss_text = f"loss: {curr_loss:.5f} ({curr_batch_loss:.5f})"
pbar.set_description(f"{epoch_text} | {loss_text}")
pbar.update()
batch_loss = losses_sum / n_batches
return batch_loss
for epoch in range(1, n_epochs + 1):
run_epoch(epoch, device)
checkpoint_file_path = "../rpn_checkpoint.pth"
checkpoint = rpn.state_dict()
torch.save(checkpoint, checkpoint_file_path)
|
the-stack_0_10477 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import re
import sys
from command import InteractiveCommand
from editor import Editor
from error import HookError, UploadError
from git_command import GitCommand
from project import RepoHook
from pyversion import is_python3
# pylint:disable=W0622
if not is_python3():
input = raw_input
else:
unicode = str
# pylint:enable=W0622
UNUSUAL_COMMIT_THRESHOLD = 1000
def _ConfirmManyUploads(multiple_branches=False):
if multiple_branches:
print('ATTENTION: One or more branches has an unusually high number '
'of commits.')
else:
print('ATTENTION: You are uploading an unusually high number of commits.')
print('YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across '
'branches?)')
answer = input("If you are sure you intend to do this, type 'yes': ").strip()
return answer == "yes"
def _die(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
sys.exit(1)
def _SplitEmails(values):
result = []
for value in values:
result.extend([s.strip() for s in value.split(',')])
return result
class Upload(InteractiveCommand):
common = True
helpSummary = "Upload changes for code review"
helpUsage = """
%prog [--re --cc] [<project>]...
"""
helpDescription = """
The '%prog' command is used to send changes to the Gerrit Code
Review system. It searches for topic branches in local projects
that have not yet been published for review. If multiple topic
branches are found, '%prog' opens an editor to allow the user to
select which branches to upload.
'%prog' searches for uploadable changes in all projects listed at
the command line. Projects can be specified either by name, or by
a relative or absolute path to the project's local directory. If no
projects are specified, '%prog' will search for uploadable changes
in all projects listed in the manifest.
If the --reviewers or --cc options are passed, those emails are
added to the respective list of users, and emails are sent to any
new users. Users passed as --reviewers must already be registered
with the code review system, or the upload will fail.
Configuration
-------------
review.URL.autoupload:
To disable the "Upload ... (y/N)?" prompt, you can set a per-project
or global Git configuration option. If review.URL.autoupload is set
to "true" then repo will assume you always answer "y" at the prompt,
and will not prompt you further. If it is set to "false" then repo
will assume you always answer "n", and will abort.
review.URL.autoreviewer:
To automatically append a user or mailing list to reviews, you can set
a per-project or global Git option to do so.
review.URL.autocopy:
To automatically copy a user or mailing list to all uploaded reviews,
you can set a per-project or global Git option to do so. Specifically,
review.URL.autocopy can be set to a comma separated list of reviewers
who you always want copied on all uploads with a non-empty --re
argument.
review.URL.username:
Override the username used to connect to Gerrit Code Review.
By default the local part of the email address is used.
The URL must match the review URL listed in the manifest XML file,
or in the .git/config within the project. For example:
[remote "origin"]
url = git://git.example.com/project.git
review = http://review.example.com/
[review "http://review.example.com/"]
autoupload = true
autocopy = [email protected],[email protected]
review.URL.uploadtopic:
To add a topic branch whenever uploading a commit, you can set a
per-project or global Git option to do so. If review.URL.uploadtopic
is set to "true" then repo will assume you always want the equivalent
of the -t option to the repo command. If unset or set to "false" then
repo will make use of only the command line option.
References
----------
Gerrit Code Review: http://code.google.com/p/gerrit/
"""
def _Options(self, p):
p.add_option('-t',
dest='auto_topic', action='store_true',
help='Send local branch name to Gerrit Code Review')
p.add_option('--re', '--reviewers',
type='string', action='append', dest='reviewers',
help='Request reviews from these people.')
p.add_option('--cc',
type='string', action='append', dest='cc',
help='Also send email to these email addresses.')
p.add_option('--br',
type='string', action='store', dest='branch',
help='Branch to upload.')
p.add_option('--cbr', '--current-branch',
dest='current_branch', action='store_true',
help='Upload current git branch.')
p.add_option('-d', '--draft',
action='store_true', dest='draft', default=False,
help='If specified, upload as a draft.')
p.add_option('-D', '--destination', '--dest',
type='string', action='store', dest='dest_branch',
metavar='BRANCH',
help='Submit for review on this target branch.')
# Options relating to upload hook. Note that verify and no-verify are NOT
# opposites of each other, which is why they store to different locations.
# We are using them to match 'git commit' syntax.
#
# Combinations:
# - no-verify=False, verify=False (DEFAULT):
# If stdout is a tty, can prompt about running upload hooks if needed.
# If user denies running hooks, the upload is cancelled. If stdout is
# not a tty and we would need to prompt about upload hooks, upload is
# cancelled.
# - no-verify=False, verify=True:
# Always run upload hooks with no prompt.
# - no-verify=True, verify=False:
# Never run upload hooks, but upload anyway (AKA bypass hooks).
# - no-verify=True, verify=True:
# Invalid
p.add_option('--no-verify',
dest='bypass_hooks', action='store_true',
help='Do not run the upload hook.')
p.add_option('--verify',
dest='allow_all_hooks', action='store_true',
help='Run the upload hook without prompting.')
def _SingleBranch(self, opt, branch, people):
project = branch.project
name = branch.name
remote = project.GetBranch(name).remote
key = 'review.%s.autoupload' % remote.review
answer = project.config.GetBoolean(key)
if answer is False:
_die("upload blocked by %s = false" % key)
if answer is None:
date = branch.date
commit_list = branch.commits
destination = opt.dest_branch or project.dest_branch or project.revisionExpr
print('Upload project %s/ to remote branch %s:' % (project.relpath, destination))
print(' branch %s (%2d commit%s, %s):' % (
name,
len(commit_list),
len(commit_list) != 1 and 's' or '',
date))
for commit in commit_list:
print(' %s' % commit)
sys.stdout.write('to %s (y/N)? ' % remote.review)
answer = sys.stdin.readline().strip().lower()
answer = answer in ('y', 'yes', '1', 'true', 't')
if answer:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
answer = _ConfirmManyUploads()
if answer:
self._UploadAndReport(opt, [branch], people)
else:
_die("upload aborted by user")
def _MultipleBranches(self, opt, pending, people):
projects = {}
branches = {}
script = []
script.append('# Uncomment the branches to upload:')
for project, avail in pending:
script.append('#')
script.append('# project %s/:' % project.relpath)
b = {}
for branch in avail:
if branch is None:
continue
name = branch.name
date = branch.date
commit_list = branch.commits
if b:
script.append('#')
destination = opt.dest_branch or project.dest_branch or project.revisionExpr
script.append('# branch %s (%2d commit%s, %s) to remote branch %s:' % (
name,
len(commit_list),
len(commit_list) != 1 and 's' or '',
date,
destination))
for commit in commit_list:
script.append('# %s' % commit)
b[name] = branch
projects[project.relpath] = project
branches[project.name] = b
script.append('')
script = [ x.encode('utf-8')
if issubclass(type(x), unicode)
else x
for x in script ]
script = Editor.EditString("\n".join(script)).split("\n")
project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$')
branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*')
project = None
todo = []
for line in script:
m = project_re.match(line)
if m:
name = m.group(1)
project = projects.get(name)
if not project:
_die('project %s not available for upload', name)
continue
m = branch_re.match(line)
if m:
name = m.group(1)
if not project:
_die('project for branch %s not in script', name)
branch = branches[project.name].get(name)
if not branch:
_die('branch %s not in %s', name, project.relpath)
todo.append(branch)
if not todo:
_die("nothing uncommented for upload")
many_commits = False
for branch in todo:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
many_commits = True
break
if many_commits:
if not _ConfirmManyUploads(multiple_branches=True):
_die("upload aborted by user")
self._UploadAndReport(opt, todo, people)
def _AppendAutoList(self, branch, people):
"""
Appends the list of reviewers in the git project's config.
Appends the list of users in the CC list in the git project's config if a
non-empty reviewer list was found.
"""
name = branch.name
project = branch.project
key = 'review.%s.autoreviewer' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if not raw_list is None:
people[0].extend([entry.strip() for entry in raw_list.split(',')])
key = 'review.%s.autocopy' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if not raw_list is None and len(people[0]) > 0:
people[1].extend([entry.strip() for entry in raw_list.split(',')])
def _FindGerritChange(self, branch):
last_pub = branch.project.WasPublished(branch.name)
if last_pub is None:
return ""
refs = branch.GetPublishedRefs()
try:
# refs/changes/XYZ/N --> XYZ
return refs.get(last_pub).split('/')[-2]
except (AttributeError, IndexError):
return ""
def _UploadAndReport(self, opt, todo, original_people):
have_errors = False
for branch in todo:
try:
people = copy.deepcopy(original_people)
self._AppendAutoList(branch, people)
# Check if there are local changes that may have been forgotten
changes = branch.project.UncommitedFiles()
if changes:
key = 'review.%s.autoupload' % branch.project.remote.review
answer = branch.project.config.GetBoolean(key)
# if they want to auto upload, let's not ask because it could be automated
if answer is None:
sys.stdout.write('Uncommitted changes in ' + branch.project.name)
sys.stdout.write(' (did you forget to amend?):\n')
sys.stdout.write('\n'.join(changes) + '\n')
sys.stdout.write('Continue uploading? (y/N) ')
a = sys.stdin.readline().strip().lower()
if a not in ('y', 'yes', 't', 'true', 'on'):
print("skipping upload", file=sys.stderr)
branch.uploaded = False
branch.error = 'User aborted'
continue
# Check if topic branches should be sent to the server during upload
if opt.auto_topic is not True:
key = 'review.%s.uploadtopic' % branch.project.remote.review
opt.auto_topic = branch.project.config.GetBoolean(key)
destination = opt.dest_branch or branch.project.dest_branch
# Make sure our local branch is not setup to track a different remote branch
merge_branch = self._GetMergeBranch(branch.project)
if destination:
full_dest = 'refs/heads/%s' % destination
if not opt.dest_branch and merge_branch and merge_branch != full_dest:
print('merge branch %s does not match destination branch %s'
% (merge_branch, full_dest))
print('skipping upload.')
print('Please use `--destination %s` if this is intentional'
% destination)
branch.uploaded = False
continue
branch.UploadForReview(people, auto_topic=opt.auto_topic, draft=opt.draft, dest_branch=destination)
branch.uploaded = True
except UploadError as e:
branch.error = e
branch.uploaded = False
have_errors = True
print(file=sys.stderr)
print('----------------------------------------------------------------------', file=sys.stderr)
if have_errors:
for branch in todo:
if not branch.uploaded:
if len(str(branch.error)) <= 30:
fmt = ' (%s)'
else:
fmt = '\n (%s)'
print(('[FAILED] %-15s %-15s' + fmt) % (
branch.project.relpath + '/', \
branch.name, \
str(branch.error)),
file=sys.stderr)
print()
for branch in todo:
if branch.uploaded:
print('[OK ] %-15s %s' % (
branch.project.relpath + '/',
branch.name),
file=sys.stderr)
if have_errors:
sys.exit(1)
def _GetMergeBranch(self, project):
p = GitCommand(project,
['rev-parse', '--abbrev-ref', 'HEAD'],
capture_stdout = True,
capture_stderr = True)
p.Wait()
local_branch = p.stdout.strip()
p = GitCommand(project,
['config', '--get', 'branch.%s.merge' % local_branch],
capture_stdout = True,
capture_stderr = True)
p.Wait()
merge_branch = p.stdout.strip()
return merge_branch
def Execute(self, opt, args):
project_list = self.GetProjects(args)
pending = []
reviewers = []
cc = []
branch = None
if opt.branch:
branch = opt.branch
for project in project_list:
if opt.current_branch:
cbr = project.CurrentBranch
up_branch = project.GetUploadableBranch(cbr)
if up_branch:
avail = [up_branch]
else:
avail = None
print('ERROR: Current branch (%s) not uploadable. '
'You may be able to type '
'"git branch --set-upstream-to m/master" to fix '
'your branch.' % str(cbr),
file=sys.stderr)
else:
avail = project.GetUploadableBranches(branch)
if avail:
pending.append((project, avail))
if pending and (not opt.bypass_hooks):
hook = RepoHook('pre-upload', self.manifest.repo_hooks_project,
self.manifest.topdir, abort_if_user_denies=True)
pending_proj_names = [project.name for (project, avail) in pending]
pending_worktrees = [project.worktree for (project, avail) in pending]
try:
hook.Run(opt.allow_all_hooks, project_list=pending_proj_names,
worktree_list=pending_worktrees)
except HookError as e:
print("ERROR: %s" % str(e), file=sys.stderr)
return
if opt.reviewers:
reviewers = _SplitEmails(opt.reviewers)
if opt.cc:
cc = _SplitEmails(opt.cc)
people = (reviewers, cc)
if not pending:
print("no branches ready for upload", file=sys.stderr)
elif len(pending) == 1 and len(pending[0][1]) == 1:
self._SingleBranch(opt, pending[0][1][0], people)
else:
self._MultipleBranches(opt, pending, people)
|
the-stack_0_10478 | # Copyright 2022 Maximilien Le Clei.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import torch
def compute_padding(d_input):
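    # Build an F.pad-style padding tuple (last dimension first): dims of size 1 get
    # (1, 1), dims of size 2 get (0, 1), and larger dims are left unpadded.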
padding = ()
for d in d_input[-1:0:-1]:
if d == 1:
padding += (1,1)
elif d == 2:
padding += (0,1)
else:
padding += (0,0)
return padding
def neg(tup):
return tuple(-x for x in tup)
def avg_pool(x, d):
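    # Downsample a batch-of-one NCHW tensor by a factor of d with OpenCV's area
    # interpolation, then restore the original NCHW layout.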
_, _, h, w = x.shape
x = x.numpy()
x = x[0]
x = np.transpose(x, (1, 2, 0))
    # cv2.resize expects dsize as (width, height)
    x = cv2.resize(x, (w//d, h//d), interpolation=cv2.INTER_AREA)
if x.ndim == 2:
x = x[:, :, None]
x = np.transpose(x, (2, 0, 1))
x = x[None, :, :, :]
x = torch.Tensor(x)
return x
def torch_cat(x, i):
    # Convert each tensor to a numpy array before concatenating along axis i.
    x = [x_i.numpy() for x_i in x]
    return torch.Tensor(np.concatenate(x, i)) |
the-stack_0_10479 | """Tests for the templatetags of the markdown_utils app."""
from django.test import TestCase
from ..templatetags import markdown_utils_tags as tags
class RenderMarkdownTestCase(TestCase):
"""Tests for the ``render_markdown`` assignment tag."""
longMessage = True
def test_tag(self):
result = tags.render_markdown('# Foobar')
self.assertEqual(result, '<h1>Foobar</h1>', msg=(
'Should render the given input correctly.'))
result = tags.render_markdown('Foobar\nBarfoo')
self.assertEqual(result, '<p>Foobar<br />\nBarfoo</p>', msg=(
'Should render the given input correctly.'))
|
the-stack_0_10480 | """Measurements collection."""
from datetime import datetime, timedelta
from typing import Optional, cast
import pymongo
from pymongo.database import Database
from model.metric import Metric
from model.queries import get_attribute_type, get_measured_attribute
from server_utilities.functions import iso_timestamp, percentage
from server_utilities.type import MeasurementId, MetricId, Scale, Status, TargetType
def latest_measurement(database: Database, metric_uuid: MetricId):
"""Return the latest measurement."""
return database.measurements.find_one(filter={"metric_uuid": metric_uuid}, sort=[("start", pymongo.DESCENDING)])
def latest_successful_measurement(database: Database, metric_uuid: MetricId):
"""Return the latest successful measurement."""
return database.measurements.find_one(
filter={"metric_uuid": metric_uuid, "sources.value": {"$ne": None}}, sort=[("start", pymongo.DESCENDING)]
)
def recent_measurements_by_metric_uuid(database: Database, max_iso_timestamp: str = "", days=7):
"""Return all recent measurements."""
max_iso_timestamp = max_iso_timestamp or iso_timestamp()
min_iso_timestamp = (datetime.fromisoformat(max_iso_timestamp) - timedelta(days=days)).isoformat()
recent_measurements = database.measurements.find(
filter={"end": {"$gte": min_iso_timestamp}, "start": {"$lte": max_iso_timestamp}},
sort=[("start", pymongo.ASCENDING)],
projection={"_id": False, "sources.entities": False},
)
measurements_by_metric_uuid: dict[MetricId, list] = {}
for measurement in recent_measurements:
measurements_by_metric_uuid.setdefault(measurement["metric_uuid"], []).append(measurement)
return measurements_by_metric_uuid
def measurements_by_metric(
database: Database,
*metric_uuids: MetricId,
min_iso_timestamp: str = "",
max_iso_timestamp: str = "",
):
"""Return all measurements for one metric, without the entities, except for the most recent one."""
measurement_filter: dict = {"metric_uuid": {"$in": metric_uuids}}
if min_iso_timestamp:
measurement_filter["end"] = {"$gt": min_iso_timestamp}
if max_iso_timestamp:
measurement_filter["start"] = {"$lt": max_iso_timestamp}
latest_with_entities = database.measurements.find_one(
measurement_filter, sort=[("start", pymongo.DESCENDING)], projection={"_id": False}
)
if not latest_with_entities:
return []
all_measurements_without_entities = database.measurements.find(
measurement_filter, projection={"_id": False, "sources.entities": False}
)
return list(all_measurements_without_entities)[:-1] + [latest_with_entities]
def count_measurements(database: Database) -> int:
"""Return the number of measurements."""
return int(database.measurements.count_documents(filter={}))
def update_measurement_end(database: Database, measurement_id: MeasurementId):
"""Set the end date and time of the measurement to the current date and time."""
return database.measurements.update_one(filter={"_id": measurement_id}, update={"$set": {"end": iso_timestamp()}})
def insert_new_measurement(
database: Database, data_model, metric_data: dict, measurement: dict, previous_measurement: dict
) -> dict:
"""Insert a new measurement."""
if "_id" in measurement:
del measurement["_id"]
metric = Metric(data_model, metric_data)
metric_type = data_model["metrics"][metric.type()]
measurement["start"] = measurement["end"] = now = iso_timestamp()
for scale in metric_type["scales"]:
value = calculate_measurement_value(data_model, metric, measurement["sources"], scale)
status = metric.status(value)
measurement[scale] = dict(value=value, status=status, direction=metric.direction())
# We can't cover determine_status_start() returning False in the feature tests because all new measurements have
# a status start timestamp, hence the pragma: no cover-behave:
if status_start := determine_status_start(status, previous_measurement, scale, now): # pragma: no cover-behave
measurement[scale]["status_start"] = status_start
for target in ("target", "near_target", "debt_target"):
target_type = cast(TargetType, target)
measurement[scale][target] = determine_target_value(metric, measurement, scale, target_type)
database.measurements.insert_one(measurement)
del measurement["_id"]
return measurement
def calculate_measurement_value(data_model, metric: Metric, sources, scale: Scale) -> Optional[str]:
"""Calculate the measurement value from the source measurements."""
if not sources or any(source["parse_error"] or source["connection_error"] for source in sources):
return None
values = [int(source["value"]) - value_of_entities_to_ignore(data_model, metric, source) for source in sources]
add = metric.addition()
if scale == "percentage":
direction = metric.direction()
totals = [int(source["total"]) for source in sources]
if add is sum:
values, totals = [sum(values)], [sum(totals)]
values = [percentage(value, total, direction) for value, total in zip(values, totals)]
return str(add(values))
def value_of_entities_to_ignore(data_model, metric: Metric, source) -> int:
"""Return the value of ignored entities, i.e. entities marked as fixed, false positive or won't fix.
If the entities have a measured attribute, return the sum of the measured attributes of the ignored
    entities, otherwise return the number of ignored entities. For example, if the metric is the amount of ready
user story points, the source entities are user stories and the measured attribute is the amount of story
points of each user story.
"""
entities = source.get("entity_user_data", {}).items()
ignored_entities = [
entity[0] for entity in entities if entity[1].get("status") in ("fixed", "false_positive", "wont_fix")
]
source_type = metric.sources()[source["source_uuid"]]["type"]
if attribute := get_measured_attribute(data_model, metric.type(), source_type):
entity = data_model["sources"][source_type]["entities"].get(metric.type(), {})
attribute_type = get_attribute_type(entity, attribute)
convert = dict(float=float, integer=int, minutes=int)[attribute_type]
value = sum(convert(entity[attribute]) for entity in source["entities"] if entity["key"] in ignored_entities)
else:
value = len(ignored_entities)
return int(value)
def determine_status_start(
current_status: Optional[Status], previous_measurement: dict, scale: Scale, now: str
) -> Optional[str]:
"""Determine the date time since when the metric has the current status."""
if previous_measurement:
previous_status = previous_measurement.get(scale, {}).get("status")
if current_status == previous_status:
return str(previous_measurement.get(scale, {}).get("status_start", "")) or None
return now
def determine_target_value(metric: Metric, measurement: dict, scale: Scale, target: TargetType):
"""Determine the target, near target or debt target value."""
target_value = metric.get_target(target) if scale == metric.scale() else measurement.get(scale, {}).get(target)
return None if target == "debt_target" and metric.accept_debt_expired() else target_value
def changelog(database: Database, nr_changes: int, **uuids):
"""Return the changelog for the measurements belonging to the items with the specific uuids."""
return database.measurements.find(
filter={"delta.uuids": {"$in": list(uuids.values())}},
sort=[("start", pymongo.DESCENDING)],
limit=nr_changes,
projection=["delta", "start"],
)
|
the-stack_0_10482 | import os
import asyncio
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method sets up the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
)
),
host=__default_host__,
port=self.args.port_expose,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
self._connection_pool.start()
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
the-stack_0_10484 | from multiprocessing.connection import wait
from bot import dp
from aiogram import types
from aiogram.dispatcher.storage import FSMContext
from filters import Main, IsOwner
from functions.client import cidSelect, getpasswordState, sidSelect, pidSelect, cnSelect, sftSelect, scidSelect, getloginState, schoolInfo
from states import addAccount
from utils.db import db
from callbacks import cb_account
from functions.sgo import ns_sessions
from netschoolapi import NetSchoolAPI
from utils.db.data import Account
@dp.callback_query_handler(Main(), cb_account.filter(action='login'), state='*')
async def select_login_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
await call.answer()
await getloginState(call.message, state)
@dp.callback_query_handler(Main(), cb_account.filter(action='select_scid'), state=addAccount.scid)
async def select_sft_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
await call.answer()
account = await Account.get_registerAccount(call.from_user.id)
ns = ns_sessions[account['id']]
ns._prelogin_data['scid'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await schoolInfo(call.message, account['id'])
@dp.callback_query_handler(Main(), cb_account.filter(action='select_sft'), state=[addAccount.sft, '*'])
async def select_sft_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
account = await Account.get_registerAccount(call.from_user.id)
ns = ns_sessions[account['id']]
ns._prelogin_data['sft'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await call.answer()
await scidSelect(call.message, account['id'])
@dp.callback_query_handler(Main(), cb_account.filter(action='select_cn'), state=addAccount.cn)
async def select_cn_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
account = await Account.get_registerAccount(call.from_user.id)
ns = ns_sessions[account['id']]
ns._prelogin_data['cn'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await call.answer()
await sftSelect(call.message, account['id'])
@dp.callback_query_handler(Main(), cb_account.filter(action='select_pid'), state=addAccount.pid)
async def select_pid_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
account = await Account.get_registerAccount(call.from_user.id)
ns = ns_sessions[account['id']]
ns._prelogin_data['pid'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await call.answer()
await cnSelect(call.message, account['id'])
@dp.callback_query_handler(Main(), cb_account.filter(action='select_sid'), state=addAccount.sid)
async def select_sid_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
account = await Account.get_registerAccount(call.from_user.id)
ns = ns_sessions[account['id']]
ns._prelogin_data['sid'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await call.answer()
await pidSelect(call.message, account['id'])
@dp.callback_query_handler(Main(), cb_account.filter(action='select_cid'), state=addAccount.cid)
async def select_cid_handler(call: types.CallbackQuery, callback_data: dict, state=FSMContext):
account = await Account.get_registerAccount(call.from_user.id, 'id')
ns = ns_sessions[account['id']]
ns._prelogin_data['cid'] = callback_data.get('value')
await Account.update(account['id'], **ns._prelogin_data)
await call.answer()
await sidSelect(call.message, account['id'])
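# Entry point for adding a new account: show the three most-used cached regions if
# there are any, otherwise ask the user to send the URL of their SGO instance.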
@dp.callback_query_handler(Main(), cb_account.filter(action='add', value=''), state=[addAccount.url, addAccount.wait_url, '*'])
async def account_add(call: types.CallbackQuery, state=FSMContext):
# register_account = await Account.get_registerAccount(call.from_user.id)
# if register_account:
# for account in register_account:
# print(account)
# if not account:
# ...
await call.answer()
await addAccount.url.set()
regions = await db.executeall("SELECT * FROM regions ORDER BY users_count DESC NULLS LAST LIMIT 3")
if regions:
await nsSelect(call.message)
else:
async with state.proxy() as data:
data['message'] = call.message
await call.message.edit_text("📎 Введите ссылку на ваш СГО")
await addAccount.wait_url.set()
@dp.callback_query_handler(Main(), cb_account.filter(action='region_select'), state=['*'])
async def regionSelect(call: types.CallbackQuery, callback_data: dict):
region = await db.execute("SELECT url FROM regions WHERE id = %s", [callback_data['value']])
account = await Account.add(call.from_user.id, region[0])
await addAccount.cid.set()
ns_sessions[account['id']] = NetSchoolAPI(region[0])
await cidSelect(account['id'], call.message)
async def nsSelect(message: types.Message):
regions = await db.executeall("SELECT * FROM regions ORDER BY users_count DESC NULLS LAST LIMIT 3")
markup = types.InlineKeyboardMarkup()
button_loc = types.InlineKeyboardButton(
"📍 Определить регион", callback_data=cb_account.new(action='geo', value=''))
button_custom = types.InlineKeyboardButton(
"✏️ Ввести свою ссылку", callback_data=cb_account.new(action='url', value=''))
markup.row(button_loc, button_custom)
for x in regions:
markup.add(types.InlineKeyboardButton(
x[1], callback_data=cb_account.new(action='region_select', value=str(x[0]))))
text = "🏙 Выбрите город или другой метод добавления Сетевого Города. Образование"
if message.text != text:
await message.edit_text(text, reply_markup=markup)
@dp.callback_query_handler(Main(), cb_account.filter(action='geo', value=''), state=addAccount.url)
async def requestGeo(call: types.CallbackQuery, state=FSMContext):
await call.answer()
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(types.KeyboardButton(
"📍 Оптравить местоположение", request_location=True))
markup.add(types.KeyboardButton(
"❌ Отмена"))
georequest_msg = await call.message.answer("📍 Воспользуйтесь специальной кнопкой для отправки своего местоположения", reply_markup=markup)
await call.message.delete()
async with state.proxy() as data:
data["message"] = georequest_msg
await addAccount.wait_geo.set()
@dp.callback_query_handler(Main(), cb_account.filter(action='url', value=''), state=addAccount.url)
async def waitUrl(call: types.CallbackQuery, state: FSMContext):
await call.answer()
markup = types.InlineKeyboardMarkup()
markup.add(types.InlineKeyboardButton(
"◀️ Вернуться к другим методам", callback_data=cb_account.new(action='add', value='')))
async with state.proxy() as data:
data["message"] = call.message
await addAccount.wait_url.set()
await call.message.edit_text("💬 Отправьте ссылку на свою систему Сетевой Город. Образование, скопировав её из адресной строки вашего браузера", reply_markup=markup)
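# Resume a partially finished registration: reuse the stored prelogin fields and jump
# back to the first selection step (cid/sid/pid/cn/sft/scid/credentials) still missing.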
@dp.callback_query_handler(Main(), cb_account.filter(action='continue'), state=['*'])
async def account_continueAdd(call: types.CallbackQuery, callback_data: dict, state: FSMContext):
account = await Account.get_registerAccount(call.from_user.id)
if not account['url']:
await account_add(call, state)
else:
ns = NetSchoolAPI(account['url'])
ns_sessions[account['id']] = ns
regions = await db.execute("SELECT * FROM regions")
for key in account.items():
if key[1]:
ns._prelogin_data.update({key[0]: key[1]})
else:
if key[0] == 'cid':
await cidSelect(account['id'], call.message)
break
elif key[0] == 'sid':
await sidSelect(call.message, account['id'])
break
elif key[0] == 'pid':
await pidSelect(call.message, account['id'])
break
elif key[0] == 'cn':
await cnSelect(call.message, account['id'])
break
elif key[0] == 'sft':
await sftSelect(call.message, account['id'])
break
elif key[0] == 'scid':
await scidSelect(call.message, account['id'])
break
elif key[0] == 'username':
await schoolInfo(call.message, account['id'])
break
elif key[0] == 'password':
await schoolInfo(call.message, account['id'])
break
else:
await account_add(call, state)
break |
the-stack_0_10486 | # Title: 개미
# Link: https://www.acmicpc.net/problem/4307
import sys
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(l: int, n: int, ants: list):
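    # Every ant just walks to one end of the pole: the earliest time all ants are off
    # is set by the ant whose nearer end is farthest, the latest by the ant whose
    # farther end is farthest.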
fast, slow = 0, 0
for ant in ants:
fast = max(fast, min(l-ant, ant))
slow = max(slow, max(ant, l-ant))
return '{} {}'.format(fast, slow)
def main():
t = read_single_int()
for _ in range(t):
l, n = read_list_int()
ants = []
for _ in range(n):
ants.append(read_single_int())
print(solution(l, n, ants))
if __name__ == '__main__':
main() |
the-stack_0_10487 | from datetime import datetime
import pytz as pytz
import scrapy
from fosdem_event_scraper.settings import INPUT_FILE
class FosdemEventSpider(scrapy.Spider):
name = "fosdem-event"
def start_requests(self):
with open(INPUT_FILE, "r") as fhandle:
for url in map(str.rstrip, fhandle.readlines()):
yield scrapy.Request(url=url)
def parse(self, response, **kwargs):
ret = dict()
for info_element in response.css("ul.side-box > li"):
infoid = info_element.css("strong::text").extract_first().lower()
ret[infoid] = info_element.css("a::text").extract_first()
ret["title"] = response.css("#pagetitles h1::text").extract_first()
ret["url"] = response.url
ret["time"] = datetime.now(pytz.UTC)
yield ret
|
the-stack_0_10488 | from __future__ import print_function
import os
import warnings
warnings.filterwarnings('ignore')
import time
import torch
import shutil
import argparse
from m2det import build_net
import torch.utils.data as data
import torch.backends.cudnn as cudnn
from torch.nn.utils.clip_grad import clip_grad_norm_
from layers.functions import Detect,PriorBox
from data import detection_collate
from configs.CC import Config
from utils.core import *
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='M2Det Training')
parser.add_argument('-c', '--config', default='configs/m2det320_resnet101.py')
parser.add_argument('-d', '--dataset', default='COCO', help='VOC or COCO dataset')
parser.add_argument('--ngpu', default=1, type=int, help='gpus')
parser.add_argument('--resume_net', default=None, help='resume net for retraining')
parser.add_argument('--resume_epoch', default=0, type=int, help='resume iter for retraining')
parser.add_argument('-t', '--tensorboard', type=bool, default=False, help='Use tensorborad to show the Loss Graph')
args = parser.parse_args()
print_info('----------------------------------------------------------------------\n'
'| M2Det Training Program |\n'
'----------------------------------------------------------------------',['yellow','bold'])
#Enable Tensorboard logger
logger = set_logger(args.tensorboard)
writer = SummaryWriter()
#Use configs from specified
global cfg
cfg = Config.fromfile(args.config)
net = build_net('train',
size = cfg.model.input_size, # Only 320, 512, 704 and 800 are supported
config = cfg.model.m2det_config)
init_net(net, cfg, args.resume_net) # init the network with pretrained weights or resumed weights
if args.ngpu>1:
net = torch.nn.DataParallel(net)
if cfg.train_cfg.cuda:
net.cuda()
cudnn.benchmark = True
optimizer = set_optimizer(net, cfg)
criterion = set_criterion(cfg)
anchor_config = anchors(cfg)
priorbox = PriorBox(anchor_config)
#Detector
detector = Detect(cfg.model.m2det_config.num_classes, cfg.loss.bkg_label, anchor_config)
with torch.no_grad():
priors = priorbox.forward()
if cfg.train_cfg.cuda:
priors = priors.cuda()
if __name__ == '__main__':
net.train()
epoch = args.resume_epoch
print_info('===> Loading Dataset...',['yellow','bold'])
dataset = get_dataloader(cfg, args.dataset, 'train_sets')
epoch_size = len(dataset) // (cfg.train_cfg.per_batch_size * args.ngpu)
max_iter = getattr(cfg.train_cfg.step_lr,args.dataset)[-1] * epoch_size
stepvalues = [_*epoch_size for _ in getattr(cfg.train_cfg.step_lr, args.dataset)[:-1]]
print_info('===> Training M2Det on ' + args.dataset, ['yellow','bold'])
step_index = 0
if args.resume_epoch > 0:
start_iter = args.resume_epoch * epoch_size
else:
start_iter = 0
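    # Iteration-based training loop: a fresh DataLoader iterator is built at the start
    # of each epoch, checkpoints are written every cfg.model.save_eposhs epochs, and
    # the learning rate is stepped at the epochs configured in cfg.train_cfg.step_lr.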
for iteration in range(start_iter, max_iter):
if iteration % epoch_size == 0:
batch_iterator = iter(data.DataLoader(dataset,
cfg.train_cfg.per_batch_size * args.ngpu,
shuffle=True,
num_workers=cfg.train_cfg.num_workers,
collate_fn=detection_collate))
if epoch % cfg.model.save_eposhs == 0:
save_checkpoint(net, cfg, final=False, datasetname = args.dataset, epoch=epoch)
epoch += 1
load_t0 = time.time()
if iteration in stepvalues:
step_index += 1
lr = adjust_learning_rate(optimizer, cfg.train_cfg.gamma, epoch, step_index, iteration, epoch_size, cfg)
images, targets = next(batch_iterator)
if cfg.train_cfg.cuda:
images = images.cuda()
targets = [anno.cuda() for anno in targets]
out = net(images)
optimizer.zero_grad()
loss_l, loss_c = criterion(out, priors, targets)
loss = loss_l + loss_c
write_logger({'loc_loss':loss_l.item(),
'conf_loss':loss_c.item(),
'loss':loss.item()},logger,iteration,status=args.tensorboard)
loss.backward()
#clip_grad_norm_(net.parameters(), 5) # Clip gradients at 5, because exploding gradients occurred once
optimizer.step()
load_t1 = time.time()
print_train_log(iteration, cfg.train_cfg.print_epochs,
[time.ctime(),epoch,iteration%epoch_size,epoch_size,iteration,loss_l.item(),loss_c.item(),load_t1-load_t0,lr])
save_checkpoint(net, cfg, final=True, datasetname=args.dataset,epoch=-1)
|
the-stack_0_10489 | from typing import *
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
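        # Sort, fix the first two indices, then close in on the remaining pair with a
        # two-pointer scan; the set removes duplicate quadruplets.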
result = set()
nums_len = len(nums)
nums.sort()
for i in range(nums_len):
for j in range(i + 1, nums_len):
k = j + 1
l = nums_len - 1
while k < l:
temp_sum = nums[i] + nums[j] + nums[k] + nums[l]
if temp_sum == target:
result.add((nums[i], nums[j], nums[k], nums[l]))
k += 1
elif temp_sum < target:
k += 1
else:
l -= 1
return list(map(lambda tuple: list(tuple), result))
s = Solution()
print(s.fourSum([-3,-2,-1,0,0,1,2,3], 0)) |
the-stack_0_10490 | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch
import torchvision
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import pickle
import torch.optim as optim
def run_full_code_gn(device, model_file, test_data_file, output_file, n):
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def normalise(X):
return (X - X.mean(axis = 0))/(X.std(axis = 0) + (np.ones((1,X.shape[1]))*(1e-06)))
dict6 = unpickle(test_data_file)
Xtest = np.array(dict6[b'data'])
# Ytest = np.array(dict6[b'labels'])
Xtest = normalise(Xtest)
Xtest = Xtest.reshape(10000, 3, 32, 32)
Xtest = torch.from_numpy(Xtest)
# Ytest = Ytest.astype(int)
# Ytest = torch.from_numpy(Ytest)
Xtest = Xtest.to(torch.float32)
# Ytest = Ytest.to(torch.int64)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class Group_Normalisation(nn.Module):
def __init__(self, numlayer, G):
super().__init__()
self.gamma = nn.Parameter(torch.ones((1, numlayer, 1, 1)), requires_grad = True)
self.beta = nn.Parameter(torch.zeros((1, numlayer, 1, 1)), requires_grad = True)
self.eps = 1e-6
self.G = G
def forward(self, x):
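            # Split the channels into G groups, normalise each group over its channels
            # and spatial dims, then restore the layout and apply learnable gamma/beta.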
x = x.reshape((x.shape[0], self.G, x.shape[1]//self.G, x.shape[2], x.shape[3]))
mean = x.mean(dim = (2, 3, 4), keepdim=True)
var = x.var(dim = (2, 3, 4), keepdim=True)
x = (x - mean) / torch.sqrt(var + self.eps)
x = x.reshape((x.shape[0], x.shape[2]*self.G, x.shape[3], x.shape[4]))
x = self.gamma * x + self.beta
return x
class ResNetBlock(nn.Module):
def __init__(self, numlayer, n, G):
super(ResNetBlock, self).__init__()
self.conv1 = nn.Conv2d(numlayer, numlayer, 3, padding = 1)
self.group_norm1 = Group_Normalisation(numlayer, G)
self.conv2 = nn.Conv2d(numlayer, numlayer, 3, padding = 1)
self.group_norm2 = Group_Normalisation(numlayer, G)
def forward(self, x):
y = x
x = self.conv1(x)
x = self.group_norm1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.group_norm2(x)
x = x + y
x = F.relu(x);
return x
class ResNet_Layer(nn.Module):
def __init__(self, numlayer, n, G):
super(ResNet_Layer, self).__init__()
self.conv_blocs = nn.Sequential(*[ResNetBlock(numlayer, n, G)
for i in range(0, n)])
def forward(self, x):
x = self.conv_blocs(x);
return x
class ResNet_Downsample(nn.Module):
def __init__(self, numlayerin, numlayerout, n, G):
super(ResNet_Downsample, self).__init__()
self.conv1 = nn.Conv2d(numlayerin, numlayerout, 3, stride = 2, padding = 1)
self.layer_norm1 = Group_Normalisation(numlayerout, G)
self.conv2 = nn.Conv2d(numlayerout, numlayerout, 3, padding = 1)
self.layer_norm2 = Group_Normalisation(numlayerout, G)
self.s1A = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, int(numlayerin/2), int(numlayerin/2)), "constant", 0))
def forward(self, x):
y = x
x = self.conv1(x)
x = self.layer_norm1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.layer_norm2(x)
x = x + self.s1A(y)
x = F.relu(x)
return x
class ResNet(nn.Module):
def __init__(self, n1, r1):
super(ResNet, self).__init__()
self.n = n1
self.r = r1
self.conv_3_16 = nn.Conv2d(3, 16, 3, padding = 1)
self.group_norm1 = Group_Normalisation(16, 4)
self.resnet_layer1 = ResNet_Layer(16, n1, 4)
self.resnet_block1 = ResNet_Downsample(16, 32, n1, 8)
self.resnet_layer2 = ResNet_Layer(32, n1-1, 8)
self.resnet_block2 = ResNet_Downsample(32, 64, n1, 8)
self.resnet_layer3 = ResNet_Layer(64, n1-1, 8)
self.globalAvg = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Linear(64, self.r)
def forward(self, x):
x = self.conv_3_16(x)
x = self.group_norm1(x)
x = F.relu(x)
x = self.resnet_layer1(x)
x = self.resnet_block1(x)
x = self.resnet_layer2(x)
x = self.resnet_block2(x)
x = self.resnet_layer3(x)
#Global average pooling
x = self.globalAvg(x)
y = x.view(-1, 64)
x = self.fc1(y)
return x, y
model = ResNet(n, 10)
model.load_state_dict(torch.load(model_file))
model = model.to(device)
len_Xtest = Xtest.shape[0]
final_preds = np.array([]).reshape((0, 10))
batch_size = 128
for i in range(0, (len_Xtest//batch_size)):
x = torch.FloatTensor(Xtest[i*batch_size:(i+1)*batch_size, :]).to(device)
with torch.no_grad():
preds, _ = model(x)
final_preds = np.vstack((final_preds, preds.detach().cpu().numpy()))
if(len_Xtest - ((len_Xtest//batch_size)*batch_size) > 0):
x = torch.FloatTensor(Xtest[((len_Xtest//batch_size)*batch_size):len_Xtest, :]).to(device)
with torch.no_grad():
preds, _ = model(x)
final_preds = np.vstack((final_preds, preds.detach().cpu().numpy()))
print(final_preds.shape)
final_preds = final_preds.argmax(axis = 1)
final_preds = final_preds.reshape(final_preds.shape[0])
# # get predictions for val data
# with torch.no_grad():
# preds, _ = model(Xtest.to(device))
# preds = preds.detach().cpu().numpy()
# # prediction
# prediction = preds.argmax(axis = 1)
s = ""
for x in final_preds:
s += str(x) + "\n"
with open(output_file, "w") as f:
f.write(s)
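# Optional sanity check (illustrative sketch only): if the Ytest labels that are
# commented out near the top of this script are loaded, test accuracy can be
# computed directly from final_preds. Uncommenting those lines is assumed here.
# Ytest = np.array(dict6[b'labels']).astype(int)
# accuracy = float((final_preds == Ytest).mean())
# print("test accuracy:", accuracy)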
|
the-stack_0_10491 | #from .exchanges import Exchange, FTXSpot
import asyncio
class Order:
def __init__(self, order_id: str, base: str, quote: str, side: str, volume: float):
self.id = order_id
self.base = base
self.quote = quote
self.side = side.upper()
self.volume = volume
self.remaining_volume = volume
self.open = True
self.completed = False
self.filled_volume = 0 #Total order volume (including fees)
self.total_fees = {} #Fees paid, format {currency: fee}
self.fills = {}
self.fill_event = asyncio.Event()
self.close_event = asyncio.Event()
self.price = None
self.reported_fill = None
self.modifyed = False
def update(self, update_type, data):
balance_changes = {self.quote: 0, self.base: 0}
if update_type == 'FILL':
if data['trade_id'] in self.fills:
return balance_changes
volume_modifyer = 1 if self.side == 'BUY' else -1
self.remaining_volume -= data['volume']
print('Order', self.id, self.base, self.quote, ' fill, remaining volume: ', self.remaining_volume)
balance_changes[self.base] += volume_modifyer * data['volume']
balance_changes[self.quote] -= volume_modifyer * data['volume'] * data['price']
for currency, fee in data['fees'].items():
if currency not in self.total_fees:
self.total_fees[currency] = 0
if currency not in balance_changes:
balance_changes[currency] = 0
self.total_fees[currency] += fee
balance_changes[currency] -= fee
self.fills[data['trade_id']] = dict(balance_changes)
if self.remaining_volume < 10**-5 or (self.reported_fill is not None and self.reported_fill - 10**-5 <= self.volume - self.remaining_volume):
self.open = False
self.completed = True
self.fill_event.set()
if update_type == 'UPDATE':
if data['status'] == 'CLOSED' and data['id'] == self.id and not self.modifyed:
self.open = False
self.close_event.set()
self.reported_fill = data['filled_size']
if self.reported_fill - 10**-5 <= self.volume - self.remaining_volume:
self.fill_event.set()
if self.reported_fill == 0.0 and self.price is None:
print('Order canceled by exchange, no reason given')
return balance_changes
class LimitOrder(Order):
pass
class MarketOrder(Order):
pass
class Account:
'''Account class to manage orders and store basic data'''
def __init__(self, api, secret, exchange):
self.api_key = api
self.secret_key = secret
self.exchange = exchange
self.balance = None
self.order_update_queue = exchange.user_update_queue
self.parse_order_update_task = asyncio.create_task(self.parse_order_updates())
self.orders = {}
self.unhandled_order_updates = {}
self.fill_queues = {}
async def get_balance(self):
self.balance = await self.exchange.get_account_balance(self.api_key, self.secret_key)
def __str__(self):
r = ''
for coin, balance in self.balance.items():
if balance > 0:
r += coin + '\t| ' + '{0:.4f}'.format(balance)
r += '\n'
return r
def remove_closed_orders(self):
to_delete = []
for order_id, order in self.orders.items():
if not order.open:
to_delete.append(order_id)
for order_id in to_delete:
del self.orders[order_id]
async def get_open_orders(self):
pass
async def parse_order_updates(self):
try:
            while self.exchange.connection_manager.open:
if self.balance is None:
await self.get_balance()
order_update = await self.order_update_queue.get()
if order_update['type'] == 'FILL':
volume_modifyer = 1 if order_update['side'] == 'BUY' else -1
base, quote = order_update['market']
if base not in self.balance:
self.balance[base] = 0.0
if quote not in self.balance:
self.balance[quote] = 0.0
self.balance[base] += volume_modifyer * order_update['volume']
self.balance[quote] -= volume_modifyer * order_update['volume'] * order_update['price']
for fee_currency, fee in order_update['fees'].items():
if fee_currency not in self.balance:
self.balance[fee_currency] = 0.0
self.balance[fee_currency] -= fee
print(order_update['id'], self.fill_queues)
if order_update['id'] in self.fill_queues:
await self.fill_queues[order_update['id']].put(order_update)
if order_update['id'] not in self.orders:
if order_update['id'] not in self.unhandled_order_updates:
self.unhandled_order_updates[order_update['id']] = []
self.unhandled_order_updates[order_update['id']].append(order_update)
else:
self.orders[order_update['id']].update(order_update['type'], order_update)
self.order_update_queue.task_done()
except Exception as e:
print('Error in Account.parse_order_updates():', e)
def add_order(self, order):
if order.id in self.unhandled_order_updates:
for update in self.unhandled_order_updates[order.id]:
order.update(update['type'], update)
self.orders[order.id] = order
async def refresh_fills(self, start_time):
fills = await self.exchange.get_order_fills(start_time, self.api_key, self.secret_key)
for fill in fills:
if fill['id'] not in self.orders:
print('Error in account class, orders out of sync!')
#need to update orders
            elif fill['trade_id'] not in self.orders[fill['id']].fills:
self.orders[fill['id']].update('FILL', fill)
async def market_order(self, base, quote, side, **kwargs):
if 'quote_volume' not in kwargs and 'volume' not in kwargs:
print('ERROR: missing required argument')
#TODO: proper exception
return
if 'volume' in kwargs:
response = await self.exchange.market_order(base, quote, side, kwargs['volume'], self.api_key, self.secret_key)
else:
response = await self.exchange.market_order_quote_volume(base, quote, side, kwargs['quote_volume'], self.api_key, self.secret_key)
async def limit_order(self, base, quote, side, price, volume, fill_queue = None):
order = await self.exchange.limit_order(base, quote, side, price, volume, self.api_key, self.secret_key)
self.fill_queues[order.id] = fill_queue
return order
async def change_order(self, order, **kwargs):
print(kwargs)
order.modifyed = True
if order.remaining_volume < 10**-6:
return
if 'exchange' in kwargs:
exchange = kwargs['exchange']
if 'price' in kwargs and float(self.exchange.price_renderers[(order.base, order.quote)].render(kwargs['price'])) == order.price:
del kwargs['price']
if 'price' in kwargs and 'size' in kwargs:
new_order_id, new_price, new_remaining = await self.exchange.change_order(order.id, order.base, order.quote, self.api_key, self.secret_key, self.subaccount, price=kwargs['price'], size=kwargs['size'])
elif 'price' in kwargs:
new_order_id, new_price, new_remaining = await self.exchange.change_order(order.id, order.base, order.quote, self.api_key, self.secret_key, self.subaccount, price=kwargs['price'])
elif 'size' in kwargs:
new_order_id, new_price, new_remaining = await self.exchange.change_order(order.id, order.base, order.quote, self.api_key, self.secret_key, self.subaccount, size=kwargs['size'])
else:
print('no change to order')
order.modifyed = False
return
order.price = new_price
if order.id in self.fill_queues:
self.fill_queues[new_order_id] = self.fill_queues[order.id]
order.id = new_order_id
order.modifyed = False
self.orders[new_order_id] = order
class BinanceAccount(Account):
async def get_dividend_record(self, limit = 20):
return await self.exchange.get_asset_dividend(limit, self.api_key, self.secret_key)
async def get_account_websocket_key(self):
response = await self.exchange.connection_manager.signed_get()
class FuturesAccount(Account):
pass
class FTXAccount(Account):
def __init__(self, api, secret, exchange, subaccount = None, connection_manager = None):
self.subaccount = subaccount
super().__init__(api, secret, exchange)
if connection_manager is not None:
self.connection_manager = connection_manager
async def market_order(self, base, quote, side, **kwargs):
if 'exchange' in kwargs:
exchange = kwargs['exchange']
else:
exchange = self.exchange
if 'quote_volume' not in kwargs and 'volume' not in kwargs:
print('ERROR: missing required argument')
#TODO: proper exception
return
if 'volume' in kwargs:
order = await exchange.market_order(base, quote, side, kwargs['volume'], self.api_key, self.secret_key, self.subaccount)
else:
order = await exchange.market_order_quote_volume(base, quote, side, kwargs['quote_volume'], self.api_key, self.secret_key, self.subaccount)
if order is None:
#failed to place order...
return
self.add_order(order)
return order
async def limit_order(self, base, quote, side, price, volume, **kwargs):
if 'exchange' in kwargs:
exchange = kwargs['exchange']
else:
exchange = self.exchange
response = await exchange.limit_order(base, quote, side, price, volume, self.api_key, self.secret_key, self.subaccount)
self.add_order(response)
if 'fill_queue' in kwargs:
self.fill_queues[response.id] = kwargs['fill_queue']
else:
print(kwargs)
return response
async def cancel_order(self, order_id, **kwargs):
response = await self.exchange.cancel_order(order_id.id, self.api_key, self.secret_key, self.subaccount)
async def get_balance(self):
self.balance = await self.exchange.get_account_balance(self.api_key, self.secret_key, self.subaccount)
async def subscribe_to_user_data(self):
await self.get_balance()
await self.exchange.subscribe_to_user_data(self.api_key, self.secret_key, self.subaccount)
async def cancel_all_orders(self):
await self.exchange.cancel_all_orders(self.api_key, self.secret_key, self.subaccount)
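# Minimal usage sketch (illustrative only, not a tested path): the exchange
# object is a placeholder -- FTXSpot is only hinted at by the commented import
# at the top of this module -- and the API credentials below are dummies.
#
# async def demo():
#     exchange = FTXSpot(...)  # hypothetical construction
#     account = FTXAccount('API_KEY', 'API_SECRET', exchange)
#     await account.subscribe_to_user_data()
#     order = await account.limit_order('BTC', 'USD', 'BUY', 20000, 0.001)
#     await account.cancel_order(order)
#
# asyncio.run(demo())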
|
the-stack_0_10492 | __all__ = ["Monitor", "get_monitor_files", "load_results"]
import csv
import json
import os
import time
from glob import glob
from typing import List, Optional, Tuple, Union
import gym
import numpy as np
import pandas
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
class Monitor(gym.Wrapper):
"""
A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.
:param env: The environment
:param filename: the location to save a log file, can be None for no log
:param allow_early_resets: allows the reset of the environment before it is done
:param reset_keywords: extra keywords for the reset call,
if extra parameters are needed at reset
:param info_keywords: extra information to log, from the information return of env.step()
"""
EXT = "monitor.csv"
def __init__(
self,
env: gym.Env,
filename: Optional[str] = None,
allow_early_resets: bool = True,
reset_keywords: Tuple[str, ...] = (),
info_keywords: Tuple[str, ...] = (),
):
super(Monitor, self).__init__(env=env)
self.t_start = time.time()
if filename is None:
self.file_handler = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.file_handler = open(filename, "wt")
self.file_handler.write("#%s\n" % json.dumps(
{"t_start": self.t_start, "env_id": env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.file_handler, fieldnames=(
"r", "l", "s", "t") + reset_keywords + info_keywords)
self.logger.writeheader()
self.file_handler.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.success = []
# extra info about the current episode, that was passed in during reset()
self.current_reset_info = {}
def reset(self, **kwargs) -> GymObs:
"""
Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True
:param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords
:return: the first observation of the environment
"""
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, "
"wrap your env with Monitor(env, path, allow_early_resets=True)"
)
self.rewards = []
self.success = []
self.needs_reset = False
for key in self.reset_keywords:
value = kwargs.get(key)
if value is None:
raise ValueError(
"Expected you to pass kwarg {} into reset".format(key))
self.current_reset_info[key] = value
return self.env.reset(**kwargs)
def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
"""
Step the environment with the given action
:param action: the action
:return: observation, reward, done, information
"""
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
observation, reward, done, info = self.env.step(action)
self.rewards.append(reward)
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
ep_len = len(self.rewards)
ep_success = sum(self.success)
ep_info = {"r": round(ep_rew, 6), "l": ep_len, "t": round(
time.time() - self.t_start, 6)}
for key in self.info_keywords:
ep_info[key] = info[key]
self.episode_rewards.append(ep_rew)
self.episode_lengths.append(ep_len)
self.episode_times.append(time.time() - self.t_start)
ep_info.update(self.current_reset_info)
if self.logger:
self.logger.writerow(ep_info)
self.file_handler.flush()
info["episode"] = ep_info
self.total_steps += 1
return observation, reward, done, info
def close(self) -> None:
"""
Closes the environment
"""
super(Monitor, self).close()
if self.file_handler is not None:
self.file_handler.close()
def get_total_steps(self) -> int:
"""
Returns the total number of timesteps
:return:
"""
return self.total_steps
def get_episode_rewards(self) -> List[float]:
"""
Returns the rewards of all the episodes
:return:
"""
return self.episode_rewards
def get_episode_lengths(self) -> List[int]:
"""
Returns the number of timesteps of all the episodes
:return:
"""
return self.episode_lengths
def get_episode_times(self) -> List[float]:
"""
Returns the runtime in seconds of all the episodes
:return:
"""
return self.episode_times
class LoadMonitorResultsError(Exception):
"""
Raised when loading the monitor log fails.
"""
pass
def get_monitor_files(path: str) -> List[str]:
"""
get all the monitor files in the given path
:param path: the logging folder
:return: the log files
"""
return glob(os.path.join(path, "*" + Monitor.EXT))
def load_results(path: str) -> pandas.DataFrame:
"""
Load all Monitor logs from a given directory path matching ``*monitor.csv``
:param path: the directory path containing the log file(s)
:return: the logged data
"""
monitor_files = get_monitor_files(path)
if len(monitor_files) == 0:
raise LoadMonitorResultsError(
f"No monitor files of the form *{Monitor.EXT} found in {path}")
data_frames, headers = [], []
for file_name in monitor_files:
with open(file_name, "rt") as file_handler:
first_line = file_handler.readline()
assert first_line[0] == "#"
header = json.loads(first_line[1:])
data_frame = pandas.read_csv(file_handler, index_col=None)
headers.append(header)
data_frame["t"] += header["t_start"]
data_frames.append(data_frame)
data_frame = pandas.concat(data_frames)
data_frame.sort_values("t", inplace=True)
data_frame.reset_index(inplace=True)
data_frame["t"] -= min(header["t_start"] for header in headers)
return data_frame
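# Illustrative usage sketch (assumes gym's "CartPole-v1" environment is
# available and the old 4-tuple step API that this wrapper targets): record one
# episode into a temporary directory, then reload the log with load_results.
if __name__ == "__main__":
    import tempfile

    log_dir = tempfile.mkdtemp()
    env = Monitor(gym.make("CartPole-v1"), filename=log_dir)
    obs = env.reset()
    done = False
    while not done:
        # random policy, just to generate one logged episode
        obs, reward, done, info = env.step(env.action_space.sample())
    env.close()
    print(load_results(log_dir))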
|
the-stack_0_10494 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from typing import Union, Tuple, List, Optional
from numpy.typing import DTypeLike, ArrayLike
import numpy as np
from numba import jit
import jax
import jax.numpy as jnp
from netket.hilbert import AbstractHilbert, Fock
from ._abstract_operator import AbstractOperator
from ._lazy import Transpose, Adjoint, Squared
@jit(nopython=True)
def _number_to_state(number, hilbert_size_per_site, local_states_per_site, out):
out[:] = local_states_per_site[:, 0]
size = out.shape[0]
ip = number
k = size - 1
while ip > 0:
local_size = hilbert_size_per_site[k]
out[k] = local_states_per_site[k, ip % local_size]
ip = ip // local_size
k -= 1
return out
def is_hermitian(a: np.ndarray, rtol=1e-05, atol=1e-08) -> bool:
return np.allclose(a, a.T.conj(), rtol=rtol, atol=atol)
def _dtype(obj: Union[numbers.Number, ArrayLike, "LocalOperator"]) -> DTypeLike:
if isinstance(obj, numbers.Number):
return type(obj)
elif isinstance(obj, AbstractOperator):
return obj.dtype
elif isinstance(obj, np.ndarray):
return obj.dtype
else:
raise TypeError(f"cannot deduce dtype of object type {type(obj)}: {obj}")
def resize(
arr: ArrayLike,
shape: List[int],
dtype: Optional[DTypeLike] = None,
init: Optional[numbers.Number] = None,
) -> ArrayLike:
"""
resizes the input array to the new shape that must be larger than the old.
The resulting array is initialized with the old array in the corresponding indices, and with init
in the rest.
Args:
arr: The array to be resized
shape: The new shape
dtype: optional dtype of the new array. If unspecified the old array dtype is used
init: optional initialization value for the new entries
Returns:
a numpy array with the chosen shape and dtype.
"""
if dtype is None:
dtype = arr.dtype
if isinstance(shape, int):
shape = (shape,)
if arr.shape == shape:
return arr
arr_shape = arr.shape
if len(shape) != arr.ndim:
raise ValueError("the number of dimensions should not change.")
for (i, ip) in zip(arr_shape, shape):
if ip < i:
raise ValueError(
f"The new dimensions ({shape}) should all be larger than the old ({arr_shape})."
)
    new_arr = np.empty(shape=shape, dtype=dtype)
if init is not None:
new_arr[...] = init
if arr.ndim == 0:
raise ValueError("Cannot resize a 0-dimensional scalar")
elif arr.ndim == 1:
new_arr[: arr_shape[0]] = arr
elif arr.ndim == 2:
new_arr[: arr_shape[0], : arr_shape[1]] = arr
elif arr.ndim == 3:
new_arr[: arr_shape[0], : arr_shape[1], : arr_shape[2]] = arr
elif arr.ndim == 4:
new_arr[: arr_shape[0], : arr_shape[1], : arr_shape[2], : arr_shape[3]] = arr
else:
raise ValueError(f"unsupported number of dimensions: {arr.ndim}")
return new_arr
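# Example (illustrative): resize(np.zeros((2, 2)), (3, 3), init=-1) returns a
# 3x3 array whose top-left 2x2 block holds the original zeros and whose new
# entries are filled with -1.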
def _reorder_matrix(hi, mat, acting_on):
acting_on_sorted = np.sort(acting_on)
if np.all(acting_on_sorted == acting_on):
return mat, acting_on
acting_on_sorted_ids = np.argsort(acting_on)
# could write custom binary <-> int logic instead of using Fock...
    # Since I need to work with bit-strings (where instead of bits I
    # have integers, in order to support arbitrary size spaces) this
    # is exactly what hilbert.to_number() and vice versa do.
# target ordering binary representation
hi_subspace = Fock(hi.shape[acting_on_sorted[0]] - 1)
for site in acting_on_sorted[1:]:
hi_subspace = Fock(hi.shape[site] - 1) * hi_subspace
# find how to map target ordering back to unordered
acting_on_unsorted_ids = np.zeros(len(acting_on), dtype=np.intp)
for (i, site) in enumerate(acting_on):
acting_on_unsorted_ids[i] = np.argmax(site == acting_on_sorted)
# now it is valid that
# acting_on_sorted == acting_on[acting_on_unsorted_ids]
# generate n-bit strings in the target ordering
v = hi_subspace.all_states()
# convert them to origin (unordered) ordering
v_unsorted = v[:, acting_on_unsorted_ids]
# convert the unordered bit-strings to numbers in the target space.
n_unsorted = hi_subspace.states_to_numbers(v_unsorted)
# reorder the matrix
mat_sorted = mat[n_unsorted, :][:, n_unsorted]
return mat_sorted, acting_on_sorted
class LocalOperator(AbstractOperator):
"""A custom local operator. This is a sum of an arbitrary number of operators
acting locally on a limited set of k quantum numbers (i.e. k-local,
in the quantum information sense).
"""
def __init__(
self,
hilbert: AbstractHilbert,
operators: Union[List[ArrayLike], ArrayLike] = [],
acting_on: Union[List[int], List[List[int]]] = [],
constant: numbers.Number = 0,
dtype: Optional[DTypeLike] = None,
):
r"""
Constructs a new ``LocalOperator`` given a hilbert space and (if
specified) a constant level shift.
Args:
hilbert (netket.AbstractHilbert): Hilbert space the operator acts on.
operators (list(numpy.array) or numpy.array): A list of operators, in matrix form.
acting_on (list(numpy.array) or numpy.array): A list of sites, which the corresponding operators act on.
constant (float): Level shift for operator. Default is 0.0.
Examples:
Constructs a ``LocalOperator`` without any operators.
>>> from netket.hilbert import CustomHilbert
>>> from netket.operator import LocalOperator
>>> hi = CustomHilbert(local_states=[1, -1])**20
>>> empty_hat = LocalOperator(hi)
>>> print(len(empty_hat.acting_on))
0
"""
super().__init__(hilbert)
self._constant = constant
# check if passing a single operator or a list of operators
if isinstance(acting_on, numbers.Number):
acting_on = [acting_on]
is_nested = any(hasattr(i, "__len__") for i in acting_on)
if not is_nested:
operators = [operators]
acting_on = [acting_on]
operators = [np.asarray(operator) for operator in operators]
# If we asked for a specific dtype, enforce it.
if dtype is None:
dtype = np.promote_types(operators[0].dtype, np.float32)
for op in operators[1:]:
                dtype = np.promote_types(dtype, op.dtype)
self._dtype = dtype
self._init_zero()
self.mel_cutoff = 1.0e-6
for op, act in zip(operators, acting_on):
if len(act) > 0:
self._add_operator(op, act)
@property
def operators(self) -> List[np.ndarray]:
"""List of the matrices of the operators encoded in this Local Operator.
Returns a copy.
"""
return self._operators_list()
@property
def acting_on(self) -> List[List[int]]:
"""List containing the list of the sites on which every operator acts.
Every operator `self.operators[i]` acts on the sites `self.acting_on[i]`
"""
actions = [action[action >= 0].tolist() for action in self._acting_on]
return actions
@property
def dtype(self) -> DTypeLike:
return self._dtype
@property
def size(self) -> int:
return self._size
@property
def is_hermitian(self) -> bool:
"""Returns true if this operator is hermitian."""
return self._is_hermitian
@property
def mel_cutoff(self) -> float:
r"""float: The cutoff for matrix elements.
Only matrix elements such that abs(O(i,i))>mel_cutoff
are considered"""
return self._mel_cutoff
@mel_cutoff.setter
def mel_cutoff(self, mel_cutoff):
self._mel_cutoff = mel_cutoff
assert self.mel_cutoff >= 0
@property
def constant(self) -> numbers.Number:
return self._constant
@property
def n_operators(self) -> int:
return self._n_operators
def __add__(self, other: Union["LocalOperator", numbers.Number]):
op = self.copy(dtype=np.promote_types(self.dtype, _dtype(other)))
op = op.__iadd__(other)
return op
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
if isinstance(other, LocalOperator):
if self.hilbert != other.hilbert:
return NotImplemented
if not np.can_cast(other.dtype, self.dtype, casting="same_kind"):
raise ValueError(
f"Cannot add inplace operator with dtype {other.dtype} to operator with dtype {self.dtype}"
)
assert other.mel_cutoff == self.mel_cutoff
for i in range(other._n_operators):
acting_on = other._acting_on[i, : other._acting_size[i]]
operator = other._operators[i]
self._add_operator(operator, acting_on)
self._constant += other.constant
return self
if isinstance(other, numbers.Number):
if not np.can_cast(type(other), self.dtype, casting="same_kind"):
raise ValueError(
f"Cannot add inplace operator with dtype {type(other)} to operator with dtype {self.dtype}"
)
self._constant += other
return self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
return other + (-self)
def __isub__(self, other):
return self.__iadd__(-other)
def __neg__(self):
return -1 * self
def __mul__(self, other):
if isinstance(other, AbstractOperator):
op = self.copy(dtype=np.promote_types(self.dtype, _dtype(other)))
return op.__imatmul__(other)
elif not isinstance(other, numbers.Number):
return NotImplemented
op = self.copy(dtype=np.promote_types(self.dtype, _dtype(other)))
op._diag_mels *= other
op._mels *= other
op._constant *= other
for _op in op._operators:
_op *= other
return op
def __imul__(self, other):
if isinstance(other, AbstractOperator):
return self.__imatmul__(other)
elif not isinstance(other, numbers.Number):
return NotImplemented
if not np.can_cast(type(other), self.dtype, casting="same_kind"):
raise ValueError(
f"Cannot add inplace operator with dtype {type(other)} to operator with dtype {self.dtype}"
)
self._diag_mels *= other
self._mels *= other
self._constant *= other
for _op in self._operators:
_op *= other
return self
def __imatmul__(self, other):
if not isinstance(other, LocalOperator):
return NotImplemented
if not np.can_cast(other.dtype, self.dtype, casting="same_kind"):
raise ValueError(
f"Cannot add inplace operator with dtype {type(other)} to operator with dtype {self.dtype}"
)
return self._concrete_imatmul_(other)
def _op__matmul__(self, other):
return self._concrete_matmul_(other)
def _concrete_matmul_(self, other: "LocalOperator") -> "LocalOperator":
if not isinstance(other, LocalOperator):
return NotImplemented
op = self.copy(dtype=np.promote_types(self.dtype, _dtype(other)))
op @= other
return op
def _concrete_imatmul_(self, other: "LocalOperator") -> "LocalOperator":
if not isinstance(other, LocalOperator):
return NotImplemented
tot_operators = []
tot_act = []
for i in range(other._n_operators):
act_i = other._acting_on[i, : other._acting_size[i]].tolist()
ops, act = self._multiply_operator(other._operators[i], act_i)
tot_operators += ops
tot_act += act
prod = LocalOperator(self.hilbert, tot_operators, tot_act, dtype=self.dtype)
self_constant = self._constant
if np.abs(other._constant) > self.mel_cutoff:
self *= other._constant
self += prod
self._constant = 0.0
else:
self = prod
if np.abs(self_constant) > self.mel_cutoff:
self += other * self_constant
return self
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
            raise TypeError("Only division by a scalar number is supported.")
if other == 0:
raise ValueError("Dividing by 0")
return self.__mul__(1.0 / other)
def __rmul__(self, other):
return self.__mul__(other)
def _init_zero(self):
self._operators = []
self._n_operators = 0
self._max_op_size = 0
self._max_acting_size = 0
self._max_local_hilbert_size = 0
self._size = 0
self._acting_on = np.zeros((0, 0), dtype=np.intp)
self._acting_size = np.zeros(0, dtype=np.intp)
self._diag_mels = np.zeros((0, 0), dtype=self.dtype)
self._mels = np.empty((0, 0, 0), dtype=self.dtype)
self._x_prime = np.empty((0, 0, 0, 0))
self._n_conns = np.empty((0, 0), dtype=np.intp)
self._local_states = np.zeros((0, 0, 0), dtype=np.float64)
self._basis = np.zeros((0, 0), dtype=np.int64)
self._is_hermitian = True
def _acting_on_list(self):
acting_on = []
for i in range(self.n_operators):
acting_on.append(np.copy(self._acting_on[i, : self._acting_size[i]]))
return acting_on
def _operators_list(self):
"A deep copy of the operators"
operators = [np.copy(op) for op in self._operators]
return operators
def _add_operator(self, operator: ArrayLike, acting_on: List[int]):
if not np.can_cast(operator, self.dtype, casting="same_kind"):
raise ValueError(f"Cannot cast type {operator.dtype} to {self.dtype}")
acting_on = np.asarray(acting_on, dtype=np.intp)
operator = np.asarray(operator, dtype=self.dtype)
if np.unique(acting_on).size != acting_on.size:
raise ValueError("acting_on contains repeated entries.")
if any(acting_on >= self.hilbert.size):
raise ValueError("acting_on points to a site not in the hilbert space.")
if operator.ndim != 2:
raise ValueError("The operator should be a matrix")
if np.all(np.abs(operator) < self.mel_cutoff):
return
# re-sort the operator
operator, acting_on = _reorder_matrix(self.hilbert, operator, acting_on)
# find overlapping support
support_i = None
for (i, support) in enumerate(self._acting_on_list()):
if np.all(acting_on == support):
support_i = i
break
# If overlapping support, add the local operators themselves
if support_i is not None:
dim = min(operator.shape[0], self._operators[support_i].shape[0])
_opv = self._operators[support_i][:dim, :dim]
_opv += operator[:dim, :dim]
n_local_states_per_site = np.asarray(
[self.hilbert.size_at_index(i) for i in acting_on]
)
self._append_matrix(
self._operators[support_i],
self._diag_mels[support_i],
self._mels[support_i],
self._x_prime[support_i],
self._n_conns[support_i],
self._acting_size[support_i],
self._local_states[support_i],
self.mel_cutoff,
n_local_states_per_site,
)
isherm = True
for op in self._operators:
isherm = isherm and is_hermitian(op)
self._is_hermitian = isherm
else:
self.__add_new_operator__(operator, acting_on)
def __add_new_operator__(self, operator: np.ndarray, acting_on: np.ndarray):
# Else, we must add a completely new operator
self._n_operators += 1
self._operators.append(operator)
# Add a new row and eventually resize the acting_on
self._acting_size = np.resize(self._acting_size, (self.n_operators,))
self._acting_size[-1] = acting_on.size
acting_size = acting_on.size
self._max_op_size = max((operator.shape[0], self._max_op_size))
n_local_states_per_site = np.asarray(
[self.hilbert.size_at_index(i) for i in acting_on]
)
if operator.shape[0] != np.prod(n_local_states_per_site):
raise RuntimeError(
r"""the given operator matrix has shape={} and acts on
the sites={}, which have a local hilbert space size of
sizes={} giving an expected shape
for the operator expected_shape={}.""".format(
operator.shape,
acting_on,
n_local_states_per_site,
np.prod(n_local_states_per_site),
)
)
self._max_acting_size = max(self._max_acting_size, acting_on.size)
self._max_local_hilbert_size = max(
self._max_local_hilbert_size, np.max(n_local_states_per_site)
)
self._acting_on = resize(
self._acting_on, shape=(self.n_operators, self._max_acting_size), init=-1
)
self._acting_on[-1, :acting_size] = acting_on
if (
self._acting_on[-1, :acting_size].max() > self.hilbert.size
or self._acting_on[-1, :acting_size].min() < 0
):
            raise ValueError("Operator acts on an invalid set of sites")
self._local_states = resize(
self._local_states,
shape=(
self.n_operators,
self._max_acting_size,
self._max_local_hilbert_size,
),
init=np.nan,
)
## add an operator to local_states
for site in range(acting_size):
self._local_states[-1, site, : n_local_states_per_site[site]] = np.asarray(
self.hilbert.states_at_index(acting_on[site])
)
## add an operator to basis
self._basis = resize(
self._basis, shape=(self.n_operators, self._max_acting_size), init=1e10
)
ba = 1
for s in range(acting_on.size):
self._basis[-1, s] = ba
ba *= n_local_states_per_site[acting_on.size - s - 1]
##
self._diag_mels = resize(
self._diag_mels, shape=(self.n_operators, self._max_op_size), init=np.nan
)
self._mels = resize(
self._mels,
shape=(self.n_operators, self._max_op_size, self._max_op_size - 1),
init=np.nan,
)
self._x_prime = resize(
self._x_prime,
shape=(
self.n_operators,
self._max_op_size,
self._max_op_size - 1,
self._max_acting_size,
),
init=-1,
)
self._n_conns = resize(
self._n_conns, shape=(self.n_operators, self._max_op_size), init=-1
)
if acting_on.max() + 1 >= self._size:
self._size = acting_on.max() + 1
self._append_matrix(
operator,
self._diag_mels[-1],
self._mels[-1],
self._x_prime[-1],
self._n_conns[-1],
self._acting_size[-1],
self._local_states[-1],
self.mel_cutoff,
n_local_states_per_site,
)
isherm = True
for op in self._operators:
isherm = isherm and is_hermitian(op)
self._is_hermitian = isherm
@staticmethod
@jit(nopython=True)
def _append_matrix(
operator,
diag_mels,
mels,
x_prime,
n_conns,
acting_size,
local_states_per_site,
epsilon,
hilb_size_per_site,
):
op_size = operator.shape[0]
assert op_size == operator.shape[1]
for i in range(op_size):
diag_mels[i] = operator[i, i]
n_conns[i] = 0
for j in range(op_size):
if i != j and np.abs(operator[i, j]) > epsilon:
k_conn = n_conns[i]
mels[i, k_conn] = operator[i, j]
_number_to_state(
j,
hilb_size_per_site,
local_states_per_site[:acting_size, :],
x_prime[i, k_conn, :acting_size],
)
n_conns[i] += 1
def _multiply_operator(self, op, act):
operators = []
acting_on = []
act = np.asarray(act)
for i in range(self.n_operators):
act_i = self._acting_on[i, : self._acting_size[i]]
inters = np.intersect1d(act_i, act, return_indices=False)
if act.size == act_i.size and np.array_equal(act, act_i):
                # same support: multiply the local matrices directly
operators.append(np.copy(np.matmul(self._operators[i], op)))
acting_on.append(act_i.tolist())
elif inters.size == 0:
# disjoint supports
operators.append(np.copy(np.kron(self._operators[i], op)))
acting_on.append(act_i.tolist() + act.tolist())
else:
_act = list(act)
_act_i = list(act_i)
_op = op.copy()
_op_i = self._operators[i].copy()
# expand _act to match _act_i
actmin = min(act)
for site in act_i:
if site not in act:
I = np.eye(self.hilbert.shape[site], dtype=self.dtype)
if site < actmin:
_act = [site] + _act
_op = np.kron(I, _op)
else: # site > actmax
_act = _act + [site]
_op = np.kron(_op, I)
act_i_min = min(act_i)
for site in act:
if site not in act_i:
I = np.eye(self.hilbert.shape[site], dtype=self.dtype)
if site < act_i_min:
_act_i = [site] + _act_i
_op_i = np.kron(I, _op_i)
else: # site > actmax
_act_i = _act_i + [site]
_op_i = np.kron(_op_i, I)
# reorder
_op, _act = _reorder_matrix(self.hilbert, _op, _act)
_op_i, _act_i = _reorder_matrix(self.hilbert, _op_i, _act_i)
if len(_act) == len(_act_i) and np.array_equal(_act, _act_i):
                    # same support after expansion: multiply the local matrices
operators.append(np.matmul(_op_i, _op))
acting_on.append(_act_i)
else:
raise ValueError("Something failed")
return operators, acting_on
    def copy(self, *, dtype: Optional[DTypeLike] = None):
"""Returns a copy of the operator, while optionally changing the dtype
of the operator.
Args:
dtype: optional dtype
"""
if dtype is None:
dtype = self.dtype
if not np.can_cast(self.dtype, dtype, casting="same_kind"):
raise ValueError(f"Cannot cast {self.dtype} to {dtype}")
return LocalOperator(
hilbert=self.hilbert,
operators=[np.copy(op) for op in self._operators],
acting_on=self._acting_on_list(),
constant=self._constant,
dtype=dtype,
)
def transpose(self, *, concrete=False):
        r"""LocalOperator: Returns the transpose of this operator."""
if concrete:
new_ops = [np.copy(ops.transpose()) for ops in self._operators]
return LocalOperator(
hilbert=self.hilbert,
operators=new_ops,
acting_on=self._acting_on_list(),
constant=self._constant,
)
else:
return Transpose(self)
def conjugate(self, *, concrete=False):
r"""LocalOperator: Returns the complex conjugate of this operator."""
new_ops = [np.copy(ops).conjugate() for ops in self._operators]
return LocalOperator(
hilbert=self.hilbert,
operators=new_ops,
acting_on=self._acting_on_list(),
constant=np.conjugate(self._constant),
)
def get_conn_flattened(self, x, sections, pad=False):
r"""Finds the connected elements of the Operator. Starting
from a given quantum number x, it finds all other quantum numbers x' such
that the matrix element :math:`O(x,x')` is different from zero. In general there
will be several different connected states x' satisfying this
condition, and they are denoted here :math:`x'(k)`, for :math:`k=0,1...N_{\mathrm{connected}}`.
This is a batched version, where x is a matrix of shape (batch_size,hilbert.size).
Args:
x (matrix): A matrix of shape (batch_size,hilbert.size) containing
the batch of quantum numbers x.
sections (array): An array of size (batch_size) useful to unflatten
the output of this function.
See numpy.split for the meaning of sections.
pad (bool): Whether to use zero-valued matrix elements in order to return all equal sections.
Returns:
matrix: The connected states x', flattened together in a single matrix.
array: An array containing the matrix elements :math:`O(x,x')` associated to each x'.
"""
return self._get_conn_flattened_kernel(
x,
sections,
self._local_states,
self._basis,
self._constant,
self._diag_mels,
self._n_conns,
self._mels,
self._x_prime,
self._acting_on,
self._acting_size,
pad,
)
def _get_conn_flattened_closure(self):
_local_states = self._local_states
_basis = self._basis
_constant = self._constant
_diag_mels = self._diag_mels
_n_conns = self._n_conns
_mels = self._mels
_x_prime = self._x_prime
_acting_on = self._acting_on
_acting_size = self._acting_size
fun = self._get_conn_flattened_kernel
def gccf_fun(x, sections):
return fun(
x,
sections,
_local_states,
_basis,
_constant,
_diag_mels,
_n_conns,
_mels,
_x_prime,
_acting_on,
_acting_size,
)
return jit(nopython=True)(gccf_fun)
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(
x,
sections,
local_states,
basis,
constant,
diag_mels,
n_conns,
all_mels,
all_x_prime,
acting_on,
acting_size,
pad=False,
):
batch_size = x.shape[0]
n_sites = x.shape[1]
dtype = all_mels.dtype
assert sections.shape[0] == batch_size
n_operators = n_conns.shape[0]
xs_n = np.empty((batch_size, n_operators), dtype=np.intp)
tot_conn = 0
max_conn = 0
for b in range(batch_size):
# diagonal element
conn_b = 1
# counting the off-diagonal elements
for i in range(n_operators):
acting_size_i = acting_size[i]
xs_n[b, i] = 0
x_b = x[b]
x_i = x_b[acting_on[i, :acting_size_i]]
for k in range(acting_size_i):
xs_n[b, i] += (
np.searchsorted(
local_states[i, acting_size_i - k - 1],
x_i[acting_size_i - k - 1],
)
* basis[i, k]
)
conn_b += n_conns[i, xs_n[b, i]]
tot_conn += conn_b
sections[b] = tot_conn
if pad:
max_conn = max(conn_b, max_conn)
if pad:
tot_conn = batch_size * max_conn
x_prime = np.empty((tot_conn, n_sites), dtype=x.dtype)
mels = np.empty(tot_conn, dtype=dtype)
c = 0
for b in range(batch_size):
c_diag = c
mels[c_diag] = constant
x_batch = x[b]
x_prime[c_diag] = np.copy(x_batch)
c += 1
for i in range(n_operators):
# Diagonal part
mels[c_diag] += diag_mels[i, xs_n[b, i]]
n_conn_i = n_conns[i, xs_n[b, i]]
if n_conn_i > 0:
sites = acting_on[i]
acting_size_i = acting_size[i]
for cc in range(n_conn_i):
mels[c + cc] = all_mels[i, xs_n[b, i], cc]
x_prime[c + cc] = np.copy(x_batch)
for k in range(acting_size_i):
x_prime[c + cc, sites[k]] = all_x_prime[
i, xs_n[b, i], cc, k
]
c += n_conn_i
if pad:
delta_conn = max_conn - (c - c_diag)
mels[c : c + delta_conn].fill(0)
x_prime[c : c + delta_conn, :] = np.copy(x_batch)
c += delta_conn
sections[b] = c
return x_prime, mels
def get_conn_filtered(self, x, sections, filters):
r"""Finds the connected elements of the Operator using only a subset of operators. Starting
from a given quantum number x, it finds all other quantum numbers x' such
that the matrix element :math:`O(x,x')` is different from zero. In general there
will be several different connected states x' satisfying this
condition, and they are denoted here :math:`x'(k)`, for :math:`k=0,1...N_{\mathrm{connected}}`.
This is a batched version, where x is a matrix of shape (batch_size,hilbert.size).
Args:
x (matrix): A matrix of shape (batch_size,hilbert.size) containing
the batch of quantum numbers x.
sections (array): An array of size (batch_size) useful to unflatten
the output of this function.
See numpy.split for the meaning of sections.
filters (array): Only operators op(filters[i]) are used to find the connected elements of
x[i].
Returns:
matrix: The connected states x', flattened together in a single matrix.
array: An array containing the matrix elements :math:`O(x,x')` associated to each x'.
"""
return self._get_conn_filtered_kernel(
x,
sections,
self._local_states,
self._basis,
self._constant,
self._diag_mels,
self._n_conns,
self._mels,
self._x_prime,
self._acting_on,
self._acting_size,
filters,
)
@staticmethod
@jit(nopython=True)
def _get_conn_filtered_kernel(
x,
sections,
local_states,
basis,
constant,
diag_mels,
n_conns,
all_mels,
all_x_prime,
acting_on,
acting_size,
filters,
):
batch_size = x.shape[0]
n_sites = x.shape[1]
dtype = all_mels.dtype
assert filters.shape[0] == batch_size and sections.shape[0] == batch_size
n_operators = n_conns.shape[0]
xs_n = np.empty((batch_size, n_operators), dtype=np.intp)
tot_conn = 0
for b in range(batch_size):
# diagonal element
tot_conn += 1
# counting the off-diagonal elements
i = filters[b]
assert i < n_operators and i >= 0
acting_size_i = acting_size[i]
xs_n[b, i] = 0
x_b = x[b]
x_i = x_b[acting_on[i, :acting_size_i]]
for k in range(acting_size_i):
xs_n[b, i] += (
np.searchsorted(
local_states[i, acting_size_i - k - 1],
x_i[acting_size_i - k - 1],
)
* basis[i, k]
)
tot_conn += n_conns[i, xs_n[b, i]]
sections[b] = tot_conn
x_prime = np.empty((tot_conn, n_sites))
mels = np.empty(tot_conn, dtype=dtype)
c = 0
for b in range(batch_size):
c_diag = c
mels[c_diag] = constant
x_batch = x[b]
x_prime[c_diag] = np.copy(x_batch)
c += 1
i = filters[b]
# Diagonal part
mels[c_diag] += diag_mels[i, xs_n[b, i]]
n_conn_i = n_conns[i, xs_n[b, i]]
if n_conn_i > 0:
sites = acting_on[i]
acting_size_i = acting_size[i]
for cc in range(n_conn_i):
mels[c + cc] = all_mels[i, xs_n[b, i], cc]
x_prime[c + cc] = np.copy(x_batch)
for k in range(acting_size_i):
x_prime[c + cc, sites[k]] = all_x_prime[i, xs_n[b, i], cc, k]
c += n_conn_i
return x_prime, mels
def __repr__(self):
ao = self.acting_on
acting_str = f"acting_on={ao}"
if len(acting_str) > 55:
acting_str = f"#acting_on={len(ao)} locations"
return f"{type(self).__name__}(dim={self.hilbert.size}, {acting_str}, constant={self.constant}, dtype={self.dtype})"
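# Illustrative sketch, mirroring the constructor docstring above: build a
# single-site Pauli-X operator on a 20-site custom Hilbert space with a
# constant shift. Kept as comments so the library module stays import-clean.
#
# from netket.hilbert import CustomHilbert
# hi = CustomHilbert(local_states=[1, -1]) ** 20
# sigma_x = np.array([[0.0, 1.0], [1.0, 0.0]])
# op = LocalOperator(hi, operators=[sigma_x], acting_on=[[0]], constant=0.5)
# print(op)  # e.g. LocalOperator(dim=20, acting_on=[[0]], constant=0.5, dtype=float64)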
|
the-stack_0_10497 | from inspect import signature
from typing import Any, Type, TypeVar
from httpx import Response
from .errors import TelePayError
T = TypeVar("T")
def validate_response(response: Response) -> None:
if response.status_code < 200 or response.status_code >= 300:
error_data = response.json()
error = error_data.pop("error", None)
message = error_data.pop("message", None)
raise TelePayError(
status_code=response.status_code,
error=error,
message=message,
)
def parse_json(cls: Type[T], **json: Any) -> T:
cls_fields = {field for field in signature(cls).parameters}
native_args, new_args = {}, {}
for name, val in json.items():
if name in cls_fields:
native_args[name] = val
else:
new_args[name] = val
ret = cls(**native_args)
for new_name, new_val in new_args.items():
setattr(ret, new_name, new_val)
return ret
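# Small usage sketch: parse_json maps the keys that match the constructor
# signature onto the class and attaches any extra keys as attributes. The
# Invoice dataclass below is a hypothetical example type, not part of telepay.
#
# from dataclasses import dataclass
#
# @dataclass
# class Invoice:
#     number: str
#     asset: str
#
# inv = parse_json(Invoice, number="abc", asset="TON", status="pending")
# assert inv.number == "abc" and inv.status == "pending"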
|
the-stack_0_10498 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which dont fit in anything xD
""" scape module for other small commands. """
from random import randint
from time import sleep
from os import execl
import sys
import os
import io
import sys
from scape import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot, GIT_REPO_NAME, ALIVE_NAME
from scape.events import register
from scape.utils import time_formatter
import urllib
import requests
from bs4 import BeautifulSoup
import re
from PIL import Image
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
opener = urllib.request.build_opener()
useragent = 'Mozilla/5.0 (Linux; Android 9; SM-G960F Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.70 Mobile Safari/537.36'
opener.addheaders = [('User-agent', useragent)]
@register(outgoing=True, pattern="^.random")
async def randomise(items):
""" For .random command, get a random item from the list of items. """
itemo = (items.text[8:]).split()
if len(itemo) < 2:
await items.edit(
"`2 or more items are required! Check .help random for more info.`"
)
return
index = randint(1, len(itemo) - 1)
await items.edit("**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" +
itemo[index] + "`")
@register(outgoing=True, pattern="^.sleep ([0-9]+)$")
async def sleepybot(time):
    """ For .sleep command, let the scape snooze for a few seconds. """
counter = int(time.pattern_match.group(1))
await time.edit("`I am sulking and snoozing...`")
if BOTLOG:
str_counter = time_formatter(counter)
await time.client.send_message(
BOTLOG_CHATID,
f"You put the bot to sleep for {str_counter}.",
)
sleep(counter)
await time.edit("`OK, I'm awake now.`")
@register(outgoing=True, pattern="^.shutdown$")
async def killdabot(event):
""" For .shutdown command, shut the bot down."""
await event.edit("`Goodbye *Windows XP shutdown sound*....`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n"
"Bot shut down")
await bot.disconnect()
@register(outgoing=True, pattern="^.restart$")
async def killdabot(event):
await event.edit("`*i would be back in a moment*`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#RESTART \n"
"Bot Restarted")
await bot.disconnect()
# Spin a new instance of bot
execl(sys.executable, sys.executable, *sys.argv)
# Shut the existing one down
exit()
@register(outgoing=True, pattern="^.community$")
async def bot_community(community):
""" For .community command, just returns OG Paperplane's group link. """
    await community.edit(
        "Join RaphielGang's awesome scape community: @scape_support"
        "\nDo note that Paperplane Extended is an unofficial fork of their "
        "Paperplane project and it may get limited or no support for bugs.")
@register(outgoing=True, pattern="^.support$")
async def bot_support(wannahelp):
""" For .support command, just returns the group link. """
await wannahelp.edit(
"Join the scape-remix Channel: @scape-remix \
\nJoin the scape-remix Chat: @remixsupport")
@register(outgoing=True, pattern="^.creator$")
async def creator(e):
await e.edit("[Heyworld](https://github.com/sahyam2019)")
@register(outgoing=True, pattern="^.readme$")
async def reedme(e):
await e.edit(
"Here's something for you to read:\n"
"\n[Openscape's README.md file](https://github.com/mkaraniya/Openscape/blob/sql-extended/README.md)"
"\n[Setup Guide - Basic](https://telegra.ph/How-to-host-a-Telegram-scape-11-02)"
"\n[Setup Guide - Google Drive](https://telegra.ph/How-To-Setup-GDrive-11-02)"
"\n[Setup Guide - LastFM Module](https://telegra.ph/How-to-set-up-LastFM-module-for-Paperplane-scape-11-02)"
"\n[Video Tutorial - 576p](https://mega.nz/#!ErwCESbJ!1ZvYAKdTEfb6y1FnqqiLhHH9vZg4UB2QZNYL9fbQ9vs)"
"\n[Video Tutorial - 1080p](https://mega.nz/#!x3JVhYwR!u7Uj0nvD8_CyyARrdKrFqlZEBFTnSVEiqts36HBMr-o)"
"\n[Special - Note](https://telegra.ph/Special-Note-11-02)")
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern="^.repeat (.*)")
async def repeat(rep):
cnt, txt = rep.pattern_match.group(1).split(' ', 1)
replyCount = int(cnt)
toBeRepeated = txt
replyText = toBeRepeated + "\n"
for i in range(0, replyCount - 1):
replyText += toBeRepeated + "\n"
await rep.edit(replyText)
@register(outgoing=True, pattern="^.repo$")
async def repo_is_here(wannasee):
""" For .repo command, just returns the repo URL. """
await wannasee.edit(
"Click [here](https://github.com/Calliope-K/Scape-Remix) to open scape-remix's GitHub page."
)
@register(outgoing=True, pattern="^.myrepo$")
async def myrepo_is_here(wannaseeme):
""" For .myrepo command, just returns the repo URL. """
await wannaseeme.edit(
f'Click [here](https://github.com/{GIT_REPO_NAME}/tree/sql-extended/) to open {DEFAULTUSER}`s GitHub page'
)
@register(outgoing=True, pattern="^.raw$")
async def raw(event):
the_real_message = None
reply_to_id = None
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
the_real_message = previous_message.stringify()
reply_to_id = event.reply_to_msg_id
else:
the_real_message = event.stringify()
reply_to_id = event.message.id
with io.BytesIO(str.encode(the_real_message)) as out_file:
out_file.name = "raw_message_data.txt"
await event.edit(
"`Check the scape log for the decoded message data !!`")
await event.client.send_file(
BOTLOG_CHATID,
out_file,
force_document=True,
allow_cache=False,
reply_to=reply_to_id,
caption="`Here's the decoded message data !!`")
@register(outgoing=True, pattern=r"^.reverse(?: |$)(\d*)")
async def okgoogle(img):
""" For .reverse command, Google search images and stickers. """
if os.path.isfile("okgoogle.png"):
os.remove("okgoogle.png")
message = await img.get_reply_message()
if message and message.media:
photo = io.BytesIO()
await bot.download_media(message, photo)
else:
        await img.edit("`Reply to a photo or sticker.`")
return
if photo:
await img.edit("`Processing...`")
try:
image = Image.open(photo)
except OSError:
await img.edit('`Unsupported sexuality, most likely.`')
return
name = "okgoogle.png"
image.save(name, "PNG")
image.close()
# https://stackoverflow.com/questions/23270175/google-reverse-image-search-using-post-request#28792943
searchUrl = 'https://www.google.com/searchbyimage/upload'
multipart = {
'encoded_image': (name, open(name, 'rb')),
'image_content': ''
}
response = requests.post(searchUrl,
files=multipart,
allow_redirects=False)
fetchUrl = response.headers['Location']
        if response.status_code != 400:
await img.edit("`Image successfully uploaded to Google. Maybe.`"
"\n`Parsing source now. Maybe.`")
else:
await img.edit("`Google told me to fuck off.`")
return
os.remove(name)
match = await ParseSauce(fetchUrl +
"&preferences?hl=en&fg=1#languages")
guess = match['best_guess']
imgspage = match['similar_images']
if guess and imgspage:
await img.edit(f"[{guess}]({fetchUrl})\n\n`Looking for images...`")
else:
await img.edit("`Couldn't find anything for your uglyass.`")
return
if img.pattern_match.group(1):
lim = img.pattern_match.group(1)
else:
lim = 3
images = await scam(match, lim)
yeet = []
for i in images:
k = requests.get(i)
yeet.append(k.content)
try:
            await img.client.send_file(
                entity=await img.client.get_input_entity(img.chat_id),
                file=yeet,
                reply_to=img)
except TypeError:
pass
await img.edit(
f"[{guess}]({fetchUrl})\n\n[Visually similar images]({imgspage})")
async def ParseSauce(googleurl):
"""Parse/Scrape the HTML code for the info we want."""
source = opener.open(googleurl).read()
soup = BeautifulSoup(source, 'html.parser')
results = {'similar_images': '', 'best_guess': ''}
try:
for similar_image in soup.findAll('input', {'class': 'gLFyf'}):
url = 'https://www.google.com/search?tbm=isch&q=' + \
urllib.parse.quote_plus(similar_image.get('value'))
results['similar_images'] = url
except BaseException:
pass
for best_guess in soup.findAll('div', attrs={'class': 'r5a77d'}):
results['best_guess'] = best_guess.get_text()
return results
async def scam(results, lim):
single = opener.open(results['similar_images']).read()
decoded = single.decode('utf-8')
imglinks = []
counter = 0
pattern = r'^,\[\"(.*[.png|.jpg|.jpeg])\",[0-9]+,[0-9]+\]$'
oboi = re.findall(pattern, decoded, re.I | re.M)
for imglink in oboi:
counter += 1
if not counter >= int(lim):
imglinks.append(imglink)
else:
break
return imglinks
CMD_HELP.update({
"misc":
"`.random` <item1> <item2> ... <itemN>\
\nUsage: Get a random item from the list of items.\
\n\n`.sleep` <seconds>\
    \nUsage: scapes get tired too. Let yours snooze for a few seconds.\
\n\n`.shutdown`\
\nUsage: Shutdowns the bot temporarily\
\n\n`.support`\
\nUsage: if you need help, use this command\
\n\n`.community`\
\nUsage: Join the awesome Openscape community !!\
\n\n`.repo`\
\nUsage: If you are curious what makes the scape work, this is what you need !!\
\n\n`.myrepo`\
\nUsage: If you are curious which is your personal repo, this is what you have.\
\n\n`.readme`\
\nUsage: Provide links to setup the scape and it's modules.\
\n\n`.creator`\
\nUsage: Know who created this awesome scape !!\
\n\n`.repeat` <no.> <text>\
\nUsage: Repeats the text for a number of times. Don't confuse this with spam tho.\
\n\n`.restart`\
\nUsage: Restarts the bot !!\
\n\n`.raw`\
\nUsage: Get detailed JSON-like formatted data about replied message.\
\n\n`.reverse`\
    \nUsage: Reply to a pic/sticker to reverse-search it on Google Images.\
    \n\n`.poll`\
    \nUsage: If you don't give any input, it sends a default poll. If you'd like to customize it, use this syntax:\
    \n `.poll question ; option 1; option2 ;`\
    \n ';' separates each option and the question."
})
|
the-stack_0_10500 | import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.fixture
def left_array():
return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
@pytest.fixture
def right_array():
return pd.array([True, False, None] * 3, dtype="boolean")
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[
("add", [True, True, None, True, False, None, None, None, None]),
("mul", [True, False, None, False, False, None, None, None, None]),
],
ids=["add", "mul"],
)
def test_add_mul(left_array, right_array, opname, exp):
op = getattr(operator, opname)
result = op(left_array, right_array)
expected = pd.array(exp, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_sub(left_array, right_array):
with pytest.raises(TypeError):
# numpy points to ^ operator or logical_xor function instead
left_array - right_array
def test_div(left_array, right_array):
# for now division gives a float numpy array
result = left_array / right_array
expected = np.array(
[1.0, np.inf, np.nan, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
dtype="float64",
)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"opname",
[
"floordiv",
"mod",
pytest.param(
"pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? GH34686")
),
],
)
def test_op_int8(left_array, right_array, opname):
op = getattr(operator, opname)
result = op(left_array, right_array)
expected = op(left_array.astype("Int8"), right_array.astype("Int8"))
tm.assert_extension_array_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
with pytest.raises(TypeError):
ops("foo")
with pytest.raises(TypeError):
ops(pd.Timestamp("20180101"))
# invalid array-likes
if op not in ("__mul__", "__rmul__"):
# TODO(extension) numpy's mul with object array sees booleans as numbers
with pytest.raises(TypeError):
ops(pd.Series("foo", index=s.index))
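# Illustrative extra check (an assumption consistent with test_add_mul above):
# arithmetic between two nullable boolean arrays keeps the "boolean" dtype.
def test_add_keeps_boolean_dtype(left_array, right_array):
    result = left_array + right_array
    assert result.dtype == "boolean"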
|
the-stack_0_10505 | import os
import re
import copy
import json
import logging
import configparser
from androguard.misc import *
from androguard.core import *
from analysis_utils import AnalysisUtils
from code_analyser_trace_adv import CodeTraceAdvanced
from common import Conversions
TRACE_FORWARD = 'FORWARD'
TRACE_REVERSE = 'REVERSE'
TRACE_TYPE_BASIC = 'BASIC'
TRACE_TYPE_ADVANCED = 'ADVANCED'
STOP_CONDITION_TRUE = 'True'
STOP_CONDITION_FALSE = 'False'
STOP_CONDITION_MAYBE = 'Maybe'
class CodeTrace:
"""The main code tracing class."""
def __init__(self, base_dir):
"""Sets paths and initialises variables.
:param base_dir: string indicating script base path
"""
# Set paths.
self.path_base_dir = base_dir
self.path_config_file = os.path.join(
self.path_base_dir,
'config',
'jandroid.conf'
)
# Set a default max trace length.
self.default_trace_length_max = 25
# Read config file.
config = configparser.ConfigParser()
config.read(self.path_config_file)
if config.has_section('TRACEPARAMS'):
if config.has_option('TRACEPARAMS', 'TRACE_LENGTH_MAX'):
self.default_trace_length_max = \
int(config['TRACEPARAMS']['TRACE_LENGTH_MAX'])
self.trace_length_max = self.default_trace_length_max
# Initialise special case object.
self.special_case_object_list_reverse = {
'doInBackground': {
'Landroid/os/AsyncTask;': [
'execute([Ljava/lang/Object;)Landroid/os/AsyncTask;',
'execute(Ljava/lang/Runnable;)'
]
}
}
self.special_case_object_list_forward = {
'execute([Ljava/lang/Object;)Landroid/os/AsyncTask;': 'doInBackground',
'execute(Ljava/lang/Runnable;)V': 'doInBackground'
}
# Store returns.
self.current_returns = []
# This is to let us know whether to perform a "lenient" stop check or not.
self.hardcoded_traceto = False
self.advanced_trace = CodeTraceAdvanced(self.path_base_dir)
def fn_reset(self):
"""Resets objects to free up memory."""
self.androguard_apk_obj = None
self.androguard_d_array = None
self.androguard_dx = None
self.inst_analysis_utils = None
self.current_returns = []
def fn_perform_code_trace(self, a, d, dx, code_trace_template, links):
"""Traces within code based on a trace template.
:param a: androguard.core.bytecodes.apk.APK object
:param d: array of androguard.core.bytecodes.dvm.DalvikVMFormat objects
:param dx: androguard.core.analysis.analysis.Analysis object
:param code_trace_template: dictionary object corresponding to the
trace part of a bug template
:param links: dictionary object containing linked items
:returns: list containing boolean value indicating whether the trace
was satisfied, and a dictionary object of updated links
"""
logging.debug('Performing code trace.')
# Androguard variables for this APK.
self.androguard_apk_obj = a
self.androguard_d_array = d
self.androguard_dx = dx
# Start up utility helper.
self.inst_analysis_utils = AnalysisUtils(
self.path_base_dir,
self.androguard_apk_obj,
self.androguard_d_array,
self.androguard_dx
)
# The TRACE-relevant part of the bug template.
self.trace_template = code_trace_template
# Linked elements from checking previous parts of the template.
self.current_links = links
# Keep track of trace chains (to be converted to RETURN items).
self.output_chains = []
# Variables to determine how many traces to perform and
# to keep track of how many have been satisfied.
total_traces = 0
satisfied_traces = 0
# Variable to determine whether the overall TRACE is satisfied.
bool_satisfied = False
# The trace template can either be a dictionary or a list
# of dictionary objects.
if type(self.trace_template) is dict:
bool_satisfied = \
self.fn_process_individual_trace_list_item(self.trace_template)
# If the search is a list, then all individual sub-traces
# must be satisfied.
elif type(self.trace_template) is list:
for trace_item in self.trace_template:
total_traces += 1
bool_one_satisfied = \
self.fn_process_individual_trace_list_item(trace_item)
if bool_one_satisfied == True:
satisfied_traces += 1
if satisfied_traces == total_traces:
bool_satisfied = True
# Process returns as links.
if bool_satisfied == True:
self.current_links = \
self.inst_analysis_utils.fn_convert_returns_to_links(
self.current_returns,
self.current_links
)
self.fn_reset()
# Return the outcome and the links, to be used by next code segment.
return [bool_satisfied, self.current_links]
def fn_process_individual_trace_list_item(self, trace_dictionary):
"""Processes an individual trace object.
:param trace_dictionary: dictionary object containing details of an
individual trace to perform
:returns: boolean indicating whether the trace requirements were
satisfied
"""
# Each item within the list must be a dictionary trace object.
bool_satisfied = False
# Get parameters such as trace direction, etc.
self.fn_get_trace_parameters(trace_dictionary)
if self.trace_type == TRACE_TYPE_ADVANCED:
bool_adv_trace_output, output_chains = \
self.advanced_trace.fn_start_adv_trace(
self.androguard_apk_obj,
self.androguard_d_array,
self.androguard_dx,
trace_dictionary,
self.current_links,
self.trace_direction,
self.trace_length_max
)
return bool_adv_trace_output
# There may be a number of combinations, if the trace from/to
# have elements separated by OR.
[trace_from_string_list, trace_to_string_list] = \
self.fn_enumerate_trace_source_sinks(trace_dictionary)
# For each combination, run trace.
for trace_from_string_element in trace_from_string_list:
for trace_to_string_element in trace_to_string_list:
bool_single_trace_satisfied = self.fn_trace_through_code(
trace_from_string_element,
trace_to_string_element
)
if bool_single_trace_satisfied == True:
bool_satisfied = True
if bool_satisfied == True:
if 'RETURN' in trace_dictionary:
self.fn_analyse_returns(trace_dictionary)
return bool_satisfied
def fn_get_trace_parameters(self, trace_template):
"""Sets trace parameters based on trace template.
:param trace_template: dictionary object corresponding to a single
trace, from which trace parameters are to be extracted
"""
# Set max trace length, if available.
if 'TRACELENGTHMAX' in trace_template:
self.trace_length_max = int(trace_template['TRACELENGTHMAX'])
else:
self.trace_length_max = self.default_trace_length_max
# Set trace direction.
if 'TRACEDIRECTION' in trace_template:
trace_direction = trace_template['TRACEDIRECTION']
if trace_direction == TRACE_FORWARD:
self.trace_direction = TRACE_FORWARD
else:
self.trace_direction = TRACE_REVERSE
else:
# Default is REVERSE.
self.trace_direction = TRACE_REVERSE
# Set trace type.
if 'TRACETYPE' in trace_template:
self.trace_type = trace_template['TRACETYPE']
else:
self.trace_type = TRACE_TYPE_BASIC
def fn_enumerate_trace_source_sinks(self, trace_template):
"""Enumerates the (list of) trace start and end points from template.
:param trace_template: dictionary object corresponding to a single
trace, from which trace end points are to be extracted
:returns: list containing two lists - the first a list of possible
start points and the second, a list of possible end points
"""
# Get the start points.
trace_from_string = trace_template['TRACEFROM']
if ' OR ' in trace_from_string:
trace_from_string_list = trace_from_string.split(' OR ')
else:
trace_from_string_list = [trace_from_string]
# Get the end points.
trace_to_string = trace_template['TRACETO']
if ' OR ' in trace_to_string:
trace_to_string_list = trace_to_string.split(' OR ')
else:
trace_to_string_list = [trace_to_string]
return [trace_from_string_list, trace_to_string_list]
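# Illustrative sketch of the OR-splitting above (the template values are
# hypothetical, not taken from a real bug template):
#   trace_template = {'TRACEFROM': 'Lcom/a/A; OR Lcom/b/B;', 'TRACETO': 'Lcom/c/C;'}
#   fn_enumerate_trace_source_sinks(trace_template)
#   -> [['Lcom/a/A;', 'Lcom/b/B;'], ['Lcom/c/C;']]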
def fn_trace_through_code(self, trace_from_string, trace_to_string):
"""Begins the actual trace.
:param trace_from_string: string corresponding to a single start point
:param trace_to_string: string corresponding to a single end point
:returns: boolean indicating whether at least one path between the start
and end points was found
"""
# Get trace types.
[self.from_class_method, trace_from_string] = \
self.fn_get_trace_type(trace_from_string)
[self.to_class_method, trace_to_string] = \
self.fn_get_trace_type(trace_to_string)
# Get any linked items.
trace_from_list = self.fn_get_trace_items(
trace_from_string,
self.from_class_method
)
trace_to_list = self.fn_get_trace_items(
trace_to_string,
self.to_class_method
)
if ((trace_from_list == []) or (trace_to_list == [])):
logging.debug('Either TraceFrom or TraceTo evaluated to None.')
return False
self.trace_to_list = trace_to_list
return self.fn_trace_handler(trace_from_list)
def fn_get_trace_type(self, string):
"""Gets trace starting point type.
:param string: string containing trace start point type (either
"<class>" or "<method>". The string may not directly contain
these values, in which case the type will have to be inferred.
:returns: list containing the start point type and the modified string
(with the "<class>" or "<method>" indication removed)
"""
trace_type = '<class>'
if ':' in string:
trace_type = string.split(':')[0]
string = string[len(trace_type)+1:]
else:
if '->' in string:
trace_type = '<method>'
return [trace_type, string]
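# Illustrative sketch of the type detection above (the class/method names are
# hypothetical):
#   fn_get_trace_type('<class>:Lcom/example/Foo;')
#   -> ['<class>', 'Lcom/example/Foo;']
#   fn_get_trace_type('Lcom/example/Foo;->bar(I)V')   # no explicit prefix
#   -> ['<method>', 'Lcom/example/Foo;->bar(I)V']     # inferred from '->'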
def fn_get_trace_items(self, string, trace_type):
"""Gets the actual strings to use as start/end points of trace.
:param string: the string specified within the template
:param trace_type: string (either "<class>" or "<method>"), indicating
whether the trace should begin/end at the class level or method
level
:returns: list of possible start/end points
"""
output_items = []
# If the string begins with @, then we need to find linked items.
if string[0] == '@':
self.hardcoded_traceto = False
# If a sub-part has not been specified, then assume that the
# entire string is the link name.
if ']' not in string:
link_name = string
link_subpart = ''
remaining_string = ''
# If a sub-part has been specified, then split the string to
# identify the link name, relevant sub-part, and remainder
# of string.
else:
split_for_link = string.split(']')
remaining_string = split_for_link[1]
second_split = split_for_link[0].split('[')
link_name = second_split[0]
link_subpart = second_split[1].replace(' ', '')
# Get all linked items.
linked_items = self.inst_analysis_utils.fn_get_linked_items(
self.current_links,
link_name
)
if link_subpart == '':
for linked_item in linked_items:
return_string = linked_item + remaining_string
if trace_type == '<class>':
return_string = return_string.split('->')[0]
output_items.append(return_string)
elif link_subpart == '<class>':
for linked_item in linked_items:
class_part_only = linked_item.split('->')[0]
return_string = class_part_only + remaining_string
if trace_type == '<class>':
return_string = return_string.split('->')[0]
output_items.append(return_string)
elif link_subpart == '<method>':
for linked_item in linked_items:
if '->' not in linked_item:
continue
return_string = linked_item + remaining_string
if trace_type == '<class>':
return_string = return_string.split('->')[0]
output_items.append(return_string)
# If the string doesn't begin with @, then it's a normal string.
else:
self.hardcoded_traceto = True
if trace_type == '<class>':
string = string.split('->')[0]
output_items = [string]
return output_items
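# Illustrative sketch of the link resolution above. Assuming the link
# '@anchor_class' resolves (via fn_get_linked_items) to the single item
# 'Lcom/example/Foo;->bar(I)V' -- both the link name and the value are
# hypothetical examples:
#   fn_get_trace_items('@anchor_class[<class>]->onCreate()V', '<method>')
#   -> ['Lcom/example/Foo;->onCreate()V']
#   fn_get_trace_items('Lcom/example/Foo;->bar(I)V', '<class>')
#   -> ['Lcom/example/Foo;']   # hardcoded string, class part only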
def fn_trace_handler(self, trace_from_list):
"""Starts the trace process and outputs the result.
:param trace_from_list: list containing possible start points
for trace
:returns: boolean indicating whether at least one path was identified
between the start and end points
"""
for trace_from in trace_from_list:
self.checked_methods = set()
# Set a stop condition.
self.stop_condition = STOP_CONDITION_FALSE
# Get class/method/desc parts.
[class_part, method_part, desc_part] = \
self.fn_determine_class_method_desc(
trace_from,
self.from_class_method
)
# Start the forward or reverse tracers, based on template.
if self.trace_direction == TRACE_REVERSE:
self.fn_trace_reverse(
class_part,
method_part,
desc_part,
trace_from
)
else:
self.fn_trace_forward(
class_part,
method_part,
desc_part,
trace_from
)
# If the output chain list is not empty, it means at least one path
# between the start and end points was identified.
if self.output_chains != []:
return True
else:
return False
def fn_trace_reverse(self, class_part, method_part, desc_part,
trace_chain=''):
"""Performs the reverse tracing function.
Reverse tracing starts from TRACEFROM and gets all xref_from at each
level. The collection of all xref_from's is stored in an
"ordered string".
:param class_part: string denoting class part of trace start point
:param method_part: string denoting method part of trace start point
:param desc_part: string denoting descriptor part of trace start point
:param trace_chain: string denoting ordered trace chain
"""
# Get starting points.
starting_points = \
self.inst_analysis_utils.fn_get_calls_to_method(
class_part,
method_part,
desc_part
)
# Include subclasses.
all_subclasses = []
all_subclasses.extend(
self.inst_analysis_utils.fn_find_subclasses(class_part)
)
for subclass in all_subclasses:
starting_points.extend(
self.inst_analysis_utils.fn_get_calls_to_method(
subclass,
method_part,
desc_part
)
)
# Also add the original method to the search, as it might not be called directly (for example, onCreate).
if desc_part != '.':
desc_part = re.escape(desc_part)
class_part = re.escape(class_part)
method_part = re.escape(method_part)
matching_methods = self.androguard_dx.find_methods(
class_part,
method_part,
desc_part)
for method in matching_methods:
starting_points.append(method.get_method())
# Reset.
class_part = None
method_part = None
desc_part = None
# Start trace for each starting point.
for starting_point in starting_points:
# Get class/method/desc parts.
[class_part, method_part, desc_part] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_method(
starting_point
)
# If we want to consider subclasses as well.
# Note that this is different to the step above. Above, we get
# subclasses of the class/method that is being called. Here, we
# get the subclasses for the class that is doing the calling.
class_parts = [class_part]
class_parts.extend(
self.inst_analysis_utils.fn_find_subclasses(class_part)
)
# Handle any special cases (AsyncTask, etc).
# The class name remains the same for these special cases.
# Only the method/descriptor changes.
if method_part in self.special_case_object_list_reverse:
method_descriptors = \
self.fn_handle_special_case_reverse(
class_part,
method_part,
desc_part
)
else:
method_descriptors = [method_part + desc_part]
if not method_descriptors:
method_descriptors = [method_part + desc_part]
# Go to the next step of the trace.
for class_part in class_parts:
for method_descriptor in method_descriptors:
method_part = method_descriptor.split('(')[0]
desc_part = '(' + method_descriptor.split('(')[1]
self.fn_analyse_trace_point(
class_part,
method_part,
desc_part,
trace_chain
)
def fn_handle_special_case_reverse(self, class_part, method_part,
desc_part):
"""Handles cases such as AsyncTask, where no direct link can be made.
:param class_part: string name for class
:param method_part: string name for method
:param desc_part: string name for descriptor
:returns: list of revised method_part+desc_part
"""
relevant_object = self.special_case_object_list_reverse[method_part]
new_method_to_search = []
all_superclasses = \
self.inst_analysis_utils.fn_find_superclasses(class_part)
# Is this needed?
all_superclasses.append(class_part)
for superclass in all_superclasses:
superclass = superclass.strip()
if superclass in relevant_object:
return relevant_object[superclass]
def fn_trace_forward(self, class_part, method_part, desc_part,
trace_chain=''):
"""Performs the forward tracing function.
Forward tracing starts from TRACEFROM and gets all xref_to at each
level. The collection of all xref_to's is stored in an
"ordered string".
:param class_part: string denoting class part of trace start point
:param method_part: string denoting method part of trace start point
:param desc_part: string denoting descriptor part of trace start point
:param trace_chain: string denoting ordered trace chain
"""
# Get starting points.
# These will still be methods that call the method of interest
# (even though the trace direction is Forward).
starting_points = \
self.inst_analysis_utils.fn_get_calls_from_method(
class_part,
method_part,
desc_part
)
# Also add the original method to the search, as it might not be called directly (for example, onCreate).
if desc_part != '.':
desc_part = re.escape(desc_part)
class_part = re.escape(class_part)
method_part = re.escape(method_part)
matching_methods = self.androguard_dx.find_methods(
class_part,
method_part,
desc_part)
for method in matching_methods:
starting_points.append(method.get_method())
# Reset.
class_part = None
method_part = None
desc_part = None
for starting_point in starting_points:
# If the method is external, we won't get any further.
# Get class/method/desc parts.
[class_part, method_part, desc_part] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_method(
starting_point
)
class_parts = [class_part]
# Special case handling.
method_descriptor = method_part + desc_part
if method_descriptor in self.special_case_object_list_forward:
method_part = \
self.fn_handle_special_case_forward(method_descriptor)
desc_part = '.'
# Go to next step.
for class_part in class_parts:
self.fn_analyse_trace_point(
class_part,
method_part,
desc_part,
trace_chain
)
def fn_handle_special_case_forward(self, method_descriptor):
"""Handle special cases, such as AsyncTask, in forward traces.
:param method_descriptor: string denoting combined method and
descriptor parts
:returns: string for method part
"""
return self.special_case_object_list_forward[method_descriptor]
def fn_analyse_trace_point(self, class_part, method_part, desc_part,
trace_chain):
"""Checks current trace point against stop condition; else continues.
:param class_part: string denoting class part of current trace point
:param method_part: string denoting method part of current trace point
:param desc_part: string denoting descriptor part of current trace point
:param trace_chain: string denoting ordered trace chain
"""
compound_name = class_part + '->' + method_part + desc_part
if compound_name.startswith('Landroid') or compound_name.startswith('Ljava') or compound_name.startswith('Lcom/google/android'):
return
if compound_name in self.checked_methods:
return
else:
self.checked_methods.add(compound_name)
tmpChain = []
# Check if stop condition is met.
self.fn_check_stop_condition(compound_name)
if self.stop_condition == STOP_CONDITION_TRUE:
self.stop_condition = STOP_CONDITION_FALSE
if trace_chain == '':
trace_chain = compound_name
else:
trace_chain = trace_chain + ',' + compound_name
# If somehow we have the same chain repeated:
if trace_chain in self.output_chains:
return
self.output_chains.append(trace_chain)
for trace_chain in tmpChain:
if trace_chain in self.output_chains:
return
self.output_chains.append(trace_chain)
return
elif self.stop_condition == STOP_CONDITION_MAYBE:
self.stop_condition = STOP_CONDITION_FALSE
compound_name = '|MAYBE|' + compound_name
if trace_chain == '':
trace_chain = compound_name
else:
trace_chain = trace_chain + ',' + compound_name
# If somehow we have the same chain repeated:
if trace_chain in self.output_chains:
return
self.output_chains.append(trace_chain)
for trace_chain in tmpChain:
if trace_chain in self.output_chains:
return
self.output_chains.append(trace_chain)
else:
if trace_chain == '':
trace_chain = compound_name
else:
trace_chain = trace_chain + ',' + compound_name
# If somehow we have the same chain repeated:
if trace_chain in tmpChain:
return
tmpChain.append(trace_chain)
# If the stop condition wasn't met,
# and we haven't exceeded the max chain length.
trace_chain_as_list = trace_chain.split(',')
if len(trace_chain_as_list) > self.trace_length_max:
return
if self.trace_direction == TRACE_FORWARD:
self.fn_trace_forward(
class_part,
method_part,
desc_part,
trace_chain
)
else:
self.fn_trace_reverse(
class_part,
method_part,
desc_part,
trace_chain
)
def fn_check_stop_condition(self, check_value):
"""Checks whether the stop condition has been satisfied for the trace.
This does not return a value, but rather sets a variable to a pre-defined
value if the stop condition is satisfied.
:param check_value: string value to be checked against stop condition
"""
if self.to_class_method == '<class>':
check_value = check_value.split('->')[0]
if check_value in self.trace_to_list:
self.stop_condition = STOP_CONDITION_TRUE
return
# Special types of checks for when the traceto is hardcoded.
if self.hardcoded_traceto == False:
return
# This should never be true. Hardcoded traceto's will only have one
# value in the list (even with ORs).
if len(self.trace_to_list) > 1:
return
trace_to_item = self.trace_to_list[0]
# Check for wildcard classes.
if ((self.to_class_method == '<class>') and ('*' in trace_to_item)):
trace_to_item = trace_to_item.replace('*', '')
if trace_to_item in check_value:
self.stop_condition = STOP_CONDITION_TRUE
else:
self.stop_condition = STOP_CONDITION_FALSE
return
# Do a partial search for methods only. Do this only when the entire
# trace-to is hardcoded.
# If traceto is only a class, we can't do much.
if '->' not in trace_to_item:
return
if '->' not in check_value:
return
# If traceto doesn't have descriptor, don't proceed.
# Else, we might end up with way too many FPs.
if '(' not in trace_to_item:
return
if '(' not in check_value:
return
if trace_to_item.split('->')[1] == check_value.split('->')[1]:
self.stop_condition = STOP_CONDITION_MAYBE
return
def fn_determine_class_method_desc(self, trace_from, trace_from_type):
"""Determines the class/method/desc parts based on trace start point.
:param trace_from: string denoting trace start point
:param trace_from_type: string containing trace start point type
(either "<class>" or "<method>")
:returns: list containing class, method, descriptor parts
"""
[class_part, method_part, desc_part] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_string(
trace_from
)
# If we care only about the class part, overwrite the method/desc
# parts with '.' (i.e., "don't care")
if trace_from_type == '<class>':
method_part = '.'
desc_part = '.'
return [class_part, method_part, desc_part]
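# Illustrative sketch (assuming fn_get_class_method_desc_from_string splits a
# hypothetical smali reference 'Lcom/example/Foo;->bar(I)V' into its three
# parts):
#   fn_determine_class_method_desc('Lcom/example/Foo;->bar(I)V', '<method>')
#   -> ['Lcom/example/Foo;', 'bar', '(I)V']
#   fn_determine_class_method_desc('Lcom/example/Foo;->bar(I)V', '<class>')
#   -> ['Lcom/example/Foo;', '.', '.']   # method/desc set to "don't care"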
def fn_analyse_returns(self, trace_template):
"""Analyses the return object and appends items to returns list.
:param trace_template: dictionary object containing RETURN element
"""
returnables = trace_template['RETURN']
returnable_elements_name = returnables.split(' AS ')[1]
return_type = returnables.split(' AS ')[0]
# Analyse each chain.
for chain_string in self.output_chains:
chain = chain_string.split(',')
if self.trace_direction == TRACE_REVERSE:
chain.reverse()
output_str = ''
for chain_node in chain:
chain_node = chain_node.strip()
if output_str == '':
output_str = chain_node
else:
output_str = output_str + ',' + chain_node
self.current_returns.append({returnable_elements_name: output_str})
|
the-stack_0_10506 | params = {
'type': 'MBPO',
'universe': 'gym',
'domain': 'FetchPickAndPlace',
'task': 'v1',
'log_dir': '~/ray_mbpo/',
'exp_name': 'defaults',
'kwargs': {
'epoch_length': 1000,
'train_every_n_steps': 1,
'actor_train_repeat': 1,
'critic_train_repeat': 20,
'eval_render_mode': None,
'eval_n_episodes': 10,
'eval_deterministic': True,
'discount': 0.99,
'tau': 5e-3,
'reward_scale': 1.0,
'model_train_freq': 250,
'model_retain_epochs': 1,
'rollout_batch_size': 100e3,
'sample_repeat': 1, # repeatedly propose actions on one start state
'deterministic': False,
'num_networks': 7,
'num_elites': 5,
'real_ratio': 1,
'critic_same_as_actor': True,
'target_entropy': -1,
'max_model_t': None,
'rollout_schedule': [20, 150, 1, 15],
}
}
|
the-stack_0_10507 | #!/usr/bin/env python
import codecs
import os.path
import re
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
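# Example (the version number is illustrative): if botocore/__init__.py
# contains a line such as __version__ = '1.2.3', then
# find_version("botocore", "__init__.py") returns '1.2.3'.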
requires = ['jmespath>=0.7.1,<1.0.0',
'docutils>=0.10,<0.16']
if sys.version_info[:2] == (2, 6):
# For python2.6 we have a few other dependencies.
# First we need an ordered dictionary so we use the
# 2.6 backport.
requires.append('ordereddict==1.1')
# Then we need simplejson. This is because we need
# a json version that allows us to specify we want to
# use an ordereddict instead of a normal dict for the
# JSON objects. The 2.7 json module has this. For 2.6
# we need simplejson.
requires.append('simplejson==3.3.0')
requires.append('python-dateutil>=2.1,<2.7.0')
else:
requires.append('python-dateutil>=2.1,<3.0.0')
if sys.version_info[:2] == (2, 6):
requires.append('urllib3>=1.20,<1.24')
elif sys.version_info[:2] == (3, 3):
requires.append('urllib3>=1.20,<1.23')
else:
requires.append('urllib3>=1.20,<1.26')
setup(
name='botocore',
version=find_version("botocore", "__init__.py"),
description='Low-level, data-driven core of boto 3.',
long_description=open('README.rst').read(),
author='Amazon Web Services',
url='https://github.com/boto/botocore',
scripts=[],
packages=find_packages(exclude=['tests*']),
package_data={'botocore': ['cacert.pem', 'data/*.json', 'data/*/*.json'],
'botocore.vendored.requests': ['*.pem']},
include_package_data=True,
install_requires=requires,
extras_require={
':python_version=="2.6"': [
'ordereddict==1.1',
'simplejson==3.3.0',
]
},
license="Apache License 2.0",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
]
)
|
the-stack_0_10508 | from PyQt5 import QtCore, QtWidgets
def __package_list_updated(scene):
"""Rename package."""
for i in range(scene.ui.packages_listWidget.count()):
scene.project_data.packages[i] = scene.ui.packages_listWidget.item(i).text()
def __add_new_package(scene):
"""Add new package."""
selector = scene.ui.packages_listWidget # alias for the listWidget
if selector.item(selector.count() - 1).text() != '':
item = QtWidgets.QListWidgetItem('')
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
selector.addItem(item)
selector.editItem(item)
selector.verticalScrollBar().setValue(selector.verticalScrollBar().maximum())
if selector.count() > len(scene.project_data.packages):
scene.project_data.packages.append('')
def __delete_package(scene):
"""Delete package."""
current_row = scene.ui.packages_listWidget.currentRow()
if current_row > 6:
scene.ui.packages_listWidget.takeItem(current_row)
del scene.project_data.packages[current_row]
def __connect_code_plain_text_edit(scene, plain_text_edit_widget, is_before):
"""Connect code before/after plainTextEdit."""
if is_before:
scene.project_data.code_before = plain_text_edit_widget.toPlainText()
else:
scene.project_data.code_after = plain_text_edit_widget.toPlainText()
def __connect_code_text_edit_pushbutton_apply(scene):
"""Connect apply changes pushbutton."""
scene.edit.add_undo_item(scene)
def connect_code(scene):
"""Connect signals in the code tab."""
scene.ui.packages_listWidget.itemChanged.connect(lambda: __package_list_updated(scene))
scene.ui.packages_add_new.clicked.connect(lambda: __add_new_package(scene))
scene.ui.packages_delete.clicked.connect(lambda: __delete_package(scene))
scene.ui.code_before_text.textChanged.connect(
lambda: __connect_code_plain_text_edit(scene, scene.ui.code_before_text, True))
scene.ui.code_before_apply.clicked.connect(
lambda: __connect_code_text_edit_pushbutton_apply(scene))
scene.ui.code_after_text.textChanged.connect(
lambda: __connect_code_plain_text_edit(scene, scene.ui.code_after_text, False))
scene.ui.code_after_apply.clicked.connect(
lambda: __connect_code_text_edit_pushbutton_apply(scene))
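# Usage sketch ('scene' is an assumed object exposing .ui, .project_data and
# .edit, as used in the handlers above):
#   connect_code(scene)   # call once, after the Qt UI has been built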
|
the-stack_0_10511 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import os
import sys
import json
import time
import random
import getpass
import datetime
import gettext
import requests
import base64
from ikabot import config
from ikabot.config import *
from ikabot.helpers.botComm import *
from ikabot.helpers.gui import banner
from ikabot.helpers.aesCipher import *
from ikabot.helpers.pedirInfo import read
from ikabot.helpers.getJson import getCity
from urllib3.exceptions import InsecureRequestWarning
t = gettext.translation('session', localedir, languages=languages, fallback=True)
_ = t.gettext
class Session:
def __init__(self):
if isWindows:
self.logfile = os.getenv('temp') + '/ikabot.log'
else:
self.logfile = '/tmp/ikabot.log'
self.log = False
self.padre = True
self.logged = False
# disable ssl verification warning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
self.__login()
def writeLog(self, msg):
return self.__log(msg)
def __log(self, msg):
if self.log is False:
return
now = datetime.datetime.now()
entry = '{}.{:02}.{:02} {:02d}:{:02}:{:02}\t{:d}: {}\n'.format(now.year, now.month, now.day, now.hour, now.minute, now.second, os.getpid(), msg)
fh = open(self.logfile, 'a')
fh.write(entry)
fh.close()
def __genRand(self):
return hex(random.randint(0, 65535))[2:]
def __genCookie(self):
return self.__genRand() + self.__genRand() + hex(int(round(time.time() * 1000)))[2:] + self.__genRand() + self.__genRand()
def __fp_eval_id(self):
return self.__genRand() + self.__genRand() + '-' + self.__genRand() + '-' + self.__genRand() + '-' + self.__genRand() + '-' + self.__genRand() + self.__genRand() + self.__genRand()
def __logout(self, html):
if html is not None:
idCiudad = getCity(html)['id']
token = re.search(r'actionRequest"?:\s*"(.*?)"', html).group(1)
urlLogout = 'action=logoutAvatar&function=logout&sideBarExt=undefined&startPageShown=1&detectedDevice=1&cityId={0}&backgroundView=city&currentCityId={0}&actionRequest={1}'.format(idCiudad, token)
self.s.get(self.urlBase + urlLogout, verify=config.do_ssl_verify)
def __isInVacation(self, html):
return 'nologin_umod' in html
def __isExpired(self, html):
return 'index.php?logout' in html or '<a class="logout"' in html
def isExpired(self, html):
return self.__isExpired(html)
def __saveNewCookies(self):
sessionData = self.getSessionData()
cookie_dict = dict(self.s.cookies.items())
sessionData['cookies'] = cookie_dict
self.setSessionData(sessionData)
def __getCookie(self, sessionData=None):
if sessionData is None:
sessionData = self.getSessionData()
try:
cookie_dict = sessionData['cookies']
self.s = requests.Session()
self.__update_proxy(sessionData=sessionData)
self.s.headers.clear()
self.s.headers.update(self.headers)
requests.cookies.cookiejar_from_dict(cookie_dict, cookiejar=self.s.cookies, overwrite=True)
except KeyError:
self.__login(3)
def __login(self, retries=0):
self.__log('__login()')
if not self.logged:
banner()
self.mail = read(msg=_('Mail:'))
if len(config.predetermined_input) != 0:
self.password = config.predetermined_input.pop(0)
else:
self.password = getpass.getpass(_('Password:'))
banner()
self.s = requests.Session()
self.cipher = AESCipher(self.mail, self.password)
# get gameEnvironmentId and platformGameId
self.headers = {'Host': 'lobby.ikariam.gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'DNT': '1', 'Connection': 'close', 'Referer': 'https://lobby.ikariam.gameforge.com/'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.get('https://lobby.ikariam.gameforge.com/config/configuration.js')
js = r.text
gameEnvironmentId = re.search(r'"gameEnvironmentId":"(.*?)"', js)
if gameEnvironmentId is None:
sys.exit('gameEnvironmentId not found')
gameEnvironmentId = gameEnvironmentId.group(1)
platformGameId = re.search(r'"platformGameId":"(.*?)"', js)
if platformGameId is None:
sys.exit('platformGameId not found')
platformGameId = platformGameId.group(1)
# get __cfduid cookie
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'DNT': '1', 'Connection': 'close', 'Referer': 'https://lobby.ikariam.gameforge.com/'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.get('https://gameforge.com/js/connect.js')
html = r.text
captcha = re.search(r'Attention Required', html)
if captcha is not None:
sys.exit('Captcha error!')
# update __cfduid cookie
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://lobby.ikariam.gameforge.com/', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'close'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.get('https://gameforge.com/config')
__fp_eval_id_1 = self.__fp_eval_id()
__fp_eval_id_2 = self.__fp_eval_id()
try:
# get pc_idt cookie
self.headers = {'Host': 'pixelzirkus.gameforge.com', 'User-Agent': user_agent, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Content-Type': 'application/x-www-form-urlencoded', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'close', 'Referer': 'https://lobby.ikariam.gameforge.com/', 'Upgrade-Insecure-Requests': '1'}
self.s.headers.clear()
self.s.headers.update(self.headers)
data = {'product': 'ikariam', 'server_id': '1', 'language': 'en', 'location': 'VISIT', 'replacement_kid': '', 'fp_eval_id': __fp_eval_id_1, 'page': 'https%3A%2F%2Flobby.ikariam.gameforge.com%2F', 'referrer': '', 'fingerprint': '2175408712', 'fp_exec_time': '1.00'}
r = self.s.post('https://pixelzirkus.gameforge.com/do/simple', data=data)
# update pc_idt cookie
self.headers = {'Host': 'pixelzirkus.gameforge.com', 'User-Agent': user_agent, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Content-Type': 'application/x-www-form-urlencoded', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'close', 'Referer': 'https://lobby.ikariam.gameforge.com/', 'Upgrade-Insecure-Requests': '1'}
self.s.headers.clear()
self.s.headers.update(self.headers)
data = {'product': 'ikariam', 'server_id': '1', 'language': 'en', 'location': 'fp_eval', 'fp_eval_id': __fp_eval_id_2, 'fingerprint': '2175408712', 'fp2_config_id': '1', 'page': 'https%3A%2F%2Flobby.ikariam.gameforge.com%2F', 'referrer': '', 'fp2_value': '921af958be7cf2f76db1e448c8a5d89d', 'fp2_exec_time': '96.00'}
r = self.s.post('https://pixelzirkus.gameforge.com/do/simple', data=data)
except Exception:
pass # These cookies are not required and sometimes cause issues for people logging in
# options req (not really needed)
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Access-Control-Request-Method': 'POST', 'Access-Control-Request-Headers': 'content-type,tnt-installation-id', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'close'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.options('https://gameforge.com/api/v1/auth/thin/sessions')
# send creds
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate, br', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/', 'TNT-Installation-Id': '', 'Content-Type': 'application/json', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache', 'TE': 'Trailers'}
self.s.headers.clear()
self.s.headers.update(self.headers)
data = {"identity": self.mail, "password": self.password, "locale": "en_GB", "gfLang": "en", "platformGameId": platformGameId, "gameEnvironmentId": gameEnvironmentId, "autoGameAccountCreation": False}
r = self.s.post('https://gameforge.com/api/v1/auth/thin/sessions', json=data)
if 'gf-challenge-id' in r.headers:
while True:
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate, br', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/', 'TNT-Installation-Id': '', 'Content-Type': 'application/json', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache', 'TE': 'Trailers'}
self.s.headers.clear()
self.s.headers.update(self.headers)
data = {"identity": self.mail, "password": self.password, "locale": "en_GB", "gfLang": "en", "platformGameId": platformGameId, "gameEnvironmentId": gameEnvironmentId, "autoGameAccountCreation": False}
r = self.s.post('https://gameforge.com/api/v1/auth/thin/sessions', json=data)
challenge_id = r.headers['gf-challenge-id'].split(';')[0]
self.headers = {'accept': '*/*', 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'en-GB,el;q=0.9', 'dnt': '1', 'origin': 'https://lobby.ikariam.gameforge.com', 'referer': 'https://lobby.ikariam.gameforge.com/', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-site', 'user-agent': user_agent}
self.s.headers.clear()
self.s.headers.update(self.headers)
request1 = self.s.get('https://challenge.gameforge.com/challenge/{}'.format(challenge_id))
request2 = self.s.get('https://image-drop-challenge.gameforge.com/index.js')
try:
request3 = self.s.post('https://pixelzirkus.gameforge.com/do2/simple')
except Exception as e:
pass
captcha_time = self.s.get('https://image-drop-challenge.gameforge.com/challenge/{}/en-GB'.format(challenge_id)).json()['lastUpdated']
text_image = self.s.get('https://image-drop-challenge.gameforge.com/challenge/{}/en-GB/text?{}'.format(challenge_id, captcha_time)).content
drag_icons = self.s.get('https://image-drop-challenge.gameforge.com/challenge/{}/en-GB/drag-icons?{}'.format(challenge_id, captcha_time)).content
drop_target = self.s.get('https://image-drop-challenge.gameforge.com/challenge/{}/en-GB/drop-target?{}'.format(challenge_id, captcha_time)).content
data = {}
try:
from ikabot.helpers.process import run
text = run('nslookup -q=txt ikagod.twilightparadox.com ns2.afraid.org')
parts = text.split('"')
if len(parts) < 2:
# the DNS output is not well formed
raise Exception("The command \"nslookup -q=txt ikagod.twilightparadox.com ns2.afraid.org\" returned bad data: {}".format(text))
address = parts[1]
files = {'text_image': text_image, 'drag_icons': drag_icons}
captcha = self.s.post('http://{0}'.format(address), files=files).text
if not captcha.isnumeric():
raise Exception("Failed to resolve interactive captcha automatically. Server returned bad data: {}".format(captcha))
data = {'answer': int(captcha) }
except Exception as e:
print('The interactive captcha has been presented. Automatic captcha resolution failed because: {}'.format(str(e)))
print('Do you want to solve it via Telegram? (Y/n)')
config.predetermined_input[:] = [] # Unholy way to clear a ListProxy object
answer = read(values=['y', 'Y', 'n', 'N'], default='y')
if answer.lower() == 'n':
sys.exit(_('Captcha error! (Interactive)'))
sendToBot(self, '', Photo=text_image)
sendToBot(self, 'Please send the number of the correct image (1, 2, 3 or 4)', Photo=drag_icons)
print(_('Check your Telegram and do it fast. The captcha expires quickly'))
captcha_time = time.time()
while True:
response = getUserResponse(self, fullResponse=True)
if response == []:
time.sleep(5)
continue
response = response[-1]
if response['date'] < captcha_time:
time.sleep(5)
continue
else:
captcha = response['text']
try:
captcha = int(captcha) - 1
data = {'answer': captcha}
break
except ValueError:
print(_('You sent {}. Please send only a number (1, 2, 3 or 4)').format(captcha))
time.sleep(5)
continue
time.sleep(5)
captcha_sent = self.s.post('https://image-drop-challenge.gameforge.com/challenge/{}/en-GB'.format(challenge_id), json=data).json()
if captcha_sent['status'] == 'solved':
self.headers = {'Host': 'gameforge.com', 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate, br', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/', 'TNT-Installation-Id': '', 'Content-Type': 'application/json', 'Origin': 'https://lobby.ikariam.gameforge.com', 'DNT': '1', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache', 'TE': 'Trailers'}
self.s.headers.clear()
self.s.headers.update(self.headers)
data = {"identity": self.mail, "password": self.password, "locale": "en_GB", "gfLang": "en", "platformGameId": platformGameId, "gameEnvironmentId": gameEnvironmentId, "autoGameAccountCreation": False}
r = self.s.post('https://gameforge.com/api/v1/auth/thin/sessions', json=data)
if 'gf-challenge-id' in r.headers:
self.writeLog("Failed to solve interactive captcha!")
print("Failed to solve interactive captcha, trying again!")
continue
else:
break
if r.status_code == 403:
sys.exit(_('Wrong email or password\n'))
# get the authentication token and set the cookie
ses_json = json.loads(r.text, strict=False)
auth_token = ses_json['token']
cookie_obj = requests.cookies.create_cookie(domain='.gameforge.com', name='gf-token-production', value=auth_token)
self.s.cookies.set_cookie(cookie_obj)
# get accounts
self.headers = {'Host': 'lobby.ikariam.gameforge.com', 'User-Agent': user_agent, 'Accept': 'application/json', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/hub', 'Authorization': 'Bearer {}'.format(auth_token), 'DNT': '1', 'Connection': 'close'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.get('https://lobby.ikariam.gameforge.com/api/users/me/accounts')
accounts = json.loads(r.text, strict=False)
# get servers
self.headers = {'Host': 'lobby.ikariam.gameforge.com', 'User-Agent': user_agent, 'Accept': 'application/json', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://lobby.ikariam.gameforge.com/es_AR/hub', 'Authorization': 'Bearer {}'.format(auth_token), 'DNT': '1', 'Connection': 'close'}
self.s.headers.clear()
self.s.headers.update(self.headers)
r = self.s.get('https://lobby.ikariam.gameforge.com/api/servers')
servers = json.loads(r.text, strict=False)
if not self.logged:
if len([account for account in accounts if account['blocked'] is False]) == 1:
self.account = [account for account in accounts if account['blocked'] is False][0]
else:
print(_('With which account do you want to log in?\n'))
max_name = max([len(account['name']) for account in accounts if account['blocked'] is False])
i = 0
for account in [account for account in accounts if account['blocked'] is False]:
server = account['server']['language']
mundo = account['server']['number']
account_group = account['accountGroup']
server_lang = None
world, server_lang = [(srv['name'], srv['language']) for srv in servers if srv['accountGroup'] == account_group][0]
i += 1
pad = ' ' * (max_name - len(account['name']))
print('({:d}) {}{} [{} - {}]'.format(i, account['name'], pad, server_lang, world))
num = read(min=1, max=i)
self.account = [account for account in accounts if account['blocked'] is False][num - 1]
self.username = self.account['name']
self.login_servidor = self.account['server']['language']
self.account_group = self.account['accountGroup']
self.mundo = str(self.account['server']['number'])
self.word, self.servidor = [(srv['name'], srv['language']) for srv in servers if srv['accountGroup'] == self.account_group][0]
config.infoUser = _('Server:{}').format(self.servidor)
config.infoUser += _(', World:{}').format(self.word)
config.infoUser += _(', Player:{}').format(self.username)
banner()
self.host = 's{}-{}.ikariam.gameforge.com'.format(self.mundo, self.servidor)
self.urlBase = 'https://{}/index.php?'.format(self.host)
self.headers = {'Host': self.host, 'User-Agent': user_agent, 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate, br', 'Referer': 'https://{}'.format(self.host), 'X-Requested-With': 'XMLHttpRequest', 'Origin': 'https://{}'.format(self.host), 'DNT': '1', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache'}
sessionData = self.getSessionData()
used_old_cookies = False
# if there are cookies stored, try to use them
if 'cookies' in sessionData and self.logged is False:
# create a new temporary session object
old_s = requests.Session()
# set the headers
old_s.headers.clear()
old_s.headers.update(self.headers)
# set the cookies to test
cookie_dict = sessionData['cookies']
requests.cookies.cookiejar_from_dict(cookie_dict, cookiejar=old_s.cookies, overwrite=True)
self.__update_proxy(obj=old_s, sessionData=sessionData)
try:
# make a request to check the connection
html = old_s.get(self.urlBase, verify=config.do_ssl_verify).text
except Exception:
self.__proxy_error()
cookies_are_valid = self.__isExpired(html) is False
if cookies_are_valid:
self.__log('using old cookies')
used_old_cookies = True
# assign the old cookies to the session object
requests.cookies.cookiejar_from_dict(cookie_dict, cookiejar=self.s.cookies, overwrite=True)
# set the proxy
self.__update_proxy(sessionData=sessionData)
# set the headers
self.s.headers.clear()
self.s.headers.update(self.headers)
# login as normal and get new cookies
if used_old_cookies is False:
self.__log('using new cookies')
resp = self.s.get('https://lobby.ikariam.gameforge.com/api/users/me/loginLink?id={}&server[language]={}&server[number]={}'.format(self.account['id'], self.login_servidor, self.mundo)).text
resp = json.loads(resp, strict=False)
if 'url' not in resp:
if retries > 0:
return self.__login(retries-1)
else:
msg = 'Login Error: ' + str(resp)
if self.padre:
print(msg)
sys.exit()
else:
sys.exit(msg)
url = resp['url']
match = re.search(r'https://s\d+-\w{2}\.ikariam\.gameforge\.com/index\.php\?', url)
if match is None:
sys.exit('Error')
# set the headers
self.s.headers.clear()
self.s.headers.update(self.headers)
# set the proxy
self.__update_proxy(sessionData=sessionData)
# use the new cookies instead, invalidate the old ones
try:
html = self.s.get(url, verify=config.do_ssl_verify).text
except Exception:
self.__proxy_error()
if self.__isInVacation(html):
msg = _('The account went into vacation mode')
if self.padre:
print(msg)
else:
sendToBot(self, msg)
os._exit(0)
if self.__isExpired(html):
if retries > 0:
return self.__login(retries-1)
if self.padre:
msg = _('Login error.')
print(msg)
os._exit(0)
raise Exception('Couldn\'t log in')
if not used_old_cookies:
self.__saveNewCookies()
self.logged = True
def __backoff(self):
self.__log('__backoff()')
if self.padre is False:
time.sleep(5 * random.randint(0, 10))
def __sessionExpired(self):
self.__log('__sessionExpired()')
self.__backoff()
sessionData = self.getSessionData()
try:
if self.s.cookies['PHPSESSID'] != sessionData['cookies']['PHPSESSID']:
self.__getCookie(sessionData)
else:
try:
self.__login(3)
except Exception:
self.__sessionExpired()
except KeyError:
try:
self.__login(3)
except Exception:
self.__sessionExpired()
def __proxy_error(self):
sessionData = self.getSessionData()
if 'proxy' not in sessionData or sessionData['proxy']['set'] is False:
sys.exit('network error')
elif self.padre is True:
print(_('There seems to be a problem connecting to ikariam.'))
print(_('Do you want to disable the proxy? [Y/n]'))
rta = read(values=['y', 'Y', 'n', 'N', ''])
if rta.lower() == 'n':
sys.exit()
else:
sessionData['proxy']['set'] = False
self.setSessionData(sessionData)
print(_('Proxy disabled, try again.'))
enter()
sys.exit()
else:
msg = _('Network error. Consider disabling the proxy.')
sendToBot(self, msg)
sys.exit()
def __update_proxy(self, *, obj=None, sessionData=None):
# set the proxy
if obj is None:
obj = self.s
if sessionData is None:
sessionData = self.getSessionData()
if 'proxy' in sessionData and sessionData['proxy']['set'] is True:
obj.proxies.update(sessionData['proxy']['conf'])
else:
obj.proxies.update({})
def __checkCookie(self):
self.__log('__checkCookie()')
sessionData = self.getSessionData()
try:
if self.s.cookies['PHPSESSID'] != sessionData['cookies']['PHPSESSID']:
self.__getCookie(sessionData)
except KeyError:
try:
self.__login(3)
except Exception:
self.__sessionExpired()
def __token(self):
"""Generates a valid actionRequest token from the session
Returns
-------
token : str
a string representing a valid actionRequest token
"""
html = self.get()
return re.search(r'actionRequest"?:\s*"(.*?)"', html).group(1)
def get(self, url='', params={}, ignoreExpire=False, noIndex=False, fullResponse=False):
"""Sends get request to ikariam
Parameters
----------
url : str
this string will be appended to the end of the urlBase of the Session object. urlBase will look like: 'https://s(number)-(country).ikariam.gameforge.com/index.php?'
params : dict
dictionary containing key-value pairs which represent the parameters of the get request
ignoreExpire: bool
if set to True it will ignore whether the current session is expired and simply return whatever response it gets. If set to False, it will make sure the current session is not expired before sending the get request; if it is expired, it will log in again
noIndex : bool
if set to True it will remove 'index.php' from the end of urlBase before appending url params and sending the get request
Returns
-------
html : str
response from the server
"""
self.__checkCookie()
self.__update_proxy()
if noIndex:
url = self.urlBase.replace('index.php', '') + url
else:
url = self.urlBase + url
self.__log('get({}), params:{}'.format(url, str(params)))
while True:
try:
response = self.s.get(url, params=params, verify=config.do_ssl_verify)
html = response.text
if ignoreExpire is False:
assert self.__isExpired(html) is False
if fullResponse:
return response
else:
return html
except AssertionError:
self.__sessionExpired()
except requests.exceptions.ConnectionError:
time.sleep(ConnectionError_wait)
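# Illustrative usage sketch (the query parameters shown are hypothetical
# examples, not a documented ikariam API):
#   session = Session()   # logs in interactively
#   html = session.get(params={'view': 'city', 'cityId': 123})
#   # 'html' holds the response body as a string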
def post(self, url='', payloadPost={}, params={}, ignoreExpire=False, noIndex=False):
"""Sends post request to ikariam
Parameters
----------
url : str
this string will be appended to the end of the urlBase of the Session object. urlBase will look like: 'https://s(number)-(country).ikariam.gameforge.com/index.php?'
payloadPost : dict
dictionary containing key-value pairs which represent the payload of the post request
params : dict
dictionary containing key-value pairs which represent the parameters of the post request
ignoreExpire: bool
if set to True it will ignore whether the current session is expired and simply return whatever response it gets. If set to False, it will make sure the current session is not expired before sending the post request; if it is expired, it will log in again
noIndex : bool
if set to True it will remove 'index.php' from the end of urlBase before appending url and params and sending the post request
Returns
-------
html : str
response from the server
"""
url_original = url
payloadPost_original = payloadPost
params_original = params
self.__checkCookie()
self.__update_proxy()
# add the request id
token = self.__token()
url = url.replace(actionRequest, token)
if 'actionRequest' in payloadPost:
payloadPost['actionRequest'] = token
if 'actionRequest' in params:
params['actionRequest'] = token
if noIndex:
url = self.urlBase.replace('index.php', '') + url
else:
url = self.urlBase + url
self.__log('post({}), data={}'.format(url, str(payloadPost)))
while True:
try:
resp = self.s.post(url, data=payloadPost, params=params, verify=config.do_ssl_verify).text
if ignoreExpire is False:
assert self.__isExpired(resp) is False
if 'TXT_ERROR_WRONG_REQUEST_ID' in resp:
self.__log(_('got TXT_ERROR_WRONG_REQUEST_ID'))
return self.post(url=url_original, payloadPost=payloadPost_original, params=params_original, ignoreExpire=ignoreExpire, noIndex=noIndex)
self.__log(resp)
return resp
except AssertionError:
self.__sessionExpired()
except requests.exceptions.ConnectionError:
time.sleep(ConnectionError_wait)
def logout(self):
"""This function kills the current (chlid) process
"""
self.__log('logout({})')
if self.padre is False:
os._exit(0)
def setSessionData(self, sessionData, shared=False):
"""Encrypts relevant session data and writes it to the .ikabot file
Parameters
----------
sessionData : dict
dictionary containing relevant session data, data is written to file using AESCipher.setSessionData
shared : bool
Indicates if the new data should be shared among all accounts associated with the user-password
"""
self.cipher.setSessionData(self, sessionData, shared=shared)
def getSessionData(self):
"""Gets relevant session data from the .ikabot file
"""
return self.cipher.getSessionData(self)
def normal_get(url, params={}):
"""Sends a get request to provided url
Parameters
----------
url : str
a string representing the url to which to send the get request
params : dict
a dictionary containing key-value pairs which represent the parameters of the get request
Returns
-------
response : requests.Response
a requests.Response object which represents the webserver's response. For more information on requests.Response refer to https://requests.readthedocs.io/en/master/api/#requests.Response
"""
try:
return requests.get(url, params=params)
except requests.exceptions.ConnectionError:
sys.exit(_('Internet connection failed'))
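# Example usage (the lobby endpoint below is the same one used in
# Session.__login; it returns JSON):
#   resp = normal_get('https://lobby.ikariam.gameforge.com/api/servers')
#   servers = resp.json()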
|
the-stack_0_10512 | """
util
~~~~
Various utility routines for working with `tkinter`.
"""
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkfont
import tkinter.filedialog
import datetime as _datetime
import webbrowser as _web
import logging as _logging
NSEW = tk.N + tk.S + tk.E + tk.W
def screen_size(root):
"""Returns (width, height).
:param root: A valid window object
"""
# https://stackoverflow.com/questions/3949844
return (root.winfo_screenwidth(), root.winfo_screenheight())
def centre_window(window, width=None, height=None):
"""Set the window to be of the given size, centred on the screen.
:param width: Width to set the window to. If `None` then use current
window width.
:param height: Height to set the window to. If `None` then use current
window height.
"""
if width is None or height is None:
window.update_idletasks()
aw, ah = window.winfo_reqwidth(), window.winfo_reqheight()
if width is None:
width = aw
if height is None:
height = ah
w, h = screen_size(window)
x = (w - width) // 2
y = (h - height) // 2
minw, minh = window.minsize()
minw = min(minw, width)
minh = min(minh, height)
window.minsize(minw, minh)
fmt_str = "{}x{}+{}+{}".format(width, height, x, y)
window.geometry(fmt_str)
def centre_window_percentage(window, width_percentage, height_percentage):
"""Set the window to be the given percentages of the total screen size,
cented on the screen."""
w, h = screen_size(window)
centre_window(window, w * width_percentage // 100, h * height_percentage // 100)
def stretchy_columns(window, columns):
"""Set all the columns to have a "weight" of 1
:param window: Window like object to call columnconfigure on
:param columns: Iterable of columns to set
"""
for i in columns:
window.columnconfigure(i, weight=1)
def stretchy_rows(window, rows):
"""Set all the rows to have a "weight" of 1
:param window: Window like object to call rowconfigure on
:param rows: Iterable of rows to set
"""
for i in rows:
window.rowconfigure(i, weight=1)
def stretchy_rows_cols(window, rows, columns):
stretchy_columns(window, columns)
stretchy_rows(window, rows)
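# Minimal usage sketch (widget names are arbitrary): make a 2x2 grid in which
# every cell grows when the window is resized.
#   root = tk.Tk()
#   stretchy_rows_cols(root, rows=range(2), columns=range(2))
#   ttk.Label(root, text="cell").grid(row=0, column=0, sticky=NSEW)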
def ask_open_filename(*args, **kwargs):
"""As :func:`tkinter.filedialog.askopenfilename` but filters the returned
file to be valid or `None`."""
filename = tkinter.filedialog.askopenfilename(*args, **kwargs)
if filename is None or filename == "" or len(filename) == 0:
return None
return filename
def ask_save_filename(*args, **kwargs):
"""As :func:`tkinter.filedialog.asksaveasfilename` but filters the returned
file to be valid or `None`."""
filename = tkinter.filedialog.asksaveasfilename(*args, **kwargs)
if filename is None or filename == "" or len(filename) == 0:
return None
return filename
class Validator():
"""Provide some user-friendly way to validate the contents of a
`tkinter.Entry` widget. By default, all entries are valid, so this class
can also be used as an over-engineered way to get notification of a change.
:param widget: The widget to bind to
:param variable: The `tkinter.StringVar` which is bound to the widget.
:param callback: Optional function-like object to call when the variable changes.
"""
def __init__(self, widget, variable, callback=None):
self._widget = widget
self._variable = variable
self._callback = callback
self._old_value = ""
cmd1 = self._widget.register(self._validate)
self._widget["validatecommand"] = (cmd1, "%P", "%V")
self._widget["validate"] = "focus"
def _reset(self):
self._variable.set(self._old_value)
def _validate(self, val, why):
if why == "focusin":
self._old_value = self._variable.get()
elif why == "focusout":
if not self.validate(val):
self._widget.after_idle(self._reset)
elif self._callback is not None:
self._widget.after_idle(self._callback)
else:
raise ValueError("Unexpected event")
return True
def validate(self, value):
"""Should check if the value is acceptable, or not.
:param value: String of the value to check.
:return: True if the value is acceptable; False otherwise.
"""
return True
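# Minimal usage sketch ('root' is an assumed Tk root window and
# NotEmptyValidator is a hypothetical subclass, not part of this module):
# reject empty input by overriding validate().
#   class NotEmptyValidator(Validator):
#       def validate(self, value):
#           return value != ""
#   var = tk.StringVar()
#   entry = ttk.Entry(root, textvariable=var)
#   NotEmptyValidator(entry, var, callback=lambda: print("changed"))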
class FloatValidator(Validator):
"""A :class:`Validator` which only accepts values which are empty, or can
parse to a python `float`.
:param allow_empty: If True, allow "" as a value; otherwise not.
"""
def __init__(self, widget, variable, callback=None, allow_empty=False):
super().__init__(widget, variable, callback)
self._allow_empty = allow_empty
def validate(self, value):
if value == "" and self._allow_empty:
return True
try:
float(value)
except:
return False
return True
class IntValidator(Validator):
"""A :class:`Validator` which only accepts values which are empty, or can
parse to a python `int`.
:param allow_empty: If True, allow "" as a value; otherwise not.
"""
def __init__(self, widget, variable, callback=None, allow_empty=False):
super().__init__(widget, variable, callback)
self._allow_empty = allow_empty
def validate(self, value):
if value == "" and self._allow_empty:
return True
try:
int(value)
except:
return False
return True
class PercentageValidator(IntValidator):
"""An :class:`IntValidator` which limits to values between 0 and 100
inclusive.
"""
def validate(self, value):
if not super().validate(value):
return False
if int(value) < 0 or int(value) > 100:
return False
return True
class DateTimeValidator(Validator):
"""A :class:`Validator` which only accepts values which parse using the
given `strptime` string.
:param format: The `strptime` format string.
"""
def __init__(self, widget, variable, format, callback=None):
super().__init__(widget, variable, callback)
self._format = format
def validate(self, value):
try:
_datetime.datetime.strptime(value, self._format)
        except ValueError:
return False
return True
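# Illustrative usage sketch (the widget layout and callback here are assumptions, not
# part of the original module): attach a FloatValidator to an Entry so that invalid
# input is reverted when the widget loses focus.
def _example_float_entry(parent):
    value = tk.StringVar()
    entry = ttk.Entry(parent, textvariable=value)
    entry.grid(row=0, column=0, sticky=tk.EW)
    # The validator reverts bad input on focus-out and reports accepted changes.
    validator = FloatValidator(entry, value,
        callback=lambda: print("New value:", value.get()), allow_empty=True)
    return entry, validator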
def auto_wrap_label(label, padding=0):
"""Add a binding to a :class:`tk.Label` or :class:`ttk.Label` object so
that when the label is resized, the text wrap length is automatically
adjusted.
:param label: The label object to bind to.
    :param padding: The padding to subtract from the width; defaults to 0.
"""
def callback(event):
event.widget["wraplength"] = event.width - padding
label.bind("<Configure>", callback)
class TextMeasurer():
"""Simplify measuring the size of text. I find that this does not work
terribly well, but it's better than guessing.
"""
def __init__(self, font=None, scale=1.1, min=30, max=200):
if font is None:
font = "TkDefaultFont"#"TkTextFont"
if isinstance(font, str):
font = tkfont.nametofont(font)
self._font = font
self._scale = scale
self._minimum = min
self._maximum = max
@property
def scale(self):
"""Factor the scale the estimated width by."""
return self._scale
@scale.setter
def scale(self, value):
self._scale = value
@property
def minimum(self):
"""Cap returned widths to this minimum value."""
return self._minimum
@minimum.setter
def minimum(self, value):
self._minimum = value
@property
def maximum(self):
"""Cap returned widths to this maximum value."""
return self._maximum
@maximum.setter
def maximum(self, value):
self._maximum = value
def _measure_one(self, text):
width = self._font.measure(str(text))
width = int(width * self._scale)
width = min(self._maximum, max(self._minimum, width))
return width
def measure(self, text):
"""Return the (very much estimated) width of the text.
:param text: Either a string, or an iterable of strings.
:return: Width of the text, or if passed an iterable, the maximum of
the widths.
"""
if isinstance(text, str):
return self._measure_one(text)
return max(self._measure_one(t) for t in text)
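# Illustrative usage sketch (the treeview, column name and cell texts are assumptions):
# size a ttk.Treeview column from the header and the cell texts it will display.
def _example_size_column(tree, column, header, cell_texts):
    measurer = TextMeasurer()
    width = max(measurer.measure(header), measurer.measure(cell_texts))
    tree.column(column, width=width)
    return width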
class ModalWindow(tk.Toplevel):
"""A simple modal window abstract base class.
Ideas from http://effbot.org/tkinterbook/tkinter-dialog-windows.htm
:param parent: The parent window from which to construct the dialog.
:param title: Title for the modal window.
:param no_border: If `True` then don't draw the window border, title bar
etc.
:param resize: If `None` then don't allow resizing. Otherwise a string
containing `w` and/or `h` to allow resizing of width and/or height.
"""
def __init__(self, parent, title, no_border=False, resize=None):
super().__init__(parent)
if parent.master is None:
self._parent = parent
else:
self._parent = parent.master
self.title(title)
self.minsize(50, 50)
if resize is None:
self.resizable(width=False, height=False)
else:
self.resizable(width=("w" in resize), height=("h" in resize))
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.bind("<Button-1>", self._flash)
        # This cannot go later, or it won't work on Linux
if no_border:
self.wm_overrideredirect(True)
self.add_widgets()
# Have had trouble with this, but the current placement seems to work
# on Windows and X-Windows okay.
self.wait_visibility()
self.grab_set()
self.focus_force()
self.bind("<Unmap>", self._minim)
self.transient(self._parent)
def _minim(self, event):
# If we're being minimised then also minimise the parent!
if event.widget != self:
            # Because we bound to a _top level window_, all child widgets will
# also raise this event; but we only care about the actual top
# level window being unmapped.
return
_logging.getLogger(__name__).debug("%s being asked to minimise", self)
win = self._parent.master
if win is None:
win = self._parent
self.after_idle(win.iconify)
def _over_self(self, event):
over_win = self.winfo_containing(event.x_root, event.y_root)
# Hopefully, true when the over_win is a child of us
return str(over_win).startswith(str(self))
def _flash(self, event):
if not self._over_self(event):
# Drag the focus back to us after a brief pause.
self.after(100, lambda : self.focus_force())
def _flash_close(self, event):
if not self._over_self(event):
self.cancel()
def close_on_click_away(self):
"""Change behaviour so that the window is `destroy`ed when the user
clicks off it."""
self.bind("<Button-1>", self._flash_close)
def set_size(self, width, height):
"""Set the size of the main window, and centre on the screen."""
centre_window(self, width, height)
self.update_idletasks()
def set_size_percentage(self, width, height):
"""Set the size of the main window, as percentages of the screen size,
and centre on the screen."""
centre_window_percentage(self, width, height)
self.update_idletasks()
def set_to_actual_height(self):
"""Set the window the height required to fit its contents."""
self.update_idletasks()
centre_window(self, self.winfo_width(), self.winfo_reqheight())
def set_to_actual_width(self):
"""Set the window the width required to fit its contents."""
self.update_idletasks()
centre_window(self, self.winfo_reqwidth(), self.winfo_height())
def set_to_actual_size(self):
"""Set the window the size required to fit its contents."""
self.update_idletasks()
centre_window(self, self.winfo_reqwidth(), self.winfo_reqheight())
def add_widgets(self):
"""Override to add widgets."""
raise NotImplementedError()
def cancel(self):
"""Override to do something extra on closing the window."""
self.destroy()
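# Illustrative sketch of subclassing ModalWindow (the widgets and result handling are
# assumptions, not part of the original module): a minimal Okay/Cancel confirmation.
class _ExampleDialog(ModalWindow):
    def __init__(self, parent):
        # Set state before super().__init__(), which calls add_widgets().
        self.result = False
        super().__init__(parent, "Example dialog")

    def add_widgets(self):
        stretchy_columns(self, [0, 1])
        ttk.Label(self, text="Proceed?").grid(row=0, column=0, columnspan=2, padx=5, pady=5)
        ttk.Button(self, text="Okay", command=self._okay).grid(row=1, column=0, padx=5, pady=5)
        ttk.Button(self, text="Cancel", command=self.cancel).grid(row=1, column=1, padx=5, pady=5)

    def _okay(self):
        self.result = True
        self.destroy()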
class ListBox(tk.Frame):
"""Friendly version of `tk.ListBox` with vertical scrollbar, sensible
default options, and a callback on changes.
Common options are:
- "width" / "height" : In characters/line respectively
- "command" : Callback with signature `command(selection)`
"""
def __init__(self, parent=None, **kwargs):
super().__init__(parent)
args = {"selectmode" : tk.MULTIPLE,
"exportselection" : 0,
"activestyle" : "dotbox"}
args.update(kwargs)
if "command" in args:
self._command = args["command"]
del args["command"]
else:
self._command = None
self._box = tk.Listbox(self, **args)
self._box.grid(row=0, column=0, sticky=tk.NSEW)
stretchy_columns(self, [0])
self._yscroll = ttk.Scrollbar(self, orient=tk.VERTICAL)
self._yscroll.grid(row=0, column=1, sticky=tk.NS + tk.E)
self._box["yscrollcommand"] = self._yscroll.set
self._yscroll["command"] = self._box.yview
self._closed = False
if self._command is not None:
self._old_selection = []
self._poll()
def clear_rows(self):
self._box.delete(0, self.size - 1)
def add_rows(self, rows):
"""Add one or more rows to the end of the list.
:param rows: Either a string, or an iterable of strings.
"""
        if isinstance(rows, str):
            self._box.insert(tk.END, rows)
        else:
            for r in rows:
                self._box.insert(tk.END, r)
@property
def current_selection(self):
"""A list of the selected rows, with 0 as the first row."""
return list(self._box.curselection())
@current_selection.setter
def current_selection(self, new_selection):
self._box.selection_clear(0, self.size - 1)
for r in new_selection:
self._box.selection_set(r)
@property
def size(self):
return self._box.size()
def text(self, index):
"""The text of the entry at the index."""
return self._box.get(index)
def _poll(self):
if self._closed:
return
sel = self.current_selection
if sel != self._old_selection:
self._old_selection = sel
self._command(sel)
self.after(250, self._poll)
def destroy(self):
self._closed = True
super().destroy()
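# Illustrative usage sketch (the row texts and callback are assumptions): a multi-select
# list that reports the text of the selected rows as the selection changes.
def _example_listbox(parent):
    def on_select(selection):
        print("Selected:", [box.text(i) for i in selection])
    box = ListBox(parent, height=5, command=on_select)
    box.add_rows(["Alpha", "Beta", "Gamma"])
    box.grid(row=0, column=0, sticky=tk.NSEW)
    return box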
# https://stackoverflow.com/questions/16188420/python-tkinter-scrollbar-for-frame
class ScrolledFrame(tk.Frame):
"""A subclass of :class:`tk.Frame` which acts just like a normal frame (for
gridding purposes etc.) but which actually contains a frame in a canvas
object, with (optional) scroll bars.
Access the :attr:`frame` for the widget object to make a parent of any
widget you want to place in the scrollable-frame.
:param parent: The parent object of the returned frame.
:param mode: String which if contains "h", then add horizontal scroll-bar,
and if contains "v" add vertical scroll-bar. Default is both.
"""
def __init__(self, parent, mode="hv"):
super().__init__(parent)
self._parent = parent
stretchy_columns(self, [0])
stretchy_rows(self, [0])
self._canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0)
self._canvas.grid(row=0, column=0, sticky=tk.NSEW)
if "h" in mode:
self._xscroll = ttk.Scrollbar(self, orient = "horizontal", command = self._canvas.xview)
self._xscroll.grid(column = 0, row = 1, sticky = tk.EW)
self._canvas["xscrollcommand"] = self._xscroll.set
else:
self._xscroll = None
if "v" in mode:
self._yscroll = ttk.Scrollbar(self, orient = "vertical", command = self._canvas.yview)
self._yscroll.grid(column = 1, row = 0, sticky = tk.NS)
self._canvas["yscrollcommand"] = self._yscroll.set
else:
self._yscroll = None
# Must be child of canvas and not `self` to avoid visibility problems
self._frame = tk.Frame(self._canvas)
self._frame.bind("<MouseWheel>", self._mouse_wheel)
self._frame.bind("<Button>", self._mouse_button)
self._canvas.create_window(0, 0, window=self._frame, anchor=tk.NW)
self._frame.bind('<Configure>', self._conf)
self._canvas.bind('<Configure>', self._conf1)
if self._yscroll is not None:
self._yscroll.bind("<MouseWheel>", self._mouse_wheel)
self._yscroll.bind("<Button>", self._mouse_button)
def _mouse_wheel(self, event):
# This is not ideal, but I've found online hints that there are differences
# between windows and OS/X, so this is a compromise.
if event.delta > 0:
self._canvas.yview(tk.SCROLL, -1, tk.UNITS)
else:
self._canvas.yview(tk.SCROLL, 1, tk.UNITS)
def _mouse_button(self, event):
if event.num == 4:
self._canvas.yview(tk.SCROLL, -1, tk.UNITS)
if event.num == 5:
self._canvas.yview(tk.SCROLL, 1, tk.UNITS)
@property
def frame(self):
"""The frame widget which should be the parent of any widgets you wish
to display."""
return self._frame
def _conf1(self, e=None):
# Listens to the outer frame being resized and adds or removes the
# scrollbars as necessary.
if self._xscroll is not None:
if self._canvas.winfo_width() < self._canvas.winfo_reqwidth():
self._xscroll.grid()
else:
self._xscroll.grid_remove()
if self._yscroll is not None:
if self._canvas.winfo_height() < self._canvas.winfo_reqheight():
self._yscroll.grid()
else:
self._yscroll.grid_remove()
def _conf(self, e):
# Listens to the inner frame and resizes the canvas to fit
width = self.frame.winfo_reqwidth()
height = self.frame.winfo_reqheight()
if self._canvas["width"] != width:
self._canvas["width"] = width
if self._canvas["height"] != height:
self._canvas["height"] = height
self._canvas["scrollregion"] = (0, 0, width, height)
# Resize outer as well
self._conf1(None)
# Notify parent we changed, in case of manual position control
self.after_idle(lambda : self._parent.event_generate("<Configure>", when="tail"))
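# Illustrative usage sketch (the content is an assumption): a tall column of labels in a
# vertically scrollable frame; widgets are parented to `scrolled.frame`, not `scrolled`.
def _example_scrolled_frame(parent):
    scrolled = ScrolledFrame(parent, mode="v")
    scrolled.grid(row=0, column=0, sticky=tk.NSEW)
    for i in range(50):
        ttk.Label(scrolled.frame, text="Row {}".format(i)).grid(row=i, column=0, sticky=tk.W)
    return scrolled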
class HREF(ttk.Label):
"""A subclass of :class:`ttk.Label` which acts like a hyperlink."""
def __init__(self, parent, **kwargs):
if "url" not in kwargs:
raise ValueError("Must specify a URL target")
self._url = kwargs["url"]
del kwargs["url"]
super().__init__(parent, **kwargs)
self._init()
self["style"] = "Href.TLabel"
self._link()
self.bind("<Button-1>", self.open)
def open(self, event):
"""Open the URL using a webbrower."""
self._busy()
_web.open(self._url)
def _busy(self):
self["cursor"] = "watch"
self.after(500, self._link)
def _link(self):
self["cursor"] = "hand2"
def _init(self):
if HREF._font is None:
HREF._font = tkfont.nametofont("TkTextFont").copy()
HREF._font["underline"] = True
if HREF._style is None:
HREF._style = ttk.Style()
HREF._style.configure("Href.TLabel", foreground="blue", font=HREF._font)
_font = None
_style = None
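# Illustrative usage sketch (the text and URL are placeholders): a clickable link label.
def _example_href(parent):
    link = HREF(parent, text="Project homepage", url="https://example.com/")
    link.grid(row=0, column=0, padx=5, pady=5)
    return link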
class ModalWaitWindow(ModalWindow):
"""Very simple modal window with an "indeterminate" progress bar."""
def __init__(self, parent, title):
super().__init__(parent, title)
stretchy_columns(self, [0])
centre_window_percentage(self, 20, 10)
def add_widgets(self):
bar = ttk.Progressbar(self, mode="indeterminate")
bar.grid(row=1, column=0, padx=2, pady=2, sticky=tk.NSEW)
bar.start()
frame = ttk.Frame(self, height=30)
frame.grid(row=0, column=0)
frame.grid_propagate(False)
|
the-stack_0_10516 | # --------------
import pandas as pd
from collections import Counter
# Load dataset
data=pd.read_csv(path)
print(data.isnull().sum())
print(data.describe())
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the label values
#X=data.drop(columns=["Activity"],axis=1)
label=data["Activity"]
# Plot the countplot
plot = sns.countplot(x=label)
plot.set_xticklabels(plot.get_xticklabels(), rotation=90)
# --------------
# make the copy of dataset
data_copy = data.copy()
# Create an empty column
data_copy['duration'] = ''
# Calculate the duration
duration_df = (data_copy.groupby([label[label.isin(['WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'])], 'subject'])['duration'].count() * 1.28)
duration_df = pd.DataFrame(duration_df)
# Sort the values of duration
plot_data = duration_df.reset_index().sort_values('duration', ascending=False)
plot_data['Activity'] = plot_data['Activity'].map({'WALKING_UPSTAIRS':'Upstairs', 'WALKING_DOWNSTAIRS':'Downstairs'})
# Plot the durations for staircase use
plt.figure(figsize=(15,5))
sns.barplot(data=plot_data, x='subject', y='duration', hue='Activity')
plt.title('Participants Compared By Their Staircase Walking Duration')
plt.xlabel('Participants')
plt.ylabel('Total Duration [s]')
plt.show()
# --------------
#exclude the Activity column and the subject column
feature_cols = data.columns[: -2]
#Calculate the correlation values
correlated_values = data[feature_cols].corr()
#stack the data and convert to a dataframe
correlated_values = (correlated_values.stack().to_frame().reset_index()
.rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0:'Correlation_score'}))
#create an abs_correlation column
correlated_values['abs_correlation'] = correlated_values.Correlation_score.abs()
#Picking most correlated features without having self correlated pairs
top_corr_fields = correlated_values.sort_values('Correlation_score', ascending=False).query('abs_correlation > 0.8')
top_corr_fields = top_corr_fields[top_corr_fields['Feature_1'] != top_corr_fields['Feature_2']].reset_index(drop=True)
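# Optional illustrative step (an assumption, not used by the rest of this script):
# collect one feature from each highly correlated pair as candidates for dropping
# before model building.
redundant_feature_candidates = top_corr_fields['Feature_2'].unique().tolist()
print('Number of redundant feature candidates:', len(redundant_feature_candidates))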
# --------------
# importing neccessary libraries
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import precision_recall_fscore_support as error_metric
from sklearn.metrics import confusion_matrix, accuracy_score
# Encoding the target variable
le = LabelEncoder()
data['Activity'] = le.fit_transform(data.Activity)
X = data.iloc[:,:-1]
y = data.iloc[:,-1]
# split the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=40)
# Baseline model
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)
precision, recall, f_score, _ = error_metric(y_test, y_pred, average = 'weighted')
model1_score = accuracy_score(y_test, y_pred)
print("model1_score",model1_score)
print("[precision:recall:f_score]-->",precision, recall, f_score)
# --------------
# importing libraries
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
# Feature selection using Linear SVC
lsvc = LinearSVC(C = 0.01, penalty="l1", dual=False, random_state=42).fit(X_train, y_train)
model_2 = SelectFromModel(lsvc, prefit=True)
new_train_features = model_2.transform(X_train)
new_test_features = model_2.transform(X_test)
print(new_train_features.shape,new_test_features.shape )
# model building on reduced set of features
classifier_2 = SVC()
clf_2 = classifier_2.fit(new_train_features, y_train)
y_pred_new = clf_2.predict(new_test_features)
model2_score =accuracy_score(y_test, y_pred_new)
precision, recall, f_score, _ = error_metric(y_test, y_pred_new, average='weighted')
print(model2_score)
print(precision, recall, f_score)
# --------------
# Importing Libraries
from sklearn.model_selection import GridSearchCV
# Set the hyperparmeters
parameters = {
'kernel': ['linear', 'rbf'],
'C': [100, 20, 1, 0.1]
}
# Usage of grid search to select the best hyperparmeters
selector = GridSearchCV(SVC(), parameters, scoring='accuracy')
selector.fit(new_train_features, y_train)
print('Best parameter set found:')
print(selector.best_params_)
print('Detailed grid scores:')
means = selector.cv_results_['mean_test_score']
stds = selector.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, selector.cv_results_['params']):
print('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))
print()
# Model building after Hyperparameter tuning
classifier_3 = SVC(kernel='rbf', C=100)
clf_3 = classifier_3.fit(new_train_features, y_train)
y_pred_final = clf_3.predict(new_test_features)
model3_score = accuracy_score(y_test, y_pred_final)
print('Accuracy score:', model3_score)
|
the-stack_0_10518 | import base64
import errno
import http.client
import logging
import os
import stat
import os.path as p
import pprint
import pwd
import re
import shutil
import socket
import subprocess
import time
import traceback
import urllib.parse
import shlex
import urllib3
import requests
try:
# Please, add modules that required for specific tests only here.
# So contributors will be able to run most tests locally
# without installing tons of unneeded packages that may be not so easy to install.
from cassandra.policies import RoundRobinPolicy
import cassandra.cluster
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import pymongo
import pymysql
from confluent_kafka.avro.cached_schema_registry_client import (
CachedSchemaRegistryClient,
)
except Exception as e:
logging.warning(f"Cannot import some modules, some tests may not work: {e}")
from dict2xml import dict2xml
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
from minio import Minio
from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry
from helpers import pytest_xdist_logging_to_separate_files
from helpers.client import QueryRuntimeException
import docker
from .client import Client
from .hdfs_api import HDFSApi
from .config_cluster import *
HELPERS_DIR = p.dirname(__file__)
CLICKHOUSE_ROOT_DIR = p.join(p.dirname(__file__), "../../..")
LOCAL_DOCKER_COMPOSE_DIR = p.join(
CLICKHOUSE_ROOT_DIR, "docker/test/integration/runner/compose/"
)
DEFAULT_ENV_NAME = ".env"
SANITIZER_SIGN = "=================="
# to create docker-compose env file
def _create_env_file(path, variables):
logging.debug(f"Env {variables} stored in {path}")
with open(path, "w") as f:
for var, value in list(variables.items()):
f.write("=".join([var, value]) + "\n")
return path
def run_and_check(
args,
env=None,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=300,
nothrow=False,
detach=False,
):
if detach:
subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
env=env,
shell=shell,
)
return
logging.debug(f"Command:{args}")
res = subprocess.run(
args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout
)
out = res.stdout.decode("utf-8")
err = res.stderr.decode("utf-8")
# check_call(...) from subprocess does not print stderr, so we do it manually
for outline in out.splitlines():
logging.debug(f"Stdout:{outline}")
for errline in err.splitlines():
logging.debug(f"Stderr:{errline}")
if res.returncode != 0:
logging.debug(f"Exitcode:{res.returncode}")
if env:
logging.debug(f"Env:{env}")
if not nothrow:
raise Exception(
f"Command {args} return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}"
)
return out
# Based on https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):
"""
Retry if `func()` throws, `num` times.
:param func: func to run
:param num: number of retries
:throws StopIteration
"""
i = 0
while i <= num:
try:
func(*args, **kwargs)
time.sleep(delay)
except exception: # pylint: disable=broad-except
i += 1
continue
return
    raise StopIteration("Function did not finish successfully")
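# Illustrative sketch (not used by this module; the host/port parameters are
# placeholders): wait until a TCP port accepts connections by retrying with
# retry_exception().
def _example_wait_for_tcp_port(host, port, retries=30, delay=1):
    def _try_connect():
        sock = socket.create_connection((host, port), timeout=1)
        sock.close()
    retry_exception(retries, delay, _try_connect, OSError)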
def subprocess_check_call(args, detach=False, nothrow=False):
# Uncomment for debugging
# logging.info('run:' + ' '.join(args))
return run_and_check(args, detach=detach, nothrow=nothrow)
def get_odbc_bridge_path():
path = os.environ.get("CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH")
if path is None:
server_path = os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH")
if server_path is not None:
return os.path.join(os.path.dirname(server_path), "clickhouse-odbc-bridge")
else:
return "/usr/bin/clickhouse-odbc-bridge"
return path
def get_library_bridge_path():
path = os.environ.get("CLICKHOUSE_TESTS_LIBRARY_BRIDGE_BIN_PATH")
if path is None:
server_path = os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH")
if server_path is not None:
return os.path.join(
os.path.dirname(server_path), "clickhouse-library-bridge"
)
else:
return "/usr/bin/clickhouse-library-bridge"
return path
def get_docker_compose_path():
compose_path = os.environ.get("DOCKER_COMPOSE_DIR")
if compose_path is not None:
return os.path.dirname(compose_path)
else:
if os.path.exists(os.path.dirname("/compose/")):
return os.path.dirname("/compose/") # default in docker runner container
else:
logging.debug(
f"Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {LOCAL_DOCKER_COMPOSE_DIR}"
)
return LOCAL_DOCKER_COMPOSE_DIR
def check_kafka_is_available(kafka_id, kafka_port):
p = subprocess.Popen(
(
"docker",
"exec",
"-i",
kafka_id,
"/usr/bin/kafka-broker-api-versions",
"--bootstrap-server",
f"INSIDE://localhost:{kafka_port}",
),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.communicate()
return p.returncode == 0
def check_rabbitmq_is_available(rabbitmq_id):
p = subprocess.Popen(
("docker", "exec", "-i", rabbitmq_id, "rabbitmqctl", "await_startup"),
stdout=subprocess.PIPE,
)
p.communicate()
return p.returncode == 0
def check_redis_is_available(redis_id):
    p = subprocess.Popen(
        ("docker", "exec", "-i", redis_id, "redis-cli", "ping"),
        stdout=subprocess.PIPE,
    )
p.communicate()
return p.returncode == 0
def enable_consistent_hash_plugin(rabbitmq_id):
p = subprocess.Popen(
(
"docker",
"exec",
"-i",
rabbitmq_id,
"rabbitmq-plugins",
"enable",
"rabbitmq_consistent_hash_exchange",
),
stdout=subprocess.PIPE,
)
p.communicate()
return p.returncode == 0
def get_instances_dir():
if (
"INTEGRATION_TESTS_RUN_ID" in os.environ
and os.environ["INTEGRATION_TESTS_RUN_ID"]
):
return "_instances_" + shlex.quote(os.environ["INTEGRATION_TESTS_RUN_ID"])
else:
return "_instances"
class ClickHouseCluster:
"""ClickHouse cluster with several instances and (possibly) ZooKeeper.
Add instances with several calls to add_instance(), then start them with the start() call.
Directories for instances are created in the directory of base_path. After cluster is started,
these directories will contain logs, database files, docker-compose config, ClickHouse configs etc.
"""
def __init__(
self,
base_path,
name=None,
base_config_dir=None,
server_bin_path=None,
client_bin_path=None,
odbc_bridge_bin_path=None,
library_bridge_bin_path=None,
zookeeper_config_path=None,
custom_dockerd_host=None,
zookeeper_keyfile=None,
zookeeper_certfile=None,
):
for param in list(os.environ.keys()):
logging.debug("ENV %40s %s" % (param, os.environ[param]))
self.base_path = base_path
self.base_dir = p.dirname(base_path)
self.name = name if name is not None else ""
self.base_config_dir = base_config_dir or os.environ.get(
"CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/"
)
self.server_bin_path = p.realpath(
server_bin_path
or os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")
)
self.odbc_bridge_bin_path = p.realpath(
odbc_bridge_bin_path or get_odbc_bridge_path()
)
self.library_bridge_bin_path = p.realpath(
library_bridge_bin_path or get_library_bridge_path()
)
self.client_bin_path = p.realpath(
client_bin_path
or os.environ.get(
"CLICKHOUSE_TESTS_CLIENT_BIN_PATH", "/usr/bin/clickhouse-client"
)
)
self.zookeeper_config_path = (
p.join(self.base_dir, zookeeper_config_path)
if zookeeper_config_path
else p.join(HELPERS_DIR, "zookeeper_config.xml")
)
project_name = (
pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name
)
# docker-compose removes everything non-alphanumeric from project names so we do it too.
self.project_name = re.sub(r"[^a-z0-9]", "", project_name.lower())
instances_dir_name = "_instances"
if self.name:
instances_dir_name += "_" + self.name
if (
"INTEGRATION_TESTS_RUN_ID" in os.environ
and os.environ["INTEGRATION_TESTS_RUN_ID"]
):
instances_dir_name += "_" + shlex.quote(
os.environ["INTEGRATION_TESTS_RUN_ID"]
)
self.instances_dir = p.join(self.base_dir, instances_dir_name)
self.docker_logs_path = p.join(self.instances_dir, "docker.log")
self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
self.env_variables = {}
self.env_variables["TSAN_OPTIONS"] = "second_deadlock_stack=1"
self.env_variables["CLICKHOUSE_WATCHDOG_ENABLE"] = "0"
self.up_called = False
custom_dockerd_host = custom_dockerd_host or os.environ.get(
"CLICKHOUSE_TESTS_DOCKERD_HOST"
)
self.docker_api_version = os.environ.get("DOCKER_API_VERSION")
self.docker_base_tag = os.environ.get("DOCKER_BASE_TAG", "latest")
self.base_cmd = ["docker-compose"]
if custom_dockerd_host:
self.base_cmd += ["--host", custom_dockerd_host]
self.base_cmd += ["--env-file", self.env_file]
self.base_cmd += ["--project-name", self.project_name]
self.base_zookeeper_cmd = None
self.base_mysql_cmd = []
self.base_kafka_cmd = []
self.base_kerberized_kafka_cmd = []
self.base_rabbitmq_cmd = []
self.base_cassandra_cmd = []
self.base_jdbc_bridge_cmd = []
self.base_redis_cmd = []
self.pre_zookeeper_commands = []
self.instances = {}
self.with_zookeeper = False
self.with_zookeeper_secure = False
self.with_mysql_client = False
self.with_mysql = False
self.with_mysql8 = False
self.with_mysql_cluster = False
self.with_postgres = False
self.with_postgres_cluster = False
self.with_kafka = False
self.with_kerberized_kafka = False
self.with_rabbitmq = False
self.with_odbc_drivers = False
self.with_hdfs = False
self.with_kerberized_hdfs = False
self.with_mongo = False
self.with_mongo_secure = False
self.with_net_trics = False
self.with_redis = False
self.with_cassandra = False
self.with_jdbc_bridge = False
self.with_nginx = False
self.with_hive = False
self.with_minio = False
self.minio_dir = os.path.join(self.instances_dir, "minio")
self.minio_certs_dir = None # source for certificates
self.minio_host = "minio1"
self.minio_ip = None
self.minio_bucket = "root"
self.minio_bucket_2 = "root2"
self.minio_port = 9001
self.minio_client = None # type: Minio
self.minio_redirect_host = "proxy1"
self.minio_redirect_ip = None
self.minio_redirect_port = 8080
self.with_azurite = False
# available when with_hdfs == True
self.hdfs_host = "hdfs1"
self.hdfs_ip = None
self.hdfs_name_port = 50070
self.hdfs_data_port = 50075
self.hdfs_dir = p.abspath(p.join(self.instances_dir, "hdfs"))
self.hdfs_logs_dir = os.path.join(self.hdfs_dir, "logs")
self.hdfs_api = None # also for kerberized hdfs
# available when with_kerberized_hdfs == True
self.hdfs_kerberized_host = "kerberizedhdfs1"
self.hdfs_kerberized_ip = None
self.hdfs_kerberized_name_port = 50070
self.hdfs_kerberized_data_port = 1006
self.hdfs_kerberized_dir = p.abspath(
p.join(self.instances_dir, "kerberized_hdfs")
)
self.hdfs_kerberized_logs_dir = os.path.join(self.hdfs_kerberized_dir, "logs")
# available when with_kafka == True
self.kafka_host = "kafka1"
self.kafka_port = get_free_port()
self.kafka_docker_id = None
self.schema_registry_host = "schema-registry"
self.schema_registry_port = get_free_port()
self.kafka_docker_id = self.get_instance_docker_id(self.kafka_host)
# available when with_kerberozed_kafka == True
self.kerberized_kafka_host = "kerberized_kafka1"
self.kerberized_kafka_port = get_free_port()
self.kerberized_kafka_docker_id = self.get_instance_docker_id(
self.kerberized_kafka_host
)
# available when with_mongo == True
self.mongo_host = "mongo1"
self.mongo_port = get_free_port()
self.mongo_no_cred_host = "mongo2"
self.mongo_no_cred_port = get_free_port()
# available when with_cassandra == True
self.cassandra_host = "cassandra1"
self.cassandra_port = 9042
self.cassandra_ip = None
self.cassandra_id = self.get_instance_docker_id(self.cassandra_host)
# available when with_rabbitmq == True
self.rabbitmq_host = "rabbitmq1"
self.rabbitmq_ip = None
self.rabbitmq_port = 5672
self.rabbitmq_dir = p.abspath(p.join(self.instances_dir, "rabbitmq"))
self.rabbitmq_logs_dir = os.path.join(self.rabbitmq_dir, "logs")
# available when with_nginx == True
self.nginx_host = "nginx"
self.nginx_ip = None
self.nginx_port = 80
self.nginx_id = self.get_instance_docker_id(self.nginx_host)
# available when with_redis == True
self.redis_host = "redis1"
self.redis_ip = None
self.redis_port = 6379
# available when with_postgres == True
self.postgres_host = "postgres1"
self.postgres_ip = None
self.postgres_conn = None
self.postgres2_host = "postgres2"
self.postgres2_ip = None
self.postgres2_conn = None
self.postgres3_host = "postgres3"
self.postgres3_ip = None
self.postgres3_conn = None
self.postgres4_host = "postgres4"
self.postgres4_ip = None
self.postgres4_conn = None
self.postgres_port = 5432
self.postgres_dir = p.abspath(p.join(self.instances_dir, "postgres"))
self.postgres_logs_dir = os.path.join(self.postgres_dir, "postgres1")
self.postgres2_logs_dir = os.path.join(self.postgres_dir, "postgres2")
self.postgres3_logs_dir = os.path.join(self.postgres_dir, "postgres3")
self.postgres4_logs_dir = os.path.join(self.postgres_dir, "postgres4")
# available when with_mysql_client == True
self.mysql_client_host = "mysql_client"
self.mysql_client_container = None
# available when with_mysql == True
self.mysql_host = "mysql57"
self.mysql_port = 3306
self.mysql_ip = None
self.mysql_dir = p.abspath(p.join(self.instances_dir, "mysql"))
self.mysql_logs_dir = os.path.join(self.mysql_dir, "logs")
# available when with_mysql_cluster == True
self.mysql2_host = "mysql2"
self.mysql3_host = "mysql3"
self.mysql4_host = "mysql4"
self.mysql2_ip = None
self.mysql3_ip = None
self.mysql4_ip = None
self.mysql_cluster_dir = p.abspath(p.join(self.instances_dir, "mysql"))
self.mysql_cluster_logs_dir = os.path.join(self.mysql_dir, "logs")
# available when with_mysql8 == True
self.mysql8_host = "mysql80"
self.mysql8_port = 3306
self.mysql8_ip = None
self.mysql8_dir = p.abspath(p.join(self.instances_dir, "mysql8"))
self.mysql8_logs_dir = os.path.join(self.mysql8_dir, "logs")
# available when with_zookeper_secure == True
self.zookeeper_secure_port = 2281
self.zookeeper_keyfile = zookeeper_keyfile
self.zookeeper_certfile = zookeeper_certfile
# available when with_zookeper == True
self.use_keeper = True
self.zookeeper_port = 2181
self.keeper_instance_dir_prefix = p.join(
p.abspath(self.instances_dir), "keeper"
) # if use_keeper = True
self.zookeeper_instance_dir_prefix = p.join(self.instances_dir, "zk")
self.zookeeper_dirs_to_create = []
# available when with_jdbc_bridge == True
self.jdbc_bridge_host = "bridge1"
self.jdbc_bridge_ip = None
self.jdbc_bridge_port = 9019
self.jdbc_driver_dir = p.abspath(p.join(self.instances_dir, "jdbc_driver"))
self.jdbc_driver_logs_dir = os.path.join(self.jdbc_driver_dir, "logs")
self.docker_client = None
self.is_up = False
self.env = os.environ.copy()
logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}")
def cleanup(self):
if (
os.environ
and "DISABLE_CLEANUP" in os.environ
and os.environ["DISABLE_CLEANUP"] == "1"
):
logging.warning("Cleanup is disabled")
return
# Just in case kill unstopped containers from previous launch
try:
unstopped_containers = self.get_running_containers()
if unstopped_containers:
logging.debug(
f"Trying to kill unstopped containers: {unstopped_containers}"
)
for id in unstopped_containers:
run_and_check(f"docker kill {id}", shell=True, nothrow=True)
run_and_check(f"docker rm {id}", shell=True, nothrow=True)
unstopped_containers = self.get_running_containers()
if unstopped_containers:
logging.debug(f"Left unstopped containers: {unstopped_containers}")
else:
logging.debug(f"Unstopped containers killed.")
else:
logging.debug(f"No running containers for project: {self.project_name}")
except:
pass
# # Just in case remove unused networks
# try:
# logging.debug("Trying to prune unused networks...")
# run_and_check(['docker', 'network', 'prune', '-f'])
# logging.debug("Networks pruned")
# except:
# pass
# Remove unused images
# try:
# logging.debug("Trying to prune unused images...")
# run_and_check(['docker', 'image', 'prune', '-f'])
# logging.debug("Images pruned")
# except:
# pass
# Remove unused volumes
try:
logging.debug("Trying to prune unused volumes...")
result = run_and_check(["docker volume ls | wc -l"], shell=True)
            if int(result) > 0:
run_and_check(["docker", "volume", "prune", "-f"])
logging.debug(f"Volumes pruned: {result}")
except:
pass
def get_docker_handle(self, docker_id):
exception = None
for i in range(5):
try:
return self.docker_client.containers.get(docker_id)
except Exception as ex:
print("Got exception getting docker handle", str(ex))
time.sleep(i * 2)
exception = ex
raise exception
def get_client_cmd(self):
cmd = self.client_bin_path
if p.basename(cmd) == "clickhouse":
cmd += " client"
return cmd
# Returns the list of currently running docker containers corresponding to this ClickHouseCluster.
def get_running_containers(self):
# docker-compose names containers using the following formula:
# container_name = project_name + '_' + instance_name + '_1'
# We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see
# https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name
filter_name = f"^/{self.project_name}_.*_1$"
# We want the command "docker container list" to show only containers' ID and their names, separated by colon.
format = "{{.ID}}:{{.Names}}"
containers = run_and_check(
f"docker container list --all --filter name='{filter_name}' --format '{format}'",
shell=True,
)
        containers = dict(
            line.split(":", 1) for line in containers.splitlines()
        )
return containers
def copy_file_from_container_to_container(
self, src_node, src_path, dst_node, dst_path
):
fname = os.path.basename(src_path)
run_and_check(
[f"docker cp {src_node.docker_id}:{src_path} {self.instances_dir}"],
shell=True,
)
run_and_check(
[f"docker cp {self.instances_dir}/{fname} {dst_node.docker_id}:{dst_path}"],
shell=True,
)
def setup_zookeeper_secure_cmd(
self, instance, env_variables, docker_compose_yml_dir
):
logging.debug("Setup ZooKeeper Secure")
zookeeper_docker_compose_path = p.join(
docker_compose_yml_dir, "docker_compose_zookeeper_secure.yml"
)
env_variables["ZOO_SECURE_CLIENT_PORT"] = str(self.zookeeper_secure_port)
env_variables["ZK_FS"] = "bind"
for i in range(1, 4):
zk_data_path = os.path.join(
self.zookeeper_instance_dir_prefix + str(i), "data"
)
zk_log_path = os.path.join(
self.zookeeper_instance_dir_prefix + str(i), "log"
)
env_variables["ZK_DATA" + str(i)] = zk_data_path
env_variables["ZK_DATA_LOG" + str(i)] = zk_log_path
self.zookeeper_dirs_to_create += [zk_data_path, zk_log_path]
logging.debug(f"DEBUG ZK: {self.zookeeper_dirs_to_create}")
self.with_zookeeper_secure = True
self.base_cmd.extend(["--file", zookeeper_docker_compose_path])
self.base_zookeeper_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
zookeeper_docker_compose_path,
]
return self.base_zookeeper_cmd
def setup_zookeeper_cmd(self, instance, env_variables, docker_compose_yml_dir):
logging.debug("Setup ZooKeeper")
zookeeper_docker_compose_path = p.join(
docker_compose_yml_dir, "docker_compose_zookeeper.yml"
)
env_variables["ZK_FS"] = "bind"
for i in range(1, 4):
zk_data_path = os.path.join(
self.zookeeper_instance_dir_prefix + str(i), "data"
)
zk_log_path = os.path.join(
self.zookeeper_instance_dir_prefix + str(i), "log"
)
env_variables["ZK_DATA" + str(i)] = zk_data_path
env_variables["ZK_DATA_LOG" + str(i)] = zk_log_path
self.zookeeper_dirs_to_create += [zk_data_path, zk_log_path]
logging.debug(f"DEBUG ZK: {self.zookeeper_dirs_to_create}")
self.with_zookeeper = True
self.base_cmd.extend(["--file", zookeeper_docker_compose_path])
self.base_zookeeper_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
zookeeper_docker_compose_path,
]
return self.base_zookeeper_cmd
def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir):
logging.debug("Setup Keeper")
keeper_docker_compose_path = p.join(
docker_compose_yml_dir, "docker_compose_keeper.yml"
)
binary_path = self.server_bin_path
binary_dir = os.path.dirname(self.server_bin_path)
# always prefer clickhouse-keeper standalone binary
if os.path.exists(os.path.join(binary_dir, "clickhouse-keeper")):
binary_path = os.path.join(binary_dir, "clickhouse-keeper")
keeper_cmd_prefix = "clickhouse-keeper"
else:
if binary_path.endswith("-server"):
binary_path = binary_path[: -len("-server")]
keeper_cmd_prefix = "clickhouse keeper"
env_variables["keeper_binary"] = binary_path
env_variables["keeper_cmd_prefix"] = keeper_cmd_prefix
env_variables["image"] = "clickhouse/integration-test:" + self.docker_base_tag
env_variables["user"] = str(os.getuid())
env_variables["keeper_fs"] = "bind"
for i in range(1, 4):
keeper_instance_dir = self.keeper_instance_dir_prefix + f"{i}"
logs_dir = os.path.join(keeper_instance_dir, "log")
configs_dir = os.path.join(keeper_instance_dir, "config")
coordination_dir = os.path.join(keeper_instance_dir, "coordination")
env_variables[f"keeper_logs_dir{i}"] = logs_dir
env_variables[f"keeper_config_dir{i}"] = configs_dir
env_variables[f"keeper_db_dir{i}"] = coordination_dir
self.zookeeper_dirs_to_create += [logs_dir, configs_dir, coordination_dir]
logging.debug(f"DEBUG KEEPER: {self.zookeeper_dirs_to_create}")
self.with_zookeeper = True
self.base_cmd.extend(["--file", keeper_docker_compose_path])
self.base_zookeeper_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
keeper_docker_compose_path,
]
return self.base_zookeeper_cmd
def setup_mysql_client_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mysql_client = True
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"),
]
)
self.base_mysql_client_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"),
]
return self.base_mysql_client_cmd
def setup_mysql_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mysql = True
env_variables["MYSQL_HOST"] = self.mysql_host
env_variables["MYSQL_PORT"] = str(self.mysql_port)
env_variables["MYSQL_ROOT_HOST"] = "%"
env_variables["MYSQL_LOGS"] = self.mysql_logs_dir
env_variables["MYSQL_LOGS_FS"] = "bind"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql.yml")]
)
self.base_mysql_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql.yml"),
]
return self.base_mysql_cmd
def setup_mysql8_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mysql8 = True
env_variables["MYSQL8_HOST"] = self.mysql8_host
env_variables["MYSQL8_PORT"] = str(self.mysql8_port)
env_variables["MYSQL8_ROOT_HOST"] = "%"
env_variables["MYSQL8_LOGS"] = self.mysql8_logs_dir
env_variables["MYSQL8_LOGS_FS"] = "bind"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml")]
)
self.base_mysql8_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml"),
]
return self.base_mysql8_cmd
def setup_mysql_cluster_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mysql_cluster = True
env_variables["MYSQL_CLUSTER_PORT"] = str(self.mysql_port)
env_variables["MYSQL_CLUSTER_ROOT_HOST"] = "%"
env_variables["MYSQL_CLUSTER_LOGS"] = self.mysql_cluster_logs_dir
env_variables["MYSQL_CLUSTER_LOGS_FS"] = "bind"
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"),
]
)
self.base_mysql_cluster_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"),
]
return self.base_mysql_cluster_cmd
def setup_postgres_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_postgres.yml")]
)
env_variables["POSTGRES_PORT"] = str(self.postgres_port)
env_variables["POSTGRES_DIR"] = self.postgres_logs_dir
env_variables["POSTGRES_LOGS_FS"] = "bind"
self.with_postgres = True
self.base_postgres_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_postgres.yml"),
]
return self.base_postgres_cmd
def setup_postgres_cluster_cmd(
self, instance, env_variables, docker_compose_yml_dir
):
self.with_postgres_cluster = True
env_variables["POSTGRES_PORT"] = str(self.postgres_port)
env_variables["POSTGRES2_DIR"] = self.postgres2_logs_dir
env_variables["POSTGRES3_DIR"] = self.postgres3_logs_dir
env_variables["POSTGRES4_DIR"] = self.postgres4_logs_dir
env_variables["POSTGRES_LOGS_FS"] = "bind"
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"),
]
)
self.base_postgres_cluster_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"),
]
def setup_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_hdfs = True
env_variables["HDFS_HOST"] = self.hdfs_host
env_variables["HDFS_NAME_PORT"] = str(self.hdfs_name_port)
env_variables["HDFS_DATA_PORT"] = str(self.hdfs_data_port)
env_variables["HDFS_LOGS"] = self.hdfs_logs_dir
env_variables["HDFS_FS"] = "bind"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml")]
)
self.base_hdfs_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml"),
]
logging.debug("HDFS BASE CMD:{self.base_hdfs_cmd)}")
return self.base_hdfs_cmd
def setup_kerberized_hdfs_cmd(
self, instance, env_variables, docker_compose_yml_dir
):
self.with_kerberized_hdfs = True
env_variables["KERBERIZED_HDFS_HOST"] = self.hdfs_kerberized_host
env_variables["KERBERIZED_HDFS_NAME_PORT"] = str(self.hdfs_kerberized_name_port)
env_variables["KERBERIZED_HDFS_DATA_PORT"] = str(self.hdfs_kerberized_data_port)
env_variables["KERBERIZED_HDFS_LOGS"] = self.hdfs_kerberized_logs_dir
env_variables["KERBERIZED_HDFS_FS"] = "bind"
env_variables["KERBERIZED_HDFS_DIR"] = instance.path + "/"
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"),
]
)
self.base_kerberized_hdfs_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"),
]
return self.base_kerberized_hdfs_cmd
def setup_kafka_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_kafka = True
env_variables["KAFKA_HOST"] = self.kafka_host
env_variables["KAFKA_EXTERNAL_PORT"] = str(self.kafka_port)
env_variables["SCHEMA_REGISTRY_EXTERNAL_PORT"] = str(self.schema_registry_port)
env_variables["SCHEMA_REGISTRY_INTERNAL_PORT"] = "8081"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_kafka.yml")]
)
self.base_kafka_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kafka.yml"),
]
return self.base_kafka_cmd
def setup_kerberized_kafka_cmd(
self, instance, env_variables, docker_compose_yml_dir
):
self.with_kerberized_kafka = True
env_variables["KERBERIZED_KAFKA_DIR"] = instance.path + "/"
env_variables["KERBERIZED_KAFKA_HOST"] = self.kerberized_kafka_host
env_variables["KERBERIZED_KAFKA_EXTERNAL_PORT"] = str(
self.kerberized_kafka_port
)
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"),
]
)
self.base_kerberized_kafka_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"),
]
return self.base_kerberized_kafka_cmd
def setup_redis_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_redis = True
env_variables["REDIS_HOST"] = self.redis_host
env_variables["REDIS_EXTERNAL_PORT"] = str(self.redis_port)
env_variables["REDIS_INTERNAL_PORT"] = "6379"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_redis.yml")]
)
self.base_redis_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_redis.yml"),
]
return self.base_redis_cmd
def setup_rabbitmq_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_rabbitmq = True
env_variables["RABBITMQ_HOST"] = self.rabbitmq_host
env_variables["RABBITMQ_PORT"] = str(self.rabbitmq_port)
env_variables["RABBITMQ_LOGS"] = self.rabbitmq_logs_dir
env_variables["RABBITMQ_LOGS_FS"] = "bind"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml")]
)
self.base_rabbitmq_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml"),
]
return self.base_rabbitmq_cmd
def setup_mongo_secure_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mongo = self.with_mongo_secure = True
env_variables["MONGO_HOST"] = self.mongo_host
env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port)
env_variables["MONGO_INTERNAL_PORT"] = "27017"
env_variables["MONGO_CONFIG_PATH"] = HELPERS_DIR
self.base_cmd.extend(
[
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"),
]
)
self.base_mongo_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"),
]
return self.base_mongo_cmd
def setup_mongo_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_mongo = True
env_variables["MONGO_HOST"] = self.mongo_host
env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port)
env_variables["MONGO_INTERNAL_PORT"] = "27017"
env_variables["MONGO_NO_CRED_EXTERNAL_PORT"] = str(self.mongo_no_cred_port)
env_variables["MONGO_NO_CRED_INTERNAL_PORT"] = "27017"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml")]
)
self.base_mongo_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_mongo.yml"),
]
return self.base_mongo_cmd
def setup_minio_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_minio = True
cert_d = p.join(self.minio_dir, "certs")
env_variables["MINIO_CERTS_DIR"] = cert_d
env_variables["MINIO_PORT"] = str(self.minio_port)
env_variables["SSL_CERT_FILE"] = p.join(self.base_dir, cert_d, "public.crt")
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_minio.yml")]
)
self.base_minio_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_minio.yml"),
]
return self.base_minio_cmd
def setup_azurite_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_azurite = True
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_azurite.yml")]
)
self.base_azurite_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_azurite.yml"),
]
return self.base_azurite_cmd
def setup_cassandra_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_cassandra = True
env_variables["CASSANDRA_PORT"] = str(self.cassandra_port)
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_cassandra.yml")]
)
self.base_cassandra_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_cassandra.yml"),
]
return self.base_cassandra_cmd
def setup_jdbc_bridge_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_jdbc_bridge = True
env_variables["JDBC_DRIVER_LOGS"] = self.jdbc_driver_logs_dir
env_variables["JDBC_DRIVER_FS"] = "bind"
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml")]
)
self.base_jdbc_bridge_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml"),
]
return self.base_jdbc_bridge_cmd
def setup_nginx_cmd(self, instance, env_variables, docker_compose_yml_dir):
self.with_nginx = True
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_nginx.yml")]
)
self.base_nginx_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_nginx.yml"),
]
return self.base_nginx_cmd
def setup_hive(self, instance, env_variables, docker_compose_yml_dir):
self.with_hive = True
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_hive.yml")]
)
self.base_hive_cmd = [
"docker-compose",
"--env-file",
instance.env_file,
"--project-name",
self.project_name,
"--file",
p.join(docker_compose_yml_dir, "docker_compose_hive.yml"),
]
return self.base_hive_cmd
def add_instance(
self,
name,
base_config_dir=None,
main_configs=None,
user_configs=None,
dictionaries=None,
macros=None,
with_zookeeper=False,
with_zookeeper_secure=False,
with_mysql_client=False,
with_mysql=False,
with_mysql8=False,
with_mysql_cluster=False,
with_kafka=False,
with_kerberized_kafka=False,
with_rabbitmq=False,
clickhouse_path_dir=None,
with_odbc_drivers=False,
with_postgres=False,
with_postgres_cluster=False,
with_hdfs=False,
with_kerberized_hdfs=False,
with_mongo=False,
with_mongo_secure=False,
with_nginx=False,
with_redis=False,
with_minio=False,
with_azurite=False,
with_cassandra=False,
with_jdbc_bridge=False,
with_hive=False,
hostname=None,
env_variables=None,
image="clickhouse/integration-test",
tag=None,
stay_alive=False,
ipv4_address=None,
ipv6_address=None,
with_installed_binary=False,
external_dirs=None,
tmpfs=None,
zookeeper_docker_compose_path=None,
minio_certs_dir=None,
use_keeper=True,
main_config_name="config.xml",
users_config_name="users.xml",
copy_common_configs=True,
config_root_name="clickhouse",
extra_configs=[],
) -> "ClickHouseInstance":
"""Add an instance to the cluster.
name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
base_config_dir - a directory with config.xml and users.xml files which will be copied to /etc/clickhouse-server/ directory
main_configs - a list of config files that will be added to config.d/ directory
user_configs - a list of config files that will be added to users.d/ directory
with_zookeeper - if True, add ZooKeeper configuration to configs and ZooKeeper instances to the cluster.
with_zookeeper_secure - if True, add ZooKeeper Secure configuration to configs and ZooKeeper instances to the cluster.
        extra_configs - config files that cannot be put into config.d or users.d
"""
if self.is_up:
raise Exception("Can't add instance %s: cluster is already up!" % name)
if name in self.instances:
raise Exception(
"Can't add instance `%s': there is already an instance with the same name!"
% name
)
if tag is None:
tag = self.docker_base_tag
if not env_variables:
env_variables = {}
self.use_keeper = use_keeper
# Code coverage files will be placed in database directory
        # (affects only WITH_COVERAGE=1 builds)
env_variables[
"LLVM_PROFILE_FILE"
] = "/var/lib/clickhouse/server_%h_%p_%m.profraw"
instance = ClickHouseInstance(
cluster=self,
base_path=self.base_dir,
name=name,
base_config_dir=base_config_dir
if base_config_dir
else self.base_config_dir,
custom_main_configs=main_configs or [],
custom_user_configs=user_configs or [],
custom_dictionaries=dictionaries or [],
macros=macros or {},
with_zookeeper=with_zookeeper,
zookeeper_config_path=self.zookeeper_config_path,
with_mysql_client=with_mysql_client,
with_mysql=with_mysql,
with_mysql8=with_mysql8,
with_mysql_cluster=with_mysql_cluster,
with_kafka=with_kafka,
with_kerberized_kafka=with_kerberized_kafka,
with_rabbitmq=with_rabbitmq,
with_nginx=with_nginx,
with_kerberized_hdfs=with_kerberized_hdfs,
with_mongo=with_mongo or with_mongo_secure,
with_redis=with_redis,
with_minio=with_minio,
with_azurite=with_azurite,
with_cassandra=with_cassandra,
with_jdbc_bridge=with_jdbc_bridge,
with_hive=with_hive,
server_bin_path=self.server_bin_path,
odbc_bridge_bin_path=self.odbc_bridge_bin_path,
library_bridge_bin_path=self.library_bridge_bin_path,
clickhouse_path_dir=clickhouse_path_dir,
with_odbc_drivers=with_odbc_drivers,
with_postgres=with_postgres,
with_postgres_cluster=with_postgres_cluster,
hostname=hostname,
env_variables=env_variables,
image=image,
tag=tag,
stay_alive=stay_alive,
ipv4_address=ipv4_address,
ipv6_address=ipv6_address,
with_installed_binary=with_installed_binary,
main_config_name=main_config_name,
users_config_name=users_config_name,
copy_common_configs=copy_common_configs,
external_dirs=external_dirs,
tmpfs=tmpfs or [],
config_root_name=config_root_name,
extra_configs=extra_configs,
)
docker_compose_yml_dir = get_docker_compose_path()
self.instances[name] = instance
if ipv4_address is not None or ipv6_address is not None:
self.with_net_trics = True
self.base_cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")]
)
self.base_cmd.extend(["--file", instance.docker_compose_path])
cmds = []
if with_zookeeper_secure and not self.with_zookeeper_secure:
cmds.append(
self.setup_zookeeper_secure_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_zookeeper and not self.with_zookeeper:
if self.use_keeper:
cmds.append(
self.setup_keeper_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
else:
cmds.append(
self.setup_zookeeper_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_mysql_client and not self.with_mysql_client:
cmds.append(
self.setup_mysql_client_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_mysql and not self.with_mysql:
cmds.append(
self.setup_mysql_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_mysql8 and not self.with_mysql8:
cmds.append(
self.setup_mysql8_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_mysql_cluster and not self.with_mysql_cluster:
cmds.append(
self.setup_mysql_cluster_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_postgres and not self.with_postgres:
cmds.append(
self.setup_postgres_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_postgres_cluster and not self.with_postgres_cluster:
cmds.append(
self.setup_postgres_cluster_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_odbc_drivers and not self.with_odbc_drivers:
self.with_odbc_drivers = True
if not self.with_mysql:
cmds.append(
self.setup_mysql_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if not self.with_postgres:
cmds.append(
self.setup_postgres_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_kafka and not self.with_kafka:
cmds.append(
self.setup_kafka_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_kerberized_kafka and not self.with_kerberized_kafka:
cmds.append(
self.setup_kerberized_kafka_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_rabbitmq and not self.with_rabbitmq:
cmds.append(
self.setup_rabbitmq_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_nginx and not self.with_nginx:
cmds.append(
self.setup_nginx_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_hdfs and not self.with_hdfs:
cmds.append(
self.setup_hdfs_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_kerberized_hdfs and not self.with_kerberized_hdfs:
cmds.append(
self.setup_kerberized_hdfs_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if (with_mongo or with_mongo_secure) and not (
self.with_mongo or self.with_mongo_secure
):
if with_mongo_secure:
cmds.append(
self.setup_mongo_secure_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
else:
cmds.append(
self.setup_mongo_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if self.with_net_trics:
for cmd in cmds:
cmd.extend(
["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")]
)
if with_redis and not self.with_redis:
cmds.append(
self.setup_redis_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_minio and not self.with_minio:
cmds.append(
self.setup_minio_cmd(instance, env_variables, docker_compose_yml_dir)
)
if with_azurite and not self.with_azurite:
cmds.append(
self.setup_azurite_cmd(instance, env_variables, docker_compose_yml_dir)
)
if minio_certs_dir is not None:
if self.minio_certs_dir is None:
self.minio_certs_dir = minio_certs_dir
else:
raise Exception("Overwriting minio certs dir")
if with_cassandra and not self.with_cassandra:
cmds.append(
self.setup_cassandra_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_jdbc_bridge and not self.with_jdbc_bridge:
cmds.append(
self.setup_jdbc_bridge_cmd(
instance, env_variables, docker_compose_yml_dir
)
)
if with_hive:
cmds.append(
self.setup_hive(instance, env_variables, docker_compose_yml_dir)
)
logging.debug(
"Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}".format(
self.name,
self.project_name,
name,
tag,
self.base_cmd,
docker_compose_yml_dir,
)
)
return instance
def get_instance_docker_id(self, instance_name):
# According to how docker-compose names containers.
return self.project_name + "_" + instance_name + "_1"
def _replace(self, path, what, to):
with open(path, "r") as p:
data = p.read()
data = data.replace(what, to)
with open(path, "w") as p:
p.write(data)
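# Restarts a node under a new static IP: the address in the node's generated
# docker-compose file is patched, the container is force-recreated, and the
# helper waits until the server answers queries again.
# Usage sketch (assumes a started cluster and a node created with a static
# ipv4_address; the address below is hypothetical):
#   node = cluster.instances["node1"]
#   cluster.restart_instance_with_ip_change(node, "10.5.95.11")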
def restart_instance_with_ip_change(self, node, new_ip):
if "::" in new_ip:
if node.ipv6_address is None:
raise Exception("You should specity ipv6_address in add_node method")
self._replace(node.docker_compose_path, node.ipv6_address, new_ip)
node.ipv6_address = new_ip
else:
if node.ipv4_address is None:
raise Exception("You should specity ipv4_address in add_node method")
self._replace(node.docker_compose_path, node.ipv4_address, new_ip)
node.ipv4_address = new_ip
run_and_check(self.base_cmd + ["stop", node.name])
run_and_check(self.base_cmd + ["rm", "--force", "--stop", node.name])
run_and_check(
self.base_cmd + ["up", "--force-recreate", "--no-deps", "-d", node.name]
)
node.ip_address = self.get_instance_ip(node.name)
node.client = Client(node.ip_address, command=self.client_bin_path)
logging.info("Restart node with ip change")
# In builds with sanitizer the server can take a long time to start
node.wait_for_start(start_timeout=180.0, connection_timeout=600.0) # seconds
res = node.client.query("SELECT 30")
logging.debug(f"Read '{res}'")
assert "30\n" == res
logging.info("Restarted")
return node
def restart_service(self, service_name):
run_and_check(self.base_cmd + ["restart", service_name])
def get_instance_ip(self, instance_name):
logging.debug("get_instance_ip instance_name={}".format(instance_name))
docker_id = self.get_instance_docker_id(instance_name)
# for cont in self.docker_client.containers.list():
# logging.debug("CONTAINERS LIST: ID={} NAME={} STATUS={}".format(cont.id, cont.name, cont.status))
handle = self.docker_client.containers.get(docker_id)
return list(handle.attrs["NetworkSettings"]["Networks"].values())[0][
"IPAddress"
]
def get_container_id(self, instance_name):
return self.get_instance_docker_id(instance_name)
# docker_id = self.get_instance_docker_id(instance_name)
# handle = self.docker_client.containers.get(docker_id)
# return handle.attrs['Id']
def get_container_logs(self, instance_name):
container_id = self.get_container_id(instance_name)
return self.docker_client.api.logs(container_id).decode()
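# Runs a command inside a container, either through the docker CLI
# (use_cli=True, the default) or through the docker-py exec API; the API path
# raises on a non-zero exit code unless nothrow=True.
# Usage sketch (container_id is assumed to come from get_container_id below):
#   cluster.exec_in_container(
#       container_id, ["bash", "-c", "ls /etc/clickhouse-server"], user="root"
#   )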
def exec_in_container(
self, container_id, cmd, detach=False, nothrow=False, use_cli=True, **kwargs
):
if use_cli:
logging.debug(
f"run container_id:{container_id} detach:{detach} nothrow:{nothrow} cmd: {cmd}"
)
exec_cmd = ["docker", "exec"]
if "user" in kwargs:
exec_cmd += ["-u", kwargs["user"]]
result = subprocess_check_call(
exec_cmd + [container_id] + cmd, detach=detach, nothrow=nothrow
)
return result
else:
exec_id = self.docker_client.api.exec_create(container_id, cmd, **kwargs)
output = self.docker_client.api.exec_start(exec_id, detach=detach)
exit_code = self.docker_client.api.exec_inspect(exec_id)["ExitCode"]
if exit_code:
container_info = self.docker_client.api.inspect_container(container_id)
image_id = container_info.get("Image")
image_info = self.docker_client.api.inspect_image(image_id)
logging.debug(("Command failed in container {}: ".format(container_id)))
pprint.pprint(container_info)
logging.debug("")
logging.debug(
("Container {} uses image {}: ".format(container_id, image_id))
)
pprint.pprint(image_info)
logging.debug("")
message = 'Cmd "{}" failed in container {}. Return code {}. Output: {}'.format(
" ".join(cmd), container_id, exit_code, output
)
if nothrow:
logging.debug(message)
else:
raise Exception(message)
if not detach:
return output.decode()
return output
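# Copies a local text file into a container without docker cp: the content is
# base64-encoded on the host and decoded into dest_path inside the container.
# Usage sketch (both paths are hypothetical):
#   cluster.copy_file_to_container(
#       container_id, "configs/extra.xml", "/etc/clickhouse-server/config.d/extra.xml"
#   )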
def copy_file_to_container(self, container_id, local_path, dest_path):
with open(local_path, "r") as fdata:
data = fdata.read()
encodedBytes = base64.b64encode(data.encode("utf-8"))
encodedStr = str(encodedBytes, "utf-8")
self.exec_in_container(
container_id,
[
"bash",
"-c",
"echo {} | base64 --decode > {}".format(encodedStr, dest_path),
],
user="root",
)
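# Polls an HTTP endpoint with GET requests until it answers successfully or the
# timeout expires. Used, for example, to wait for the JDBC bridge ping endpoint
# during start():
#   cluster.wait_for_url(f"http://{cluster.jdbc_bridge_ip}:{cluster.jdbc_bridge_port}/ping")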
def wait_for_url(
self, url="http://localhost:8123/ping", conn_timeout=2, interval=2, timeout=60
):
if not url.startswith("http"):
url = "http://" + url
if interval <= 0:
interval = 2
if timeout <= 0:
timeout = 60
attempts = 1
errors = []
start = time.time()
while time.time() - start < timeout:
try:
requests.get(
url, allow_redirects=True, timeout=conn_timeout, verify=False
).raise_for_status()
logging.debug(
"{} is available after {} seconds".format(url, time.time() - start)
)
return
except Exception as ex:
logging.debug(
"{} Attempt {} failed, retrying in {} seconds".format(
ex, attempts, interval
)
)
attempts += 1
errors += [str(ex)]
time.sleep(interval)
run_and_check(["docker", "ps", "--all"])
logging.error("Can't connect to URL:{}".format(errors))
raise Exception(
"Cannot wait URL {}(interval={}, timeout={}, attempts={})".format(
url, interval, timeout, attempts
)
)
def wait_mysql_client_to_start(self, timeout=180):
start = time.time()
errors = []
self.mysql_client_container = self.get_docker_handle(
self.get_instance_docker_id(self.mysql_client_host)
)
while time.time() - start < timeout:
try:
info = self.mysql_client_container.client.api.inspect_container(
self.mysql_client_container.name
)
if info["State"]["Health"]["Status"] == "healthy":
logging.debug("Mysql Client Container Started")
return
time.sleep(1)
except Exception as ex:
errors += [str(ex)]
time.sleep(1)
run_and_check(["docker", "ps", "--all"])
logging.error("Can't connect to MySQL Client:{}".format(errors))
raise Exception("Cannot wait MySQL Client container")
def wait_mysql_to_start(self, timeout=180):
self.mysql_ip = self.get_instance_ip("mysql57")
start = time.time()
errors = []
while time.time() - start < timeout:
try:
conn = pymysql.connect(
user=mysql_user,
password=mysql_pass,
host=self.mysql_ip,
port=self.mysql_port,
)
conn.close()
logging.debug("Mysql Started")
return
except Exception as ex:
errors += [str(ex)]
time.sleep(0.5)
run_and_check(["docker-compose", "ps", "--services", "--all"])
logging.error("Can't connect to MySQL:{}".format(errors))
raise Exception("Cannot wait MySQL container")
def wait_mysql8_to_start(self, timeout=180):
self.mysql8_ip = self.get_instance_ip("mysql80")
start = time.time()
while time.time() - start < timeout:
try:
conn = pymysql.connect(
user=mysql8_user,
password=mysql8_pass,
host=self.mysql8_ip,
port=self.mysql8_port,
)
conn.close()
logging.debug("Mysql 8 Started")
return
except Exception as ex:
logging.debug("Can't connect to MySQL 8 " + str(ex))
time.sleep(0.5)
run_and_check(["docker-compose", "ps", "--services", "--all"])
raise Exception("Cannot wait MySQL 8 container")
def wait_mysql_cluster_to_start(self, timeout=180):
self.mysql2_ip = self.get_instance_ip(self.mysql2_host)
self.mysql3_ip = self.get_instance_ip(self.mysql3_host)
self.mysql4_ip = self.get_instance_ip(self.mysql4_host)
start = time.time()
errors = []
while time.time() - start < timeout:
try:
for ip in [self.mysql2_ip, self.mysql3_ip, self.mysql4_ip]:
conn = pymysql.connect(
user=mysql_user,
password=mysql_pass,
host=ip,
port=self.mysql_port,
)
conn.close()
logging.debug(f"Mysql Started {ip}")
return
except Exception as ex:
errors += [str(ex)]
time.sleep(0.5)
run_and_check(["docker-compose", "ps", "--services", "--all"])
logging.error("Can't connect to MySQL:{}".format(errors))
raise Exception("Cannot wait MySQL container")
def wait_postgres_to_start(self, timeout=260):
self.postgres_ip = self.get_instance_ip(self.postgres_host)
start = time.time()
while time.time() - start < timeout:
try:
self.postgres_conn = psycopg2.connect(
host=self.postgres_ip,
port=self.postgres_port,
database=pg_db,
user=pg_user,
password=pg_pass,
)
self.postgres_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.postgres_conn.autocommit = True
logging.debug("Postgres Started")
return
except Exception as ex:
logging.debug("Can't connect to Postgres " + str(ex))
time.sleep(0.5)
raise Exception("Cannot wait Postgres container")
def wait_postgres_cluster_to_start(self, timeout=180):
self.postgres2_ip = self.get_instance_ip(self.postgres2_host)
self.postgres3_ip = self.get_instance_ip(self.postgres3_host)
self.postgres4_ip = self.get_instance_ip(self.postgres4_host)
start = time.time()
while time.time() - start < timeout:
try:
self.postgres2_conn = psycopg2.connect(
host=self.postgres2_ip,
port=self.postgres_port,
database=pg_db,
user=pg_user,
password=pg_pass,
)
self.postgres2_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.postgres2_conn.autocommit = True
logging.debug("Postgres Cluster host 2 started")
break
except Exception as ex:
logging.debug("Can't connect to Postgres host 2" + str(ex))
time.sleep(0.5)
while time.time() - start < timeout:
try:
self.postgres3_conn = psycopg2.connect(
host=self.postgres3_ip,
port=self.postgres_port,
database=pg_db,
user=pg_user,
password=pg_pass,
)
self.postgres3_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.postgres3_conn.autocommit = True
logging.debug("Postgres Cluster host 3 started")
break
except Exception as ex:
logging.debug("Can't connect to Postgres host 3" + str(ex))
time.sleep(0.5)
while time.time() - start < timeout:
try:
self.postgres4_conn = psycopg2.connect(
host=self.postgres4_ip,
port=self.postgres_port,
database=pg_db,
user=pg_user,
password=pg_pass,
)
self.postgres4_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.postgres4_conn.autocommit = True
logging.debug("Postgres Cluster host 4 started")
return
except Exception as ex:
logging.debug("Can't connect to Postgres host 4" + str(ex))
time.sleep(0.5)
raise Exception("Cannot wait Postgres container")
def wait_rabbitmq_to_start(self, timeout=180, throw=True):
self.rabbitmq_ip = self.get_instance_ip(self.rabbitmq_host)
start = time.time()
while time.time() - start < timeout:
try:
if check_rabbitmq_is_available(self.rabbitmq_docker_id):
logging.debug("RabbitMQ is available")
if enable_consistent_hash_plugin(self.rabbitmq_docker_id):
logging.debug("RabbitMQ consistent hash plugin is available")
return True
time.sleep(0.5)
except Exception as ex:
logging.debug("Can't connect to RabbitMQ " + str(ex))
time.sleep(0.5)
if throw:
raise Exception("Cannot wait RabbitMQ container")
return False
def wait_redis_to_start(self, timeout=180, throw=True):
self.redis_ip = self.get_instance_ip(self.redis_host)
start = time.time()
while time.time() - start < timeout:
try:
if check_redis_is_available(self.redis_docker_id):
logging.debug("Redis is available")
return True
time.sleep(0.5)
except Exception as ex:
logging.debug("Can't connect to Redis " + str(ex))
time.sleep(0.5)
if throw:
raise Exception("Cannot wait Redis container")
return False
def wait_nginx_to_start(self, timeout=60):
self.nginx_ip = self.get_instance_ip(self.nginx_host)
start = time.time()
while time.time() - start < timeout:
try:
self.exec_in_container(
self.nginx_id,
["curl", "-X", "PUT", "-d", "Test", "http://test.com/test.txt"],
)
res = self.exec_in_container(
self.nginx_id, ["curl", "-X", "GET", "http://test.com/test.txt"]
)
assert res == "Test"
print("nginx static files server is available")
return
except Exception as ex:
print("Can't connect to nginx: " + str(ex))
time.sleep(0.5)
def wait_zookeeper_secure_to_start(self, timeout=20):
logging.debug("Wait ZooKeeper Secure to start")
start = time.time()
while time.time() - start < timeout:
try:
for instance in ["zoo1", "zoo2", "zoo3"]:
conn = self.get_kazoo_client(instance)
conn.get_children("/")
conn.stop()
logging.debug("All instances of ZooKeeper Secure started")
return
except Exception as ex:
logging.debug("Can't connect to ZooKeeper secure " + str(ex))
time.sleep(0.5)
raise Exception("Cannot wait ZooKeeper secure container")
def wait_zookeeper_to_start(self, timeout=180):
logging.debug("Wait ZooKeeper to start")
start = time.time()
while time.time() - start < timeout:
try:
for instance in ["zoo1", "zoo2", "zoo3"]:
conn = self.get_kazoo_client(instance)
conn.get_children("/")
conn.stop()
logging.debug("All instances of ZooKeeper started")
return
except Exception as ex:
logging.debug("Can't connect to ZooKeeper " + str(ex))
time.sleep(0.5)
raise Exception("Cannot wait ZooKeeper container")
def make_hdfs_api(self, timeout=180, kerberized=False):
if kerberized:
keytab = p.abspath(
p.join(self.instances["node1"].path, "secrets/clickhouse.keytab")
)
krb_conf = p.abspath(
p.join(self.instances["node1"].path, "secrets/krb_long.conf")
)
self.hdfs_kerberized_ip = self.get_instance_ip(self.hdfs_kerberized_host)
kdc_ip = self.get_instance_ip("hdfskerberos")
self.hdfs_api = HDFSApi(
user="root",
timeout=timeout,
kerberized=True,
principal="[email protected]",
keytab=keytab,
krb_conf=krb_conf,
host=self.hdfs_kerberized_host,
protocol="http",
proxy_port=self.hdfs_kerberized_name_port,
data_port=self.hdfs_kerberized_data_port,
hdfs_ip=self.hdfs_kerberized_ip,
kdc_ip=kdc_ip,
)
else:
self.hdfs_ip = self.get_instance_ip(self.hdfs_host)
self.hdfs_api = HDFSApi(
user="root",
host=self.hdfs_host,
data_port=self.hdfs_data_port,
proxy_port=self.hdfs_name_port,
hdfs_ip=self.hdfs_ip,
)
def wait_kafka_is_available(self, kafka_docker_id, kafka_port, max_retries=50):
retries = 0
while True:
if check_kafka_is_available(kafka_docker_id, kafka_port):
break
else:
retries += 1
if retries > max_retries:
raise Exception("Kafka is not available")
logging.debug("Waiting for Kafka to start up")
time.sleep(1)
def wait_hdfs_to_start(self, timeout=300, check_marker=False):
start = time.time()
while time.time() - start < timeout:
try:
self.hdfs_api.write_data("/somefilewithrandomname222", "1")
logging.debug("Connected to HDFS and SafeMode disabled! ")
if check_marker:
self.hdfs_api.read_data("/preparations_done_marker")
return
except Exception as ex:
logging.exception(
"Can't connect to HDFS or preparations are not done yet " + str(ex)
)
time.sleep(1)
raise Exception("Can't wait HDFS to start")
def wait_mongo_to_start(self, timeout=30, secure=False):
connection_str = "mongodb://{user}:{password}@{host}:{port}".format(
host="localhost", port=self.mongo_port, user=mongo_user, password=mongo_pass
)
if secure:
connection_str += "/?tls=true&tlsAllowInvalidCertificates=true"
connection = pymongo.MongoClient(connection_str)
start = time.time()
while time.time() - start < timeout:
try:
databases = connection.list_database_names()
logging.debug(f"Connected to Mongo dbs: {databases}")
return
except Exception as ex:
logging.debug("Can't connect to Mongo " + str(ex))
time.sleep(1)
def wait_minio_to_start(self, timeout=180, secure=False):
self.minio_ip = self.get_instance_ip(self.minio_host)
self.minio_redirect_ip = self.get_instance_ip(self.minio_redirect_host)
os.environ["SSL_CERT_FILE"] = p.join(
self.base_dir, self.minio_dir, "certs", "public.crt"
)
minio_client = Minio(
f"{self.minio_ip}:{self.minio_port}",
access_key=minio_access_key,
secret_key=minio_secret_key,
secure=secure,
http_client=urllib3.PoolManager(cert_reqs="CERT_NONE"),
) # disable SSL check as we test ClickHouse and not Python library
start = time.time()
while time.time() - start < timeout:
try:
minio_client.list_buckets()
logging.debug("Connected to Minio.")
buckets = [self.minio_bucket, self.minio_bucket_2]
for bucket in buckets:
if minio_client.bucket_exists(bucket):
delete_object_list = map(
lambda x: x.object_name,
minio_client.list_objects_v2(bucket, recursive=True),
)
errors = minio_client.remove_objects(bucket, delete_object_list)
for error in errors:
logging.error(f"Error occured when deleting object {error}")
minio_client.remove_bucket(bucket)
minio_client.make_bucket(bucket)
logging.debug("S3 bucket '%s' created", bucket)
self.minio_client = minio_client
return
except Exception as ex:
logging.debug("Can't connect to Minio: %s", str(ex))
time.sleep(1)
raise Exception("Can't wait Minio to start")
def wait_azurite_to_start(self, timeout=180):
from azure.storage.blob import BlobServiceClient
connection_string = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
time.sleep(1)
start = time.time()
while time.time() - start < timeout:
try:
blob_service_client = BlobServiceClient.from_connection_string(
connection_string
)
logging.debug(blob_service_client.get_account_information())
self.blob_service_client = blob_service_client
return
except Exception as ex:
logging.debug("Can't connect to Azurite: %s", str(ex))
time.sleep(1)
raise Exception("Can't wait Azurite to start")
def wait_schema_registry_to_start(self, timeout=180):
sr_client = CachedSchemaRegistryClient(
{"url": "http://localhost:{}".format(self.schema_registry_port)}
)
start = time.time()
while time.time() - start < timeout:
try:
sr_client._send_request(sr_client.url)
logging.debug("Connected to SchemaRegistry")
return sr_client
except Exception as ex:
logging.debug(("Can't connect to SchemaRegistry: %s", str(ex)))
time.sleep(1)
raise Exception("Can't wait Schema Registry to start")
def wait_cassandra_to_start(self, timeout=180):
self.cassandra_ip = self.get_instance_ip(self.cassandra_host)
cass_client = cassandra.cluster.Cluster(
[self.cassandra_ip],
port=self.cassandra_port,
load_balancing_policy=RoundRobinPolicy(),
)
start = time.time()
while time.time() - start < timeout:
try:
logging.info(
f"Check Cassandra Online {self.cassandra_id} {self.cassandra_ip} {self.cassandra_port}"
)
check = self.exec_in_container(
self.cassandra_id,
[
"bash",
"-c",
f"/opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e 'describe keyspaces' {self.cassandra_ip} {self.cassandra_port}",
],
user="root",
)
logging.info("Cassandra Online")
cass_client.connect()
logging.info("Connected Clients to Cassandra")
return
except Exception as ex:
logging.warning("Can't connect to Cassandra: %s", str(ex))
time.sleep(1)
raise Exception("Can't wait Cassandra to start")
def start(self, destroy_dirs=True):
pytest_xdist_logging_to_separate_files.setup()
logging.info("Running tests in {}".format(self.base_path))
logging.debug(
"Cluster start called. is_up={}, destroy_dirs={}".format(
self.is_up, destroy_dirs
)
)
if self.is_up:
return
try:
self.cleanup()
except Exception as e:
logging.warning("Cleanup failed:{e}")
try:
# clickhouse_pull_cmd = self.base_cmd + ['pull']
# print(f"Pulling images for {self.base_cmd}")
# retry_exception(10, 5, subprocess_check_call, Exception, clickhouse_pull_cmd)
if destroy_dirs and p.exists(self.instances_dir):
logging.debug(f"Removing instances dir {self.instances_dir}")
shutil.rmtree(self.instances_dir)
for instance in list(self.instances.values()):
logging.debug(
(
"Setup directory for instance: {} destroy_dirs: {}".format(
instance.name, destroy_dirs
)
)
)
instance.create_dir(destroy_dir=destroy_dirs)
_create_env_file(os.path.join(self.env_file), self.env_variables)
self.docker_client = docker.DockerClient(
base_url="unix:///var/run/docker.sock",
version=self.docker_api_version,
timeout=600,
)
common_opts = ["--verbose", "up", "-d"]
images_pull_cmd = self.base_cmd + ["pull"]
# sometimes dockerhub/proxy can be flaky
for i in range(5):
try:
run_and_check(images_pull_cmd)
break
except Exception as ex:
if i == 4:
raise ex
logging.info("Got exception pulling images: %s", ex)
time.sleep(i * 3)
if self.with_zookeeper_secure and self.base_zookeeper_cmd:
logging.debug("Setup ZooKeeper Secure")
logging.debug(
f"Creating internal ZooKeeper dirs: {self.zookeeper_dirs_to_create}"
)
for i in range(1, 3):
if os.path.exists(self.zookeeper_instance_dir_prefix + f"{i}"):
shutil.rmtree(self.zookeeper_instance_dir_prefix + f"{i}")
for dir in self.zookeeper_dirs_to_create:
os.makedirs(dir)
run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env)
self.up_called = True
self.wait_zookeeper_secure_to_start()
for command in self.pre_zookeeper_commands:
self.run_kazoo_commands_with_retries(command, repeats=5)
if self.with_zookeeper and self.base_zookeeper_cmd:
logging.debug("Setup ZooKeeper")
logging.debug(
f"Creating internal ZooKeeper dirs: {self.zookeeper_dirs_to_create}"
)
if self.use_keeper:
for i in range(1, 4):
if os.path.exists(self.keeper_instance_dir_prefix + f"{i}"):
shutil.rmtree(self.keeper_instance_dir_prefix + f"{i}")
else:
for i in range(1, 3):
if os.path.exists(self.zookeeper_instance_dir_prefix + f"{i}"):
shutil.rmtree(self.zookeeper_instance_dir_prefix + f"{i}")
for dir in self.zookeeper_dirs_to_create:
os.makedirs(dir)
if self.use_keeper: # TODO: remove hardcoded paths from here
for i in range(1, 4):
shutil.copy(
os.path.join(HELPERS_DIR, f"keeper_config{i}.xml"),
os.path.join(
self.keeper_instance_dir_prefix + f"{i}", "config"
),
)
run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env)
self.up_called = True
self.wait_zookeeper_to_start()
for command in self.pre_zookeeper_commands:
self.run_kazoo_commands_with_retries(command, repeats=5)
if self.with_mysql_client and self.base_mysql_client_cmd:
logging.debug("Setup MySQL Client")
subprocess_check_call(self.base_mysql_client_cmd + common_opts)
self.wait_mysql_client_to_start()
if self.with_mysql and self.base_mysql_cmd:
logging.debug("Setup MySQL")
if os.path.exists(self.mysql_dir):
shutil.rmtree(self.mysql_dir)
os.makedirs(self.mysql_logs_dir)
os.chmod(self.mysql_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_mysql_cmd + common_opts)
self.up_called = True
self.wait_mysql_to_start()
if self.with_mysql8 and self.base_mysql8_cmd:
logging.debug("Setup MySQL 8")
if os.path.exists(self.mysql8_dir):
shutil.rmtree(self.mysql8_dir)
os.makedirs(self.mysql8_logs_dir)
os.chmod(self.mysql8_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_mysql8_cmd + common_opts)
self.wait_mysql8_to_start()
if self.with_mysql_cluster and self.base_mysql_cluster_cmd:
print("Setup MySQL")
if os.path.exists(self.mysql_cluster_dir):
shutil.rmtree(self.mysql_cluster_dir)
os.makedirs(self.mysql_cluster_logs_dir)
os.chmod(self.mysql_cluster_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_mysql_cluster_cmd + common_opts)
self.up_called = True
self.wait_mysql_cluster_to_start()
if self.with_postgres and self.base_postgres_cmd:
logging.debug("Setup Postgres")
if os.path.exists(self.postgres_dir):
shutil.rmtree(self.postgres_dir)
os.makedirs(self.postgres_logs_dir)
os.chmod(self.postgres_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_postgres_cmd + common_opts)
self.up_called = True
self.wait_postgres_to_start()
if self.with_postgres_cluster and self.base_postgres_cluster_cmd:
print("Setup Postgres")
os.makedirs(self.postgres2_logs_dir)
os.chmod(self.postgres2_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
os.makedirs(self.postgres3_logs_dir)
os.chmod(self.postgres3_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
os.makedirs(self.postgres4_logs_dir)
os.chmod(self.postgres4_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_postgres_cluster_cmd + common_opts)
self.up_called = True
self.wait_postgres_cluster_to_start()
if self.with_kafka and self.base_kafka_cmd:
logging.debug("Setup Kafka")
subprocess_check_call(
self.base_kafka_cmd + common_opts + ["--renew-anon-volumes"]
)
self.up_called = True
self.wait_kafka_is_available(self.kafka_docker_id, self.kafka_port)
self.wait_schema_registry_to_start()
if self.with_kerberized_kafka and self.base_kerberized_kafka_cmd:
logging.debug("Setup kerberized kafka")
run_and_check(
self.base_kerberized_kafka_cmd
+ common_opts
+ ["--renew-anon-volumes"]
)
self.up_called = True
self.wait_kafka_is_available(
self.kerberized_kafka_docker_id, self.kerberized_kafka_port, 100
)
if self.with_rabbitmq and self.base_rabbitmq_cmd:
logging.debug("Setup RabbitMQ")
os.makedirs(self.rabbitmq_logs_dir)
os.chmod(self.rabbitmq_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
for i in range(5):
subprocess_check_call(
self.base_rabbitmq_cmd + common_opts + ["--renew-anon-volumes"]
)
self.up_called = True
self.rabbitmq_docker_id = self.get_instance_docker_id("rabbitmq1")
logging.debug(f"RabbitMQ checking container try: {i}")
if self.wait_rabbitmq_to_start(throw=(i == 4)):
break
if self.with_hdfs and self.base_hdfs_cmd:
logging.debug("Setup HDFS")
os.makedirs(self.hdfs_logs_dir)
os.chmod(self.hdfs_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_hdfs_cmd + common_opts)
self.up_called = True
self.make_hdfs_api()
self.wait_hdfs_to_start()
if self.with_kerberized_hdfs and self.base_kerberized_hdfs_cmd:
logging.debug("Setup kerberized HDFS")
os.makedirs(self.hdfs_kerberized_logs_dir)
os.chmod(self.hdfs_kerberized_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
run_and_check(self.base_kerberized_hdfs_cmd + common_opts)
self.up_called = True
self.make_hdfs_api(kerberized=True)
self.wait_hdfs_to_start(check_marker=True)
if self.with_nginx and self.base_nginx_cmd:
logging.debug("Setup nginx")
subprocess_check_call(
self.base_nginx_cmd + common_opts + ["--renew-anon-volumes"]
)
self.up_called = True
self.nginx_docker_id = self.get_instance_docker_id("nginx")
self.wait_nginx_to_start()
if self.with_mongo and self.base_mongo_cmd:
logging.debug("Setup Mongo")
run_and_check(self.base_mongo_cmd + common_opts)
self.up_called = True
self.wait_mongo_to_start(30, secure=self.with_mongo_secure)
if self.with_redis and self.base_redis_cmd:
logging.debug("Setup Redis")
for i in range(5):
subprocess_check_call(self.base_redis_cmd + common_opts)
self.up_called = True
self.redis_docker_id = self.get_instance_docker_id(self.redis_host)
logging.debug(f"Redis checking container try: {i}")
if self.wait_redis_to_start(throw=(i == 4)):
break
if self.with_hive and self.base_hive_cmd:
logging.debug("Setup hive")
subprocess_check_call(self.base_hive_cmd + common_opts)
self.up_called = True
time.sleep(300)
if self.with_minio and self.base_minio_cmd:
# Copy minio certificates to minio/certs
os.mkdir(self.minio_dir)
if self.minio_certs_dir is None:
os.mkdir(os.path.join(self.minio_dir, "certs"))
else:
shutil.copytree(
os.path.join(self.base_dir, self.minio_certs_dir),
os.path.join(self.minio_dir, "certs"),
)
minio_start_cmd = self.base_minio_cmd + common_opts
logging.info(
"Trying to create Minio instance by command %s",
" ".join(map(str, minio_start_cmd)),
)
run_and_check(minio_start_cmd)
self.up_called = True
logging.info("Trying to connect to Minio...")
self.wait_minio_to_start(secure=self.minio_certs_dir is not None)
if self.with_azurite and self.base_azurite_cmd:
azurite_start_cmd = self.base_azurite_cmd + common_opts
logging.info(
"Trying to create Azurite instance by command %s",
" ".join(map(str, azurite_start_cmd)),
)
run_and_check(azurite_start_cmd)
self.up_called = True
logging.info("Trying to connect to Azurite")
self.wait_azurite_to_start()
if self.with_cassandra and self.base_cassandra_cmd:
subprocess_check_call(self.base_cassandra_cmd + ["up", "-d"])
self.up_called = True
self.wait_cassandra_to_start()
if self.with_jdbc_bridge and self.base_jdbc_bridge_cmd:
os.makedirs(self.jdbc_driver_logs_dir)
os.chmod(self.jdbc_driver_logs_dir, stat.S_IRWXU | stat.S_IRWXO)
subprocess_check_call(self.base_jdbc_bridge_cmd + ["up", "-d"])
self.up_called = True
self.jdbc_bridge_ip = self.get_instance_ip(self.jdbc_bridge_host)
self.wait_for_url(
f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping"
)
clickhouse_start_cmd = self.base_cmd + ["up", "-d", "--no-recreate"]
logging.debug(
"Trying to create ClickHouse instance by command %s",
" ".join(map(str, clickhouse_start_cmd)),
)
self.up_called = True
run_and_check(clickhouse_start_cmd)
logging.debug("ClickHouse instance created")
start_timeout = 300.0 # seconds
for instance in self.instances.values():
instance.docker_client = self.docker_client
instance.ip_address = self.get_instance_ip(instance.name)
logging.debug(
f"Waiting for ClickHouse start in {instance.name}, ip: {instance.ip_address}..."
)
instance.wait_for_start(start_timeout)
logging.debug(f"ClickHouse {instance.name} started")
instance.client = Client(
instance.ip_address, command=self.client_bin_path
)
self.is_up = True
except BaseException as e:
logging.debug("Failed to start cluster: ")
logging.debug(str(e))
logging.debug(traceback.print_exc())
self.shutdown()
raise
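# Tears the cluster down: dumps `docker-compose logs` to docker_logs_path,
# scans container and server logs for sanitizer reports and (unless
# ignore_fatal) Fatal messages, stops or kills the containers and removes
# volumes, and re-raises any sanitizer assert or fatal log as an exception so
# the test fails loudly.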
def shutdown(self, kill=True, ignore_fatal=True):
sanitizer_assert_instance = None
fatal_log = None
if self.up_called:
with open(self.docker_logs_path, "w+") as f:
try:
subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
self.base_cmd + ["logs"], stdout=f
)
except Exception as e:
logging.debug("Unable to get logs from docker.")
f.seek(0)
for line in f:
if SANITIZER_SIGN in line:
sanitizer_assert_instance = line.split("|")[0].strip()
break
if kill:
try:
run_and_check(self.base_cmd + ["stop", "--timeout", "20"])
except Exception as e:
logging.debug(
"Kill command failed during shutdown. {}".format(repr(e))
)
logging.debug("Trying to kill forcefully")
run_and_check(self.base_cmd + ["kill"])
# Check server logs for Fatal messages and sanitizer failures.
# NOTE: we cannot do this via docker since in case of Fatal message container may already die.
for name, instance in self.instances.items():
if instance.contains_in_log(SANITIZER_SIGN, from_host=True):
sanitizer_assert_instance = instance.grep_in_log(
SANITIZER_SIGN, from_host=True, filename="stderr.log"
)
logging.error(
"Sanitizer in instance %s log %s",
name,
sanitizer_assert_instance,
)
if not ignore_fatal and instance.contains_in_log(
"Fatal", from_host=True
):
fatal_log = instance.grep_in_log("Fatal", from_host=True)
if "Child process was terminated by signal 9 (KILL)" in fatal_log:
fatal_log = None
continue
logging.error("Crash in instance %s fatal log %s", name, fatal_log)
try:
subprocess_check_call(self.base_cmd + ["down", "--volumes"])
except Exception as e:
logging.debug(
"Down + remove orphans failed during shutdown. {}".format(repr(e))
)
else:
logging.warning(
"docker-compose up was not called. Trying to export docker.log for running containers"
)
self.cleanup()
self.is_up = False
self.docker_client = None
for instance in list(self.instances.values()):
instance.docker_client = None
instance.ip_address = None
instance.client = None
if sanitizer_assert_instance is not None:
raise Exception(
"Sanitizer assert found in {} for instance {}".format(
self.docker_logs_path, sanitizer_assert_instance
)
)
if fatal_log is not None:
raise Exception("Fatal messages found: {}".format(fatal_log))
def pause_container(self, instance_name):
subprocess_check_call(self.base_cmd + ["pause", instance_name])
# subprocess_check_call(self.base_cmd + ['kill', '-s SIGSTOP', instance_name])
def unpause_container(self, instance_name):
subprocess_check_call(self.base_cmd + ["unpause", instance_name])
# subprocess_check_call(self.base_cmd + ['kill', '-s SIGCONT', instance_name])
def open_bash_shell(self, instance_name):
os.system(" ".join(self.base_cmd + ["exec", instance_name, "/bin/bash"]))
def get_kazoo_client(self, zoo_instance_name):
use_ssl = False
if self.with_zookeeper_secure:
port = self.zookeeper_secure_port
use_ssl = True
elif self.with_zookeeper:
port = self.zookeeper_port
else:
raise Exception("Cluster has no ZooKeeper")
ip = self.get_instance_ip(zoo_instance_name)
logging.debug(
f"get_kazoo_client: {zoo_instance_name}, ip:{ip}, port:{port}, use_ssl:{use_ssl}"
)
zk = KazooClient(
hosts=f"{ip}:{port}",
use_ssl=use_ssl,
verify_certs=False,
certfile=self.zookeeper_certfile,
keyfile=self.zookeeper_keyfile,
)
zk.start()
return zk
def run_kazoo_commands_with_retries(
self, kazoo_callback, zoo_instance_name="zoo1", repeats=1, sleep_for=1
):
zk = self.get_kazoo_client(zoo_instance_name)
logging.debug(
f"run_kazoo_commands_with_retries: {zoo_instance_name}, {kazoo_callback}"
)
for i in range(repeats - 1):
try:
kazoo_callback(zk)
return
except KazooException as e:
logging.debug(repr(e))
time.sleep(sleep_for)
kazoo_callback(zk)
zk.stop()
def add_zookeeper_startup_command(self, command):
self.pre_zookeeper_commands.append(command)
def stop_zookeeper_nodes(self, zk_nodes):
for n in zk_nodes:
logging.info("Stopping zookeeper node: %s", n)
subprocess_check_call(self.base_zookeeper_cmd + ["stop", n])
def start_zookeeper_nodes(self, zk_nodes):
for n in zk_nodes:
logging.info("Starting zookeeper node: %s", n)
subprocess_check_call(self.base_zookeeper_cmd + ["start", n])
CLICKHOUSE_START_COMMAND = (
"clickhouse server --config-file=/etc/clickhouse-server/{main_config_file}"
" --log-file=/var/log/clickhouse-server/clickhouse-server.log "
" --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
)
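# Wrapper used for stay_alive instances: the server runs as a daemon while a
# `tail -f /dev/null` coprocess keeps the container's main process alive, so
# tests can stop and restart clickhouse-server without the container exiting.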
CLICKHOUSE_STAY_ALIVE_COMMAND = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format(
CLICKHOUSE_START_COMMAND
)
# /run/xtables.lock passed inside for correct iptables --wait
DOCKER_COMPOSE_TEMPLATE = """
version: '2.3'
services:
{name}:
image: {image}:{tag}
hostname: {hostname}
volumes:
- {instance_config_dir}:/etc/clickhouse-server/
- {db_dir}:/var/lib/clickhouse/
- {logs_dir}:/var/log/clickhouse-server/
- /etc/passwd:/etc/passwd:ro
- /run/xtables.lock:/run/xtables.lock:ro
{binary_volume}
{odbc_bridge_volume}
{library_bridge_volume}
{external_dirs_volumes}
{odbc_ini_path}
{keytab_path}
{krb5_conf}
entrypoint: {entrypoint_cmd}
tmpfs: {tmpfs}
cap_add:
- SYS_PTRACE
- NET_ADMIN
- IPC_LOCK
- SYS_NICE
depends_on: {depends_on}
user: '{user}'
env_file:
- {env_file}
security_opt:
- label:disable
dns_opt:
- attempts:2
- timeout:1
- inet6
- rotate
{networks}
{app_net}
{ipv4_address}
{ipv6_address}
{net_aliases}
{net_alias1}
"""
class ClickHouseInstance:
def __init__(
self,
cluster,
base_path,
name,
base_config_dir,
custom_main_configs,
custom_user_configs,
custom_dictionaries,
macros,
with_zookeeper,
zookeeper_config_path,
with_mysql_client,
with_mysql,
with_mysql8,
with_mysql_cluster,
with_kafka,
with_kerberized_kafka,
with_rabbitmq,
with_nginx,
with_kerberized_hdfs,
with_mongo,
with_redis,
with_minio,
with_azurite,
with_jdbc_bridge,
with_hive,
with_cassandra,
server_bin_path,
odbc_bridge_bin_path,
library_bridge_bin_path,
clickhouse_path_dir,
with_odbc_drivers,
with_postgres,
with_postgres_cluster,
clickhouse_start_command=CLICKHOUSE_START_COMMAND,
main_config_name="config.xml",
users_config_name="users.xml",
copy_common_configs=True,
hostname=None,
env_variables=None,
image="clickhouse/integration-test",
tag="latest",
stay_alive=False,
ipv4_address=None,
ipv6_address=None,
with_installed_binary=False,
external_dirs=None,
tmpfs=None,
config_root_name="clickhouse",
extra_configs=[],
):
self.name = name
self.base_cmd = cluster.base_cmd
self.docker_id = cluster.get_instance_docker_id(self.name)
self.cluster = cluster
self.hostname = hostname if hostname is not None else self.name
self.external_dirs = external_dirs
self.tmpfs = tmpfs or []
self.base_config_dir = (
p.abspath(p.join(base_path, base_config_dir)) if base_config_dir else None
)
self.custom_main_config_paths = [
p.abspath(p.join(base_path, c)) for c in custom_main_configs
]
self.custom_user_config_paths = [
p.abspath(p.join(base_path, c)) for c in custom_user_configs
]
self.custom_dictionaries_paths = [
p.abspath(p.join(base_path, c)) for c in custom_dictionaries
]
self.custom_extra_config_paths = [
p.abspath(p.join(base_path, c)) for c in extra_configs
]
self.clickhouse_path_dir = (
p.abspath(p.join(base_path, clickhouse_path_dir))
if clickhouse_path_dir
else None
)
self.kerberos_secrets_dir = p.abspath(p.join(base_path, "secrets"))
self.macros = macros if macros is not None else {}
self.with_zookeeper = with_zookeeper
self.zookeeper_config_path = zookeeper_config_path
self.server_bin_path = server_bin_path
self.odbc_bridge_bin_path = odbc_bridge_bin_path
self.library_bridge_bin_path = library_bridge_bin_path
self.with_mysql_client = with_mysql_client
self.with_mysql = with_mysql
self.with_mysql8 = with_mysql8
self.with_mysql_cluster = with_mysql_cluster
self.with_postgres = with_postgres
self.with_postgres_cluster = with_postgres_cluster
self.with_kafka = with_kafka
self.with_kerberized_kafka = with_kerberized_kafka
self.with_rabbitmq = with_rabbitmq
self.with_nginx = with_nginx
self.with_kerberized_hdfs = with_kerberized_hdfs
self.with_mongo = with_mongo
self.with_redis = with_redis
self.with_minio = with_minio
self.with_azurite = with_azurite
self.with_cassandra = with_cassandra
self.with_jdbc_bridge = with_jdbc_bridge
self.with_hive = with_hive
self.main_config_name = main_config_name
self.users_config_name = users_config_name
self.copy_common_configs = copy_common_configs
self.clickhouse_start_command = clickhouse_start_command.replace(
"{main_config_file}", self.main_config_name
)
self.path = p.join(self.cluster.instances_dir, name)
self.docker_compose_path = p.join(self.path, "docker-compose.yml")
self.env_variables = env_variables or {}
self.env_file = self.cluster.env_file
if with_odbc_drivers:
self.odbc_ini_path = self.path + "/odbc.ini:/etc/odbc.ini"
self.with_mysql = True
else:
self.odbc_ini_path = ""
if with_kerberized_kafka or with_kerberized_hdfs:
self.keytab_path = (
"- "
+ os.path.dirname(self.docker_compose_path)
+ "/secrets:/tmp/keytab"
)
self.krb5_conf = (
"- "
+ os.path.dirname(self.docker_compose_path)
+ "/secrets/krb.conf:/etc/krb5.conf:ro"
)
else:
self.keytab_path = ""
self.krb5_conf = ""
self.docker_client = None
self.ip_address = None
self.client = None
self.image = image
self.tag = tag
self.stay_alive = stay_alive
self.ipv4_address = ipv4_address
self.ipv6_address = ipv6_address
self.with_installed_binary = with_installed_binary
self.is_up = False
self.config_root_name = config_root_name
def is_built_with_sanitizer(self, sanitizer_name=""):
build_opts = self.query(
"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'"
)
return "-fsanitize={}".format(sanitizer_name) in build_opts
def is_debug_build(self):
build_opts = self.query(
"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'"
)
return "NDEBUG" not in build_opts
def is_built_with_thread_sanitizer(self):
return self.is_built_with_sanitizer("thread")
def is_built_with_address_sanitizer(self):
return self.is_built_with_sanitizer("address")
def is_built_with_memory_sanitizer(self):
return self.is_built_with_sanitizer("memory")
# Connects to the instance via clickhouse-client, sends a query (1st argument) and returns the answer
def query(
self,
sql,
stdin=None,
timeout=None,
settings=None,
user=None,
password=None,
database=None,
ignore_error=False,
query_id=None,
):
logging.debug("Executing query %s on %s", sql, self.name)
return self.client.query(
sql,
stdin=stdin,
timeout=timeout,
settings=settings,
user=user,
password=password,
database=database,
ignore_error=ignore_error,
query_id=query_id,
)
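# As query(), but retries on errors and, optionally, until check_callback
# accepts the result. If no attempt satisfies the callback, the result of the
# last successful attempt is returned; if every attempt failed, an exception
# is raised.
# Usage sketch (the query and threshold are hypothetical):
#   node.query_with_retry(
#       "SELECT count() FROM system.clusters",
#       check_callback=lambda res: int(res) > 0,
#   )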
def query_with_retry(
self,
sql,
stdin=None,
timeout=None,
settings=None,
user=None,
password=None,
database=None,
ignore_error=False,
retry_count=20,
sleep_time=0.5,
check_callback=lambda x: True,
):
logging.debug(f"Executing query {sql} on {self.name}")
result = None
for i in range(retry_count):
try:
result = self.query(
sql,
stdin=stdin,
timeout=timeout,
settings=settings,
user=user,
password=password,
database=database,
ignore_error=ignore_error,
)
if check_callback(result):
return result
time.sleep(sleep_time)
except Exception as ex:
logging.debug("Retry {} got exception {}".format(i + 1, ex))
time.sleep(sleep_time)
if result is not None:
return result
raise Exception("Can't execute query {}".format(sql))
# As query() but doesn't wait response and returns response handler
def get_query_request(self, sql, *args, **kwargs):
logging.debug(f"Executing query {sql} on {self.name}")
return self.client.get_query_request(sql, *args, **kwargs)
# Connects to the instance via clickhouse-client, sends a query (1st argument), expects an error and returns it
def query_and_get_error(
self,
sql,
stdin=None,
timeout=None,
settings=None,
user=None,
password=None,
database=None,
):
logging.debug(f"Executing query {sql} on {self.name}")
return self.client.query_and_get_error(
sql,
stdin=stdin,
timeout=timeout,
settings=settings,
user=user,
password=password,
database=database,
)
# As query_and_get_error, but does not require the query to fail: returns the answer together with any error output.
def query_and_get_answer_with_error(
self,
sql,
stdin=None,
timeout=None,
settings=None,
user=None,
password=None,
database=None,
):
logging.debug(f"Executing query {sql} on {self.name}")
return self.client.query_and_get_answer_with_error(
sql,
stdin=stdin,
timeout=timeout,
settings=settings,
user=user,
password=password,
database=database,
)
# Connects to the instance via HTTP interface, sends a query and returns the answer
def http_query(
self,
sql,
data=None,
params=None,
user=None,
password=None,
expect_fail_and_get_error=False,
port=8123,
timeout=None,
retry_strategy=None,
):
logging.debug(f"Executing query {sql} on {self.name} via HTTP interface")
if params is None:
params = {}
else:
params = params.copy()
if sql is not None:
params["query"] = sql
auth = None
if user and password:
auth = requests.auth.HTTPBasicAuth(user, password)
elif user:
auth = requests.auth.HTTPBasicAuth(user, "")
url = f"http://{self.ip_address}:{port}/?" + urllib.parse.urlencode(params)
if retry_strategy is None:
requester = requests
else:
adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
requester = requests.Session()
requester.mount("https://", adapter)
requester.mount("http://", adapter)
if data:
r = requester.post(url, data, auth=auth, timeout=timeout)
else:
r = requester.get(url, auth=auth, timeout=timeout)
def http_code_and_message():
code = r.status_code
return str(code) + " " + http.client.responses[code] + ": " + r.text
if expect_fail_and_get_error:
if r.ok:
raise Exception(
"ClickHouse HTTP server is expected to fail, but succeeded: "
+ r.text
)
return http_code_and_message()
else:
if not r.ok:
raise Exception(
"ClickHouse HTTP server returned " + http_code_and_message()
)
return r.text
# Connects to the instance via the HTTP interface, sends an arbitrary HTTP request and returns the raw response
def http_request(self, url, method="GET", params=None, data=None, headers=None):
logging.debug(f"Sending HTTP request {url} to {self.name}")
url = "http://" + self.ip_address + ":8123/" + url
return requests.request(
method=method, url=url, params=params, data=data, headers=headers
)
# Connects to the instance via HTTP interface, sends a query, expects an error and return the error message
def http_query_and_get_error(
self, sql, data=None, params=None, user=None, password=None
):
logging.debug(f"Executing query {sql} on {self.name} via HTTP interface")
return self.http_query(
sql=sql,
data=data,
params=params,
user=user,
password=password,
expect_fail_and_get_error=True,
)
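# Stops the clickhouse-server process inside a stay_alive container via pkill
# (SIGKILL when kill=True). If the server does not stop within stop_wait_sec,
# a gdb backtrace of all threads is dumped to logs/stdout.log and the stop is
# retried with kill=True.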
def stop_clickhouse(self, stop_wait_sec=30, kill=False):
if not self.stay_alive:
raise Exception(
"clickhouse can be stopped only with stay_alive=True instance"
)
try:
ps_clickhouse = self.exec_in_container(
["bash", "-c", "ps -C clickhouse"], nothrow=True, user="root"
)
if ps_clickhouse == " PID TTY STAT TIME COMMAND":
logging.warning("ClickHouse process already stopped")
return
self.exec_in_container(
["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")],
user="root",
)
start_time = time.time()
stopped = False
while time.time() <= start_time + stop_wait_sec:
pid = self.get_process_pid("clickhouse")
if pid is None:
stopped = True
break
else:
time.sleep(1)
if not stopped:
pid = self.get_process_pid("clickhouse")
if pid is not None:
logging.warning(
f"Force kill clickhouse in stop_clickhouse. ps:{pid}"
)
self.exec_in_container(
[
"bash",
"-c",
f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}",
],
user="root",
)
self.stop_clickhouse(kill=True)
else:
ps_all = self.exec_in_container(
["bash", "-c", "ps aux"], nothrow=True, user="root"
)
logging.warning(
f"We want force stop clickhouse, but no clickhouse-server is running\n{ps_all}"
)
return
except Exception as e:
logging.warning(f"Stop ClickHouse raised an error {e}")
def start_clickhouse(self, start_wait_sec=60):
if not self.stay_alive:
raise Exception(
"ClickHouse can be started again only with stay_alive=True instance"
)
start_time = time.time()
time_to_sleep = 0.5
while start_time + start_wait_sec >= time.time():
# sometimes after SIGKILL (hard reset) server may refuse to start for some time
# for different reasons.
pid = self.get_process_pid("clickhouse")
if pid is None:
logging.debug("No clickhouse process running. Start new one.")
self.exec_in_container(
["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)],
user=str(os.getuid()),
)
time.sleep(1)
continue
else:
logging.debug("Clickhouse process running.")
try:
self.wait_start(start_wait_sec + start_time - time.time())
return
except Exception as e:
logging.warning(
f"Current start attempt failed. Will kill {pid} just in case."
)
self.exec_in_container(
["bash", "-c", f"kill -9 {pid}"], user="root", nothrow=True
)
time.sleep(time_to_sleep)
raise Exception("Cannot start ClickHouse, see additional info in logs")
def wait_start(self, start_wait_sec):
start_time = time.time()
last_err = None
while True:
try:
pid = self.get_process_pid("clickhouse")
if pid is None:
raise Exception("ClickHouse server is not running. Check logs.")
exec_query_with_retry(self, "select 20", retry_count=10, silent=True)
return
except QueryRuntimeException as err:
last_err = err
pid = self.get_process_pid("clickhouse")
if pid is not None:
logging.warning(f"ERROR {err}")
else:
raise Exception("ClickHouse server is not running. Check logs.")
if time.time() > start_time + start_wait_sec:
break
logging.error(
f"No time left to start. But process is still running. Will dump threads."
)
ps_clickhouse = self.exec_in_container(
["bash", "-c", "ps -C clickhouse"], nothrow=True, user="root"
)
logging.info(f"PS RESULT:\n{ps_clickhouse}")
pid = self.get_process_pid("clickhouse")
if pid is not None:
self.exec_in_container(
["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"],
user="root",
)
if last_err is not None:
raise last_err
def restart_clickhouse(self, stop_start_wait_sec=60, kill=False):
self.stop_clickhouse(stop_start_wait_sec, kill)
self.start_clickhouse(stop_start_wait_sec)
def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs):
return self.cluster.exec_in_container(
self.docker_id, cmd, detach, nothrow, **kwargs
)
def rotate_logs(self):
self.exec_in_container(
["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"],
user="root",
)
def contains_in_log(
self, substring, from_host=False, filename="clickhouse-server.log"
):
if from_host:
# First check that the file exists, but look through all rotated logs as well
result = subprocess_check_call(
[
"bash",
"-c",
f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true',
]
)
else:
result = self.exec_in_container(
[
"bash",
"-c",
f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true',
]
)
return len(result) > 0
def grep_in_log(self, substring, from_host=False, filename="clickhouse-server.log"):
logging.debug(f"grep in log called %s", substring)
if from_host:
# First check that the file exists, but look through all rotated logs as well
result = subprocess_check_call(
[
"bash",
"-c",
f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true',
]
)
else:
result = self.exec_in_container(
[
"bash",
"-c",
f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true',
]
)
logging.debug("grep result %s", result)
return result
def count_in_log(self, substring):
result = self.exec_in_container(
[
"bash",
"-c",
'grep -a "{}" /var/log/clickhouse-server/clickhouse-server.log | wc -l'.format(
substring
),
]
)
return result
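# Tails the server log inside the container until `regexp` has matched
# `repetitions` times or `timeout` expires; returns how long the wait took and
# raises if the expected number of matches does not appear in time.
# Usage sketch (the pattern is hypothetical):
#   node.wait_for_log_line("Starting ClickHouse", timeout=60)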
def wait_for_log_line(
self,
regexp,
filename="/var/log/clickhouse-server/clickhouse-server.log",
timeout=30,
repetitions=1,
look_behind_lines=100,
):
start_time = time.time()
result = self.exec_in_container(
[
"bash",
"-c",
'timeout {} tail -Fn{} "{}" | grep -Em {} {}'.format(
timeout,
look_behind_lines,
filename,
repetitions,
shlex.quote(regexp),
),
]
)
# If repetitions > 1, grep can exit successfully even though fewer matching lines than expected were collected, so re-check the count here.
if repetitions > 1 and len(result.splitlines()) < repetitions:
logging.debug(
"wait_for_log_line: those lines were found during {} seconds:".format(
timeout
)
)
logging.debug(result)
raise Exception(
"wait_for_log_line: Not enough repetitions: {} found, while {} expected".format(
len(result.splitlines()), repetitions
)
)
wait_duration = time.time() - start_time
logging.debug(
'{} log line(s) matching "{}" appeared in a {:.3f} seconds'.format(
repetitions, regexp, wait_duration
)
)
return wait_duration
def path_exists(self, path):
return (
self.exec_in_container(
[
"bash",
"-c",
"echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format(
path
),
]
)
== "yes\n"
)
def copy_file_to_container(self, local_path, dest_path):
return self.cluster.copy_file_to_container(
self.docker_id, local_path, dest_path
)
def get_process_pid(self, process_name):
output = self.exec_in_container(
[
"bash",
"-c",
"ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format(
process_name
),
]
)
if output:
try:
pid = int(output.split("\n")[0].strip())
return pid
except ValueError:
return None
return None
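# Stops the running server (with the given signal, escalating to SIGKILL if it
# hangs), restores /usr/bin/clickhouse from the copy saved by
# restart_with_latest_version below, restarts the daemon and waits for it
# within the remaining stop_start_wait_sec budget.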
def restart_with_original_version(
self, stop_start_wait_sec=300, callback_onstop=None, signal=15
):
begin_time = time.time()
if not self.stay_alive:
raise Exception("Cannot restart not stay alive container")
self.exec_in_container(
["bash", "-c", "pkill -{} clickhouse".format(signal)], user="root"
)
retries = int(stop_start_wait_sec / 0.5)
local_counter = 0
# wait stop
while local_counter < retries:
if not self.get_process_pid("clickhouse server"):
break
time.sleep(0.5)
local_counter += 1
# force kill if server hangs
if self.get_process_pid("clickhouse server"):
# server can die before kill, so don't throw exception, it's expected
self.exec_in_container(
["bash", "-c", "pkill -{} clickhouse".format(9)],
nothrow=True,
user="root",
)
if callback_onstop:
callback_onstop(self)
self.exec_in_container(
[
"bash",
"-c",
"echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version",
]
)
self.exec_in_container(
[
"bash",
"-c",
"cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse",
],
user="root",
)
self.exec_in_container(
[
"bash",
"-c",
"cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse",
],
user="root",
)
self.exec_in_container(
["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)],
user=str(os.getuid()),
)
# wait start
time_left = begin_time + stop_start_wait_sec - time.time()
if time_left <= 0:
raise Exception(f"No time left during restart")
else:
self.wait_start(time_left)
def restart_with_latest_version(
self, stop_start_wait_sec=300, callback_onstop=None, signal=15
):
begin_time = time.time()
if not self.stay_alive:
raise Exception("Cannot restart not stay alive container")
self.exec_in_container(
["bash", "-c", "pkill -{} clickhouse".format(signal)], user="root"
)
retries = int(stop_start_wait_sec / 0.5)
local_counter = 0
# wait stop
while local_counter < retries:
if not self.get_process_pid("clickhouse server"):
break
time.sleep(0.5)
local_counter += 1
# force kill if server hangs
if self.get_process_pid("clickhouse server"):
# server can die before kill, so don't throw exception, it's expected
self.exec_in_container(
["bash", "-c", "pkill -{} clickhouse".format(9)],
nothrow=True,
user="root",
)
if callback_onstop:
callback_onstop(self)
self.exec_in_container(
["bash", "-c", "cp /usr/bin/clickhouse /usr/share/clickhouse_original"],
user="root",
)
self.exec_in_container(
[
"bash",
"-c",
"cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse",
],
user="root",
)
self.exec_in_container(
[
"bash",
"-c",
"echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' /usr/share/clickhouse_fresh server --version",
]
)
self.exec_in_container(
[
"bash",
"-c",
"cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse",
],
user="root",
)
self.exec_in_container(
["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)],
user=str(os.getuid()),
)
# wait start
time_left = begin_time + stop_start_wait_sec - time.time()
if time_left <= 0:
raise Exception(f"No time left during restart")
else:
self.wait_start(time_left)
def get_docker_handle(self):
return self.cluster.get_docker_handle(self.docker_id)
def stop(self):
self.get_docker_handle().stop()
def start(self):
self.get_docker_handle().start()
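# Waits until the server inside the container accepts TCP connections on the
# native port (9000): polls the container status, fails fast if the container
# exited, and extends the deadline to connection_timeout while the server log
# keeps growing (e.g. slow startups in builds with sanitizers).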
def wait_for_start(self, start_timeout=None, connection_timeout=None):
handle = self.get_docker_handle()
if start_timeout is None or start_timeout <= 0:
raise Exception("Invalid timeout: {}".format(start_timeout))
if connection_timeout is not None and connection_timeout < start_timeout:
raise Exception(
"Connection timeout {} should be grater then start timeout {}".format(
connection_timeout, start_timeout
)
)
start_time = time.time()
prev_rows_in_log = 0
def has_new_rows_in_log():
nonlocal prev_rows_in_log
try:
rows_in_log = int(self.count_in_log(".*").strip())
res = rows_in_log > prev_rows_in_log
prev_rows_in_log = rows_in_log
return res
except ValueError:
return False
while True:
handle.reload()
status = handle.status
if status == "exited":
raise Exception(
f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}"
)
deadline = start_time + start_timeout
# It is possible that server starts slowly.
# If container is running, and there is some progress in log, check connection_timeout.
if connection_timeout and status == "running" and has_new_rows_in_log():
deadline = start_time + connection_timeout
current_time = time.time()
if current_time >= deadline:
raise Exception(
f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. "
f"Container status: {status}, logs: {handle.logs().decode('utf-8')}"
)
socket_timeout = min(start_timeout, deadline - current_time)
# Repeatedly poll the instance address until there is something that listens there.
# Usually it means that ClickHouse is ready to accept queries.
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(socket_timeout)
sock.connect((self.ip_address, 9000))
self.is_up = True
return
except socket.timeout:
continue
except socket.error as e:
if (
e.errno == errno.ECONNREFUSED
or e.errno == errno.EHOSTUNREACH
or e.errno == errno.ENETUNREACH
):
time.sleep(0.1)
else:
raise
finally:
sock.close()
def dict_to_xml(self, dictionary):
xml_str = dict2xml(
dictionary, wrap=self.config_root_name, indent=" ", newlines=True
)
return xml_str
@property
def odbc_drivers(self):
if self.odbc_ini_path:
return {
"SQLite3": {
"DSN": "sqlite3_odbc",
"Database": "/tmp/sqliteodbc",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/libsqlite3odbc.so",
"Setup": "/usr/lib/x86_64-linux-gnu/odbc/libsqlite3odbc.so",
},
"MySQL": {
"DSN": "mysql_odbc",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so",
"Database": odbc_mysql_db,
"Uid": odbc_mysql_uid,
"Pwd": odbc_mysql_pass,
"Server": self.cluster.mysql_host,
},
"PostgreSQL": {
"DSN": "postgresql_odbc",
"Database": odbc_psql_db,
"UserName": odbc_psql_user,
"Password": odbc_psql_pass,
"Port": str(self.cluster.postgres_port),
"Servername": self.cluster.postgres_host,
"Protocol": "9.3",
"ReadOnly": "No",
"RowVersioning": "No",
"ShowSystemTables": "No",
"Driver": "/usr/lib/x86_64-linux-gnu/odbc/psqlodbca.so",
"Setup": "/usr/lib/x86_64-linux-gnu/odbc/libodbcpsqlS.so",
"ConnSettings": "",
},
}
else:
return {}
def _create_odbc_config_file(self):
with open(self.odbc_ini_path.split(":")[0], "w") as f:
for driver_setup in list(self.odbc_drivers.values()):
f.write("[{}]\n".format(driver_setup["DSN"]))
for key, value in list(driver_setup.items()):
if key != "DSN":
f.write(key + "=" + value + "\n")
def replace_config(self, path_to_config, replacement):
self.exec_in_container(
["bash", "-c", "echo '{}' > {}".format(replacement, path_to_config)]
)
def replace_in_config(self, path_to_config, replace, replacement):
self.exec_in_container(
["bash", "-c", f"sed -i 's/{replace}/{replacement}/g' {path_to_config}"]
)
def create_dir(self, destroy_dir=True):
"""Create the instance directory and all the needed files there."""
if destroy_dir:
self.destroy_dir()
elif p.exists(self.path):
return
os.makedirs(self.path)
instance_config_dir = p.abspath(p.join(self.path, "configs"))
os.makedirs(instance_config_dir)
print(
f"Copy common default production configuration from {self.base_config_dir}. Files: {self.main_config_name}, {self.users_config_name}"
)
shutil.copyfile(
p.join(self.base_config_dir, self.main_config_name),
p.join(instance_config_dir, self.main_config_name),
)
shutil.copyfile(
p.join(self.base_config_dir, self.users_config_name),
p.join(instance_config_dir, self.users_config_name),
)
logging.debug("Create directory for configuration generated in this helper")
# used by all utils with any config
conf_d_dir = p.abspath(p.join(instance_config_dir, "conf.d"))
os.mkdir(conf_d_dir)
logging.debug("Create directory for common tests configuration")
# used by server with main config.xml
self.config_d_dir = p.abspath(p.join(instance_config_dir, "config.d"))
os.mkdir(self.config_d_dir)
users_d_dir = p.abspath(p.join(instance_config_dir, "users.d"))
os.mkdir(users_d_dir)
dictionaries_dir = p.abspath(p.join(instance_config_dir, "dictionaries"))
os.mkdir(dictionaries_dir)
extra_conf_dir = p.abspath(p.join(instance_config_dir, "extra_conf.d"))
os.mkdir(extra_conf_dir)
def write_embedded_config(name, dest_dir, fix_log_level=False):
with open(p.join(HELPERS_DIR, name), "r") as f:
data = f.read()
data = data.replace("clickhouse", self.config_root_name)
if fix_log_level:
data = data.replace("<level>test</level>", "<level>trace</level>")
with open(p.join(dest_dir, name), "w") as r:
r.write(data)
logging.debug("Copy common configuration from helpers")
# The file is named with 0_ prefix to be processed before other configuration overloads.
if self.copy_common_configs:
need_fix_log_level = self.tag != "latest"
write_embedded_config(
"0_common_instance_config.xml", self.config_d_dir, need_fix_log_level
)
write_embedded_config("0_common_instance_users.xml", users_d_dir)
if len(self.custom_dictionaries_paths):
write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir)
logging.debug("Generate and write macros file")
macros = self.macros.copy()
macros["instance"] = self.name
with open(p.join(conf_d_dir, "macros.xml"), "w") as macros_config:
macros_config.write(self.dict_to_xml({"macros": macros}))
# Put ZooKeeper config
if self.with_zookeeper:
shutil.copy(self.zookeeper_config_path, conf_d_dir)
if self.with_kerberized_kafka or self.with_kerberized_hdfs:
shutil.copytree(
self.kerberos_secrets_dir, p.abspath(p.join(self.path, "secrets"))
)
# Copy config.d configs
logging.debug(
f"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}"
)
for path in self.custom_main_config_paths:
shutil.copy(path, self.config_d_dir)
# Copy users.d configs
for path in self.custom_user_config_paths:
shutil.copy(path, users_d_dir)
# Copy dictionaries configs to configs/dictionaries
for path in self.custom_dictionaries_paths:
shutil.copy(path, dictionaries_dir)
for path in self.custom_extra_config_paths:
shutil.copy(path, extra_conf_dir)
db_dir = p.abspath(p.join(self.path, "database"))
logging.debug(f"Setup database dir {db_dir}")
if self.clickhouse_path_dir is not None:
logging.debug(f"Database files taken from {self.clickhouse_path_dir}")
shutil.copytree(self.clickhouse_path_dir, db_dir)
logging.debug(
f"Database copied from {self.clickhouse_path_dir} to {db_dir}"
)
else:
os.mkdir(db_dir)
logs_dir = p.abspath(p.join(self.path, "logs"))
logging.debug(f"Setup logs dir {logs_dir}")
os.mkdir(logs_dir)
self.logs_dir = logs_dir
depends_on = []
if self.with_mysql_client:
depends_on.append(self.cluster.mysql_client_host)
if self.with_mysql:
depends_on.append("mysql57")
if self.with_mysql8:
depends_on.append("mysql80")
if self.with_mysql_cluster:
depends_on.append("mysql57")
depends_on.append("mysql2")
depends_on.append("mysql3")
depends_on.append("mysql4")
if self.with_postgres_cluster:
depends_on.append("postgres2")
depends_on.append("postgres3")
depends_on.append("postgres4")
if self.with_kafka:
depends_on.append("kafka1")
depends_on.append("schema-registry")
if self.with_kerberized_kafka:
depends_on.append("kerberized_kafka1")
if self.with_kerberized_hdfs:
depends_on.append("kerberizedhdfs1")
if self.with_rabbitmq:
depends_on.append("rabbitmq1")
if self.with_zookeeper:
depends_on.append("zoo1")
depends_on.append("zoo2")
depends_on.append("zoo3")
if self.with_minio:
depends_on.append("minio1")
if self.with_azurite:
depends_on.append("azurite1")
self.cluster.env_variables.update(self.env_variables)
odbc_ini_path = ""
if self.odbc_ini_path:
self._create_odbc_config_file()
odbc_ini_path = "- " + self.odbc_ini_path
entrypoint_cmd = self.clickhouse_start_command
if self.stay_alive:
entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND.replace(
"{main_config_file}", self.main_config_name
)
else:
entrypoint_cmd = (
"["
+ ", ".join(map(lambda x: '"' + x + '"', entrypoint_cmd.split()))
+ "]"
)
logging.debug("Entrypoint cmd: {}".format(entrypoint_cmd))
networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = ""
if (
self.ipv4_address is not None
or self.ipv6_address is not None
or self.hostname != self.name
):
networks = "networks:"
app_net = "default:"
if self.ipv4_address is not None:
ipv4_address = "ipv4_address: " + self.ipv4_address
if self.ipv6_address is not None:
ipv6_address = "ipv6_address: " + self.ipv6_address
if self.hostname != self.name:
net_aliases = "aliases:"
net_alias1 = "- " + self.hostname
if not self.with_installed_binary:
binary_volume = "- " + self.server_bin_path + ":/usr/bin/clickhouse"
odbc_bridge_volume = (
"- " + self.odbc_bridge_bin_path + ":/usr/bin/clickhouse-odbc-bridge"
)
library_bridge_volume = (
"- "
+ self.library_bridge_bin_path
+ ":/usr/bin/clickhouse-library-bridge"
)
else:
binary_volume = "- " + self.server_bin_path + ":/usr/share/clickhouse_fresh"
odbc_bridge_volume = (
"- "
+ self.odbc_bridge_bin_path
+ ":/usr/share/clickhouse-odbc-bridge_fresh"
)
library_bridge_volume = (
"- "
+ self.library_bridge_bin_path
+ ":/usr/share/clickhouse-library-bridge_fresh"
)
external_dirs_volumes = ""
if self.external_dirs:
for external_dir in self.external_dirs:
external_dir_abs_path = p.abspath(
p.join(self.cluster.instances_dir, external_dir.lstrip("/"))
)
logging.info(f"external_dir_abs_path={external_dir_abs_path}")
os.makedirs(external_dir_abs_path, exist_ok=True)
external_dirs_volumes += (
"- " + external_dir_abs_path + ":" + external_dir + "\n"
)
with open(self.docker_compose_path, "w") as docker_compose:
docker_compose.write(
DOCKER_COMPOSE_TEMPLATE.format(
image=self.image,
tag=self.tag,
name=self.name,
hostname=self.hostname,
binary_volume=binary_volume,
odbc_bridge_volume=odbc_bridge_volume,
library_bridge_volume=library_bridge_volume,
instance_config_dir=instance_config_dir,
config_d_dir=self.config_d_dir,
db_dir=db_dir,
external_dirs_volumes=external_dirs_volumes,
tmpfs=str(self.tmpfs),
logs_dir=logs_dir,
depends_on=str(depends_on),
user=os.getuid(),
env_file=self.env_file,
odbc_ini_path=odbc_ini_path,
keytab_path=self.keytab_path,
krb5_conf=self.krb5_conf,
entrypoint_cmd=entrypoint_cmd,
networks=networks,
app_net=app_net,
ipv4_address=ipv4_address,
ipv6_address=ipv6_address,
net_aliases=net_aliases,
net_alias1=net_alias1,
)
)
def destroy_dir(self):
if p.exists(self.path):
shutil.rmtree(self.path)
def wait_for_path_exists(self, path, seconds):
while seconds > 0:
seconds -= 1
if self.path_exists(path):
return
time.sleep(1)
def get_backuped_s3_objects(self, disk, backup_name):
path = f"/var/lib/clickhouse/disks/{disk}/shadow/{backup_name}/store"
self.wait_for_path_exists(path, 10)
command = [
"find",
path,
"-type",
"f",
"-exec",
"grep",
"-o",
"r[01]\\{64\\}-file-[[:lower:]]\\{32\\}",
"{}",
";",
]
return self.exec_in_container(command).split("\n")
class ClickHouseKiller(object):
def __init__(self, clickhouse_node):
self.clickhouse_node = clickhouse_node
def __enter__(self):
self.clickhouse_node.stop_clickhouse(kill=True)
def __exit__(self, exc_type, exc_val, exc_tb):
self.clickhouse_node.start_clickhouse()
|
the-stack_0_10519 | """ This file contains code for a fully convolutional
(i.e. contains zero fully connected layers) neural network
for detecting lanes. This version assumes the inputs
to be road images in the shape of 80 x 160 x 3 (RGB) with
the labels as 80 x 160 x 1 (just the G channel with a
re-drawn lane). Note that in order to view a returned image,
the predictions is later stacked with zero'ed R and B layers
and added back to the initial road image.
"""
import numpy as np
import pickle
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# Import necessary items from Keras
from keras.models import Sequential
from keras.layers import Activation, Dropout, UpSampling2D
from keras.layers import Conv2DTranspose, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras import regularizers
# Load training images
train_images = pickle.load(open("full_CNN_train.p", "rb" ))
# Load image labels
labels = pickle.load(open("full_CNN_labels.p", "rb" ))
# Make into arrays as the neural network wants these
train_images = np.array(train_images)
labels = np.array(labels)
# Normalize labels - training images get normalized to start in the network
labels = labels / 255
# Shuffle images along with their labels, then split into training/validation sets
train_images, labels = shuffle(train_images, labels)
# Test size may be 10% or 20%
X_train, X_val, y_train, y_val = train_test_split(train_images, labels, test_size=0.1)
# Batch size, epochs and pool size below are all parameters to fiddle with for optimization
batch_size = 128
epochs = 10
pool_size = (2, 2)
input_shape = X_train.shape[1:]
### Here is the actual neural network ###
model = Sequential()
# Normalizes incoming inputs. First layer needs the input shape to work
model.add(BatchNormalization(input_shape=input_shape))
# Below layers were re-named for easier reading of the model summary; this is not necessary
# Conv Layer 1
model.add(Conv2D(8, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv1'))
# Conv Layer 2
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv2'))
# Pooling 1
model.add(MaxPooling2D(pool_size=pool_size))
# Conv Layer 3
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv3'))
model.add(Dropout(0.2))
# Conv Layer 4
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv4'))
model.add(Dropout(0.2))
# Conv Layer 5
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv5'))
model.add(Dropout(0.2))
# Pooling 2
model.add(MaxPooling2D(pool_size=pool_size))
# Conv Layer 6
model.add(Conv2D(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv6'))
model.add(Dropout(0.2))
# Conv Layer 7
model.add(Conv2D(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv7'))
model.add(Dropout(0.2))
# Pooling 3
model.add(MaxPooling2D(pool_size=pool_size))
# Upsample 1
model.add(UpSampling2D(size=pool_size))
# Deconv 1
model.add(Conv2DTranspose(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv1'))
model.add(Dropout(0.2))
# Deconv 2
model.add(Conv2DTranspose(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv2'))
model.add(Dropout(0.2))
# Upsample 2
model.add(UpSampling2D(size=pool_size))
# Deconv 3
model.add(Conv2DTranspose(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv3'))
model.add(Dropout(0.2))
# Deconv 4
model.add(Conv2DTranspose(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv4'))
model.add(Dropout(0.2))
# Deconv 5
model.add(Conv2DTranspose(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv5'))
model.add(Dropout(0.2))
# Upsample 3
model.add(UpSampling2D(size=pool_size))
# Deconv 6
model.add(Conv2DTranspose(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv6'))
# Final layer - only including one channel so 1 filter
model.add(Conv2DTranspose(1, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Final'))
### End of network ###
# Using a generator to help the model use less data
# Channel shifts help with shadows slightly
datagen = ImageDataGenerator(channel_shift_range=0.2)
datagen.fit(X_train)
# Compiling and training the model
model.compile(optimizer='Adam', loss='mean_squared_error')
model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), steps_per_epoch=len(X_train)/batch_size,
epochs=epochs, verbose=1, validation_data=(X_val, y_val))
# Freeze layers since training is done
model.trainable = False
model.compile(optimizer='Adam', loss='mean_squared_error')
# Save model architecture and weights
model.save('full_CNN_model.h5')
# Show summary of model
model.summary()
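# Illustrative sketch of the viewing step described in the module docstring;
# `road_image` (an 80 x 160 x 3 uint8 frame) and the OpenCV blend are assumptions,
# not part of this training script:
# prediction = model.predict(road_image[None, ...])[0] * 255   # back to the 0-255 range
# blanks = np.zeros_like(prediction).astype(np.uint8)
# lane_drawn = np.dstack((blanks, prediction.astype(np.uint8), blanks))  # G channel only
# result = cv2.addWeighted(road_image, 1.0, lane_drawn, 1.0, 0.0)        # overlay on the road image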
|
the-stack_0_10520 | def number_of_carries(a, b):
    temp1, temp2 = str(a), str(b)
    temp1 = temp1.rjust(len(temp2), "0")
    temp2 = temp2.rjust(len(temp1), "0")
    carry = add = 0
    for i, j in zip(temp1[::-1], temp2[::-1]):
        if (int(i) + int(j) + add) >= 10:
            carry += 1
            add = 1
        else:
            add = 0
    return carry |
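# Illustrative checks for number_of_carries above:
#   number_of_carries(543, 3456)   # -> 0, no column sums to 10 or more
#   number_of_carries(1927, 6426)  # -> 2, carries in the units and hundreds columns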
the-stack_0_10523 | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
question_bank = list()
for q_data in question_data:
q_text = q_data['question']
q_answer = q_data['correct_answer']
new_q = Question(q_text, q_answer)
question_bank.append(new_q)
quiz = QuizBrain(question_bank)
while not quiz.end_of_game():
quiz.next_question()
print(f"Quiz completed. Your final score is {quiz.score}/{len(question_bank)}.")
|
the-stack_0_10524 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 model components."""
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import transformer_layers
import tensorflow.compat.v1 as tf
@gin.configurable
def causal_depthwise_conv(x, context, kernel_size=3):
"""Causal depthwise convolution."""
def scale_var(shift_distance):
return mtf.get_variable(
context.mesh,
"conv_%s" % shift_distance,
mtf.Shape(context.model.ensemble_dims + x.shape.dims[-1:]),
initializer=tf.constant_initializer(0.5 if shift_distance ==
0 else 0.5 / kernel_size),
dtype=context.variable_dtype)
ret = x * scale_var(0)
for shift_distance in range(1, kernel_size):
x = mtf.shift(x, 1, context.length_dim, wrap=False)
ret += x * scale_var(shift_distance)
return ret
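# causal_depthwise_conv above computes y_t = sum_{d=0..kernel_size-1} scale_d * x_{t-d}:
# a depthwise convolution along the length dimension with one learned scale per shift
# distance (and per channel), built from mtf.shift(..., wrap=False) so position t never
# sees later positions.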
def primer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
"""Primer normalization over dimension `dim`.
Args:
x: a mtf.Tensor whose shape contains `dim`.
dim: a mtf.Dimension.
epsilon: a floating point number.
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name + "/primer_norm"):
scale = mtf.get_variable(
x.mesh,
"primer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"primer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
mean_centered_x = x - mean
pseudo_variance = mtf.reduce_mean(
x * mean_centered_x, output_shape=reduced_shape)
norm_x = mean_centered_x * mtf.rsqrt(pseudo_variance + epsilon)
return norm_x * scale + bias
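# Note: the "pseudo_variance" above, E[x * (x - E[x])] over `dim`, expands to
# E[x^2] - E[x]^2 and so equals the (biased) empirical variance; primer_norm is
# layer norm written with this alternative variance computation.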
@gin.configurable
def sublayer_prime_norm(x,
layer_stack,
context,
epsilon=1e-6,
name="primer_norm"):
"""Sublayer wrapper around Primer norm.
Args:
x: an input mtf.Tensor.
layer_stack: a LayerStack.
context: a Context.
epsilon: a float.
name: a string.
Returns:
a mtf.Tensor.
"""
del layer_stack
model_dim = context.model.model_dim
with tf.variable_scope(name):
return primer_norm(x, model_dim, epsilon)
@gin.configurable
class DoubleHeadsAttentionLayer(transformer.TransformerLayer):
"""Attention with twice as many heads for Evolved Transformer."""
def __init__(self, base_num_heads, key_value_size, dropout_rate):
self._self_attention = transformer_layers.SelfAttention(
num_heads=int(2 * base_num_heads),
key_value_size=int(key_value_size / 2),
dropout_rate=dropout_rate)
def call(self, context, x, losses=None):
"""Call the layer."""
with tf.variable_scope("double_heads_attention"):
return self._self_attention.call(context, x, losses)
class MDHAParams(attention.AttentionParams):
"""Multi-DConv-Head Attention parameters."""
def __init__(self,
create_k_weights,
**kwargs):
self._create_k_weights = create_k_weights
super().__init__(**kwargs)
def init_weights(self):
"""Initialize projection matrices."""
if mtf.layers.unit_scaling_convention():
init = tf.random_normal_initializer(stddev=1.0)
q_init = init
kv_init = init
o_init = init
else:
stddev = self.query_input_dim.size ** -0.5
if self.fold_scaling_into_initializer:
stddev *= self.key_dim.size ** -0.5
q_init = tf.random_normal_initializer(stddev=stddev)
kv_init = tf.random_normal_initializer(
stddev=self.memory_input_dim.size ** -0.5)
o_init = tf.random_normal_initializer(
stddev=mtf.Shape(self.query_heads_dims + [self.value_dim]).size**-0.5)
if self.make_attention_vars:
if not self.no_query:
self.wq = mtf.get_variable(
self.mesh,
"q",
self.q_shape,
initializer=q_init,
dtype=self.variable_dtype)
if self.shared_kv:
self.wkv = mtf.get_variable(
self.mesh,
"kv",
self.k_shape,
initializer=kv_init,
dtype=self.variable_dtype)
else:
if self._create_k_weights:
self.wk = mtf.get_variable(
self.mesh,
"k",
self.k_shape,
initializer=kv_init,
dtype=self.variable_dtype)
self.wv = mtf.get_variable(
self.mesh,
"v",
self.v_shape,
initializer=kv_init,
dtype=self.variable_dtype)
self.wo = mtf.get_variable(
self.mesh,
"o",
self.o_shape,
initializer=o_init,
dtype=self.variable_dtype)
def mdha_q(self, query_antecedent, context):
"""MDHA Q projection."""
ret = mtf.layers.us_einsum([query_antecedent, self.wq],
reduced_dims=[self.query_input_dim])
with tf.variable_scope("q_dconv"):
len_dim = context.length_dim
context.length_dim = ret.shape.dims[-2]
ret = causal_depthwise_conv(ret, context=context, kernel_size=3)
context.length_dim = len_dim
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)
if not self.fold_scaling_into_initializer:
ret *= self.key_dim.size**-0.5
return ret
def mdha_k(self, memory_antecedent, context):
"""MDHA K projection."""
ret = mtf.layers.us_einsum([memory_antecedent, self.wk],
reduced_dims=[self.memory_input_dim])
with tf.variable_scope("k_dconv"):
len_dim = context.length_dim
context.length_dim = ret.shape.dims[-2]
ret = causal_depthwise_conv(ret, context=context, kernel_size=3)
context.length_dim = len_dim
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret
def mdha_v(self, memory_antecedent, context):
"""MDHA V projection."""
ret = mtf.layers.us_einsum([memory_antecedent, self.wv],
reduced_dims=[self.memory_input_dim])
with tf.variable_scope("v_dconv"):
len_dim = context.length_dim
context.length_dim = ret.shape.dims[-2]
ret = causal_depthwise_conv(ret, context=context, kernel_size=3)
context.length_dim = len_dim
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims)
return ret
def mdha_shared_qk(self, query_antecedent, context):
"""MDHA QK shared projection."""
ret = mtf.layers.us_einsum([query_antecedent, self.wq],
reduced_dims=[self.query_input_dim])
with tf.variable_scope("qk_dconv"):
len_dim = context.length_dim
context.length_dim = ret.shape.dims[-2]
ret = causal_depthwise_conv(ret, context=context, kernel_size=3)
context.length_dim = len_dim
q = mtf.layers.dense(
ret,
ret.shape.dims[-1:],
use_bias=False,
activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=ret.shape.dims[-1:],
name="q_solo_project",
expert_dims=context.model.ensemble_dims)
k = ret
if self.combine_dims:
q = mtf.replace_dimensions(q, q.shape.dims[-1], self.q_dims)
k = mtf.replace_dimensions(k, k.shape.dims[-1], self.k_dims)
if not self.fold_scaling_into_initializer:
q *= self.key_dim.size**-0.5
return q, k
def mdha_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False,
no_query=False,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True,
create_k_weights=True):
"""Multi-DConv Head Attention parameters."""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return MDHAParams(
create_k_weights=create_k_weights,
mesh=context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv,
no_query=no_query,
ensemble_dim=context.model.ensemble_dim,
combine_dims=combine_dims,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer)
@gin.configurable
class PrePostNormLayerStack(transformer.LayerStack):
"""Alternating pre and post normalization."""
def call(self, context, x):
"""Call the layer stack."""
x = self._call_sublayers(self._sublayers_initial, x, context, 0)
context.layer_outputs.append(x)
for lnum, layer in enumerate(self._layers):
with tf.variable_scope(layer.name or ""):
if self._recompute_grads:
def fn(x, l=layer, c=context, lnum_arg=lnum):
return self._layer_fn(x, l, c, lnum_arg)
x = mtf.recompute_grad(fn, [x])
else:
x = self._layer_fn(x, layer, context, lnum)
if lnum != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
x = self._call_sublayers(self._sublayers_final, x, context, 0)
x = transformer.sublayer_mask_padding(x, self, context)
context.layer_outputs.append(x)
return x
# Pre and post norm.
def _call_sublayers(self, sublayers, x, context, lnum):
if lnum % 2 == 0:
for s in sublayers:
x = s(x, self, context)
else:
for s in [1, 2, 0, 3, 4]:
x = sublayers[s](x, self, context)
return x
def _layer_fn(self, x, layer, context, lnum):
context.current_layer = layer
context.current_layer_input = x
y = self._call_sublayers(self._sublayers_per_layer, x, context, lnum)
if y.shape != x.shape:
raise ValueError("Layer %s returned misshaped output x=%s y=%s" %
(layer.__class__.__name__, x, y))
return y
@gin.configurable
class MDHA(transformer_layers.SelfAttention):
"""Multi-DConv-Head Attention."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True,
z_loss_coeff=None,
share_qk_rep=False):
super().__init__(
num_heads=num_heads,
num_memory_heads=num_memory_heads,
key_value_size=key_value_size,
shared_kv=shared_kv,
dropout_rate=dropout_rate,
attention_kwargs=attention_kwargs,
relative_attention_type=relative_attention_type,
relative_attention_num_buckets=relative_attention_num_buckets,
attention_func=attention_func,
combine_dims=combine_dims,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer,
z_loss_coeff=z_loss_coeff)
self.share_qk_rep = share_qk_rep
def make_params(self, context):
return mdha_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer,
create_k_weights=not self.share_qk_rep)
@gin.configurable
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
if self.share_qk_rep:
q, k = params.mdha_shared_qk(x, context)
else:
q = params.mdha_q(x, context)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
if self.share_qk_rep:
k = mtf.replace_dimensions(k, context.length_dim, memory_length)
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
if not self.share_qk_rep:
k = params.mdha_k(m, context)
v = params.mdha_v(m, context)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
if self.shared_kv:
old_kv = context.get_states(1)
kv = old_kv * inv_one_hot + kv * one_hot
else:
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([kv] if self.shared_kv else [k, v])
if self.shared_kv:
k = kv
v = kv
o = self.attention_fn(
q,
k,
v,
context=context,
memory_length_dim=memory_length,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
bias=self.compute_bias(context, memory_position, x,
params.query_heads_dims, q),
**self.attention_kwargs_from_context(context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
|
the-stack_0_10525 | #!/usr/bin/env python3
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
"""Format C++ and Python to match coding style conventions used in the PyLith projectself.
Uses uncrustify to format C++ code and autopep8 to format Python code.
"""
import os
import subprocess
import glob
import fnmatch
def run_cmd(cmd, show_progress=False):
"""Run command as a subprocess.
:param cmd: Command to run [program, options].
"""
if show_progress:
print(" ".join(cmd))
subprocess.check_call(cmd)
return
def verify_cmd(cmd):
"""Verify command runs without errors."""
try:
subprocess.check_call(cmd)
except:
raise IOError("Error running '{}'.".format(" ".join(cmd)))
return
class Formatter(object):
"""Abstract base class for formatting objects."""
def __init__(self, config_dir, show_progress=True):
"""Constructor.
:config_dir: Directory containing uncrustify configuration file (uncrustify.cfg).
:show_progress: Show progress if True, be silent if False.
"""
self.config_dir = config_dir
self.show_progress = show_progress
self.config_filename = None
return
def _config_filename(self):
raise NotImplementedError("Implement _config_filename() in subclass.")
def _prog_test(self):
raise NotImplementedError("Implement _prog_test() in subclass.")
def _prog_format(self, filename):
raise NotImplementedError("Implement _prog_format() in subclass.")
def initialize(self):
"""Verify formatting program is installed and project configuration file exists."""
self._verify_configuration()
self._verify_installation()
return
def format_file(self, filename):
"""Format file.
:param filename: Name of file to format.
"""
run_cmd(self._prog_format(filename), show_progress=self.show_progress)
return
def _verify_configuration(self):
"""Verify project configuration file exists."""
path_filename = os.path.join(self.config_dir, self._config_filename())
if not os.path.isfile(path_filename):
raise IOError(
"Could not find configuration file '{}'.".format(path_filename))
self.config_filename = path_filename
return
def _verify_installation(self):
"""Verify formatting program is installed."""
verify_cmd(self._prog_test())
return
class FormatterCPlusCplus(Formatter):
"""Formatter for C/C++ source files."""
LANGUAGE = {
".h": "C",
".c": "C",
".hh": "CPP",
".cc": "CPP",
}
def _config_filename(self):
return "uncrustify.cfg"
def _prog_test(self):
return ["uncrustify", "--version"]
def _prog_format(self, filename):
suffix = "." + filename.split(".")[-1]
language = self.LANGUAGE[suffix]
options = ["-l", language, "-c",
self.config_filename, "--no-backup", filename]
return ["uncrustify"] + options
class FormatterPython(Formatter):
"""Formatter for Python source files."""
def _config_filename(self):
return "autopep8.cfg"
def _prog_test(self):
return ["autopep8", "--version"]
def _prog_format(self, filename):
options = [
"--global-config={}".format(self.config_filename), "--in-place", filename]
return ["autopep8"] + options
class App(object):
"""Application to reformat C++ and Python source code.
"""
def __init__(self):
"""Constructor."""
self.show_progress = False
self.config_dir = None
return
def main(self):
"""Application driver."""
args = self._parse_command_line()
self.config_dir = args.config_dir
self.show_progress = args.show_progress
if args.cplusplus:
self._reformat_cplusplus(args.cplusplus)
if args.python:
self._reformat_python(args.python)
return
def _reformat_cplusplus(self, flags):
"""Reformat C++ files.
:param flags: Flags indicating what files to reformat ["all", FILE, PATTERN]
"""
formatter = FormatterCPlusCplus(self.config_dir, self.show_progress)
formatter.initialize()
if flags == "all":
targets = self._find_files(["*.cc", "*.hh", "*.c", "*.h"])
else:
targets = glob.glob(flags)
for target in targets:
formatter.format_file(target)
return
def _reformat_python(self, flags):
"""Reformat Python files.
:param flags: Flags indicating what files to reformat ["all", FILE, PATTERN]
"""
formatter = FormatterPython(self.config_dir, self.show_progress)
formatter.initialize()
if flags == "all":
targets = self._find_files([".py"])
else:
targets = glob.glob(flags)
for target in targets:
formatter.format_file(target)
return
def _find_files(self, patterns):
"""Find files matching pattern.
:param patterns: Patterns to search for.
:returns: Files matching pattern.
"""
files = []
        for root, _, names in os.walk(os.getcwd()):
            for pattern in patterns:
                files += [os.path.join(root, filename)
                          for filename in fnmatch.filter(names, pattern)]
        return files
def _parse_command_line(self):
"""Parse command line arguments.
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--cplusplus", action="store", dest="cplusplus",
default=None, help="[None, 'all', FILE, PATTERN]")
parser.add_argument("--python", action="store", dest="python",
default=None, help="[None, 'all', FILE, PATTERN]")
parser.add_argument("--config-dir", action="store", dest="config_dir",
default="doc/developer", help="Directory containing config files for formatters.")
parser.add_argument("--quiet", action="store_false",
dest="show_progress", default=True)
parser.add_argument("--debug", action="store_true",
dest="debug", default=True)
return parser.parse_args()
if __name__ == "__main__":
App().main()
|
the-stack_0_10527 | from django import forms
from .models import Produto, Categoria, Materia
class cadastrar_produto(forms.ModelForm):
class Meta:
model = Produto
fields = ['nome', 'slug','categoria', 'descricao', 'price']
def save(self, commit=True):
this = super(cadastrar_produto, self).save(commit=False)
if commit:
this.save()
return this
class cadastrar_categoria(forms.ModelForm):
class Meta:
model = Categoria
fields = ['nome',]
def save(self, commit=True):
this = super(cadastrar_categoria, self).save(commit=False)
this.slug = this.nome
if commit:
this.save()
return this
class cadastrar_materia(forms.ModelForm):
class Meta:
model = Materia
fields = ['nome', 'descricao',]
def save(self, commit=True):
this = super(cadastrar_materia, self).save(commit=False)
this.slug = this.nome
if commit:
this.save()
return this |
the-stack_0_10530 | """
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import sys
import os
import logging
import math
import random
import numpy as np
import tensorflow as tf
import scipy
import skimage.color
import skimage.io
import skimage.transform
import urllib.request
import shutil
import warnings
from distutils.version import LooseVersion
import cv2
# URL from which to download the latest COCO trained weights
COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5"
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
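# Worked example for compute_iou: box = [0, 0, 10, 10] against boxes = [[5, 5, 15, 15]]
# (areas 100 and 100) intersect over a 5x5 patch = 25, so the union is
# 100 + 100 - 25 = 175 and the returned IoU is 25 / 175 ~= 0.143.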
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_overlaps_masks(masks1, masks2):
"""Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
"""
# If either set of masks is empty return empty result
if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
return np.zeros((masks1.shape[-1], masks2.shape[-1]))
# flatten masks and compute their areas
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
#a, _= masks1.shape
#_,b = masks2.shape
#masks2 = cv2.resize(masks2, (b,a))
#x = np.arange(3).reshape(1,3)
#y = np.arange(3,6).reshape(1,3)
#masks1 = y.reshape(3,1)
#print("masks1:", masks1.shape)
#print("masks2:", masks2.shape)
#resize_mask()
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
intersections = np.dot(masks1.T, masks2)
union = area1[:, None] + area2[None, :] - intersections
overlaps = intersections / union
return overlaps
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum suppression and returns indices of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indicies of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add 1 to get
# indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
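# Illustrative call: with boxes [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]],
# scores [0.9, 0.8, 0.7] and threshold 0.3, box 1 overlaps box 0 with IoU ~= 0.68
# and is suppressed, so the returned indices are [0, 2].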
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.math.log(gt_height / height)
dw = tf.math.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = skimage.io.imread(self.image_info[image_id]['path'])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
"""Resizes an image keeping the aspect ratio unchanged.
min_dim: if provided, resizes the image such that it's smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
min_scale: if provided, ensure that the image is scaled up by at least
this percent even if min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the image unchanged.
square: Resize and pad with zeros to get a square image
of size [max_dim, max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If min_dim or min_scale are provided, it scales the image up
before padding. max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the image. First, scales the image based
on min_dim and min_scale, then picks a random crop of
size min_dim x min_dim. Can be used in training only.
max_dim is not used in this mode.
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of image dtype and return results in the same dtype
image_dtype = image.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
padding = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return image, window, scale, padding, crop
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
if min_scale and scale < min_scale:
scale = min_scale
# Does it exceed max dim?
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image using bilinear interpolation
if scale != 1:
image = resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padding or cropping?
if mode == "square":
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
# Both sides must be divisible by 64
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
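# Illustrative usage of resize_image in "square" mode (the 800/1024 values are just
# an example configuration, not a requirement):
#   image, window, scale, padding, _ = resize_image(image, min_dim=800, max_dim=1024,
#                                                   mode="square")
#   # image.shape == (1024, 1024, 3); `window` marks the un-padded region inside it.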
def resize_mask(mask, scale, padding, crop=None):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
ensure both, the image and the mask, are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0, the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def minimize_mask(bbox, mask, mini_shape):
"""Resize masks to a smaller version to reduce memory load.
Mini-masks can be resized back to image scale using expand_masks()
See inspect_data.ipynb notebook for more details.
"""
mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
# Pick slice and cast to bool in case load_mask() returned wrong dtype
m = mask[:, :, i].astype(bool)
y1, x1, y2, x2 = bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
# Resize with bilinear interpolation
m = resize(m, mini_shape)
mini_mask[:, :, i] = np.around(m).astype(np.bool)
return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
"""Resizes mini masks back to image size. Reverses the change
of minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mini_mask[:, :, i]
y1, x1, y2, x2 = bbox[i][:4]
h = y2 - y1
w = x2 - x1
# Resize with bilinear interpolation
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)
return mask
# TODO: Build and use this function to reduce code duplication
def mold_mask(mask, config):
pass
def unmold_mask(mask, bbox, image_shape):
"""Converts a mask generated by the neural network to a format similar
to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = np.where(mask >= threshold, 1, 0).astype(np.bool)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2], dtype=np.bool)
full_mask[y1:y2, x1:x2] = mask
return full_mask
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack(
[box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,
anchor_stride):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
Returns:
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
"""
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = []
for i in range(len(scales)):
anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],
feature_strides[i], anchor_stride))
return np.concatenate(anchors, axis=0)
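# Illustrative call (the scales/strides mirror a common 5-level FPN setup and are an
# example, not a requirement):
#   anchors = generate_pyramid_anchors(
#       scales=(32, 64, 128, 256, 512), ratios=[0.5, 1, 2],
#       feature_shapes=[[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]],
#       feature_strides=[4, 8, 16, 32, 64], anchor_stride=1)
#   # anchors.shape == (261888, 4): 3 ratios * (256**2 + 128**2 + 64**2 + 32**2 + 16**2)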
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_matches(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps_masks(pred_masks, gt_masks)
# Loop through predictions and find matching ground truth boxes
match_count = 0
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[i])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] > -1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = i
pred_match[i] = j
break
return gt_match, pred_match, overlaps
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
               iou_threshold=0.5):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps = compute_matches(
gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold)
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps
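# Evaluation sketch (illustrative, not part of the original module): one image,
# one GT instance, one perfect prediction. Assumes np and the overlap helpers
# (compute_overlaps_masks) defined earlier in this file.
def _example_compute_ap():
    gt_boxes = np.array([[10, 10, 50, 50]], dtype=np.float32)
    gt_class_ids = np.array([1])
    gt_masks = np.zeros((64, 64, 1), dtype=bool)
    gt_masks[10:50, 10:50, 0] = True
    # Predictions match the ground truth exactly, so mAP comes out as 1.0.
    mAP, precisions, recalls, overlaps = compute_ap(
        gt_boxes, gt_class_ids, gt_masks,
        gt_boxes.copy(), gt_class_ids.copy(), np.array([0.9]), gt_masks.copy(),
        iou_threshold=0.5)
    return mAP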
def compute_ap_range(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_thresholds=None, verbose=1):
"""Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
# Default is 0.5 to 0.95 with increments of 0.05
iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
# Compute AP over range of IoU thresholds
AP = []
for iou_threshold in iou_thresholds:
ap, precisions, recalls, overlaps =\
compute_ap(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold)
if verbose:
print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
AP.append(ap)
AP = np.array(AP).mean()
if verbose:
print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
iou_thresholds[0], iou_thresholds[-1], AP))
return AP
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
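# Recall sketch (illustrative; assumes np and compute_overlaps from earlier in
# this file): one of two GT boxes is found, so recall is 0.5.
def _example_compute_recall():
    gt = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=np.float32)
    preds = np.array([[0, 0, 10, 10]], dtype=np.float32)
    recall, positive_ids = compute_recall(preds, gt, iou=0.5)
    return recall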
# ## Batch Slicing
# Some custom layers support a batch size of 1 only, and require a lot of work
# to support batches greater than 1. This function slices an input tensor
# across the batch dimension and feeds batches of size 1. Effectively, it is an
# easy way to support batches > 1 quickly with little code modification.
# In the long run, it's more efficient to modify the code to support large
# batches and get rid of this function. Consider this a temporary solution.
def batch_slice(inputs, graph_fn, batch_size, names=None):
"""Splits inputs into slices and feeds each slice to a copy of the given
computation graph and then combines the results. It allows you to run a
graph on a batch of inputs even if the graph is written to support one
instance only.
inputs: list of tensors. All must have the same first dimension length
graph_fn: A function that returns a TF tensor that's part of a graph.
batch_size: number of slices to divide the data into.
names: If provided, assigns names to the resulting tensors.
"""
if not isinstance(inputs, list):
inputs = [inputs]
outputs = []
for i in range(batch_size):
inputs_slice = [x[i] for x in inputs]
output_slice = graph_fn(*inputs_slice)
if not isinstance(output_slice, (tuple, list)):
output_slice = [output_slice]
outputs.append(output_slice)
# Change outputs from a list of slices where each is
# a list of outputs to a list of outputs and each has
# a list of slices
outputs = list(zip(*outputs))
if names is None:
names = [None] * len(outputs)
result = [tf.stack(o, axis=0, name=n)
for o, n in zip(outputs, names)]
if len(result) == 1:
result = result[0]
return result
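# Usage sketch (illustrative; assumes TensorFlow is imported as tf at the top
# of this module, which the tf.stack call above already relies on).
def _example_batch_slice():
    boxes = tf.ones([2, 5, 4])                 # [batch, num_boxes, (y1, x1, y2, x2)]
    deltas = tf.ones([2, 5, 4])
    # Run the per-image graph_fn on each of the 2 slices and re-stack the results.
    refined = batch_slice([boxes, deltas],
                          lambda b, d: b + d,
                          batch_size=2,
                          names=["refined_boxes"])
    return refined                             # tensor of shape [2, 5, 4]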
def download_trained_weights(coco_model_path, verbose=1):
"""Download COCO trained weights from Releases.
coco_model_path: local path of COCO trained weights
"""
if verbose > 0:
print("Downloading pretrained model to " + coco_model_path + " ...")
with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
def norm_boxes(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.divide((boxes - shift), scale).astype(np.float32)
def denorm_boxes(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
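# Round-trip sketch: pixel -> normalized -> pixel coordinates (assumes np from
# this module; the 1024x1024 image size is just an illustrative choice).
def _example_box_normalization():
    boxes = np.array([[0, 0, 1023, 1023], [100, 200, 300, 400]])
    normalized = norm_boxes(boxes, (1024, 1024))     # float32 values in [0, 1]
    restored = denorm_boxes(normalized, (1024, 1024))
    return np.array_equal(boxes, restored)           # True: the round trip is exact here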
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
"""A wrapper for Scikit-Image resize().
    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The expected parameters depend on the skimage
    version, so this wrapper passes different arguments per version and provides
    a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range, anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma)
else:
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range)
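# Small sketch of the wrapper (illustrative; assumes np and skimage imported at
# the top of this module).
def _example_resize():
    mask = np.zeros((28, 28), dtype=np.float32)
    # order=1 is bilinear; use order=0 (nearest) if a binary mask must stay binary.
    resized = resize(mask, (56, 56), order=1, preserve_range=True)
    return resized.shape                             # (56, 56)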
|
the-stack_0_10532 | # Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import power_state
class CrosECPower(power_state.PowerStateDriver):
"""Driver for power_state for boards support EC command."""
def __init__(self, interface, params):
"""Constructor.
Args:
interface: driver interface object
params: dictionary of params
"""
super(CrosECPower, self).__init__(interface, params)
self._shutdown_ec_command = self._params.get('shutdown_ec_command',
'apshutdown')
self._shutdown_delay = float(self._params.get('shutdown_delay', 11.0))
def _power_off(self):
self._interface.set('ec_uart_regexp', 'None')
self._interface.set('ec_uart_cmd', self._shutdown_ec_command)
time.sleep(self._shutdown_delay)
|
the-stack_0_10535 | def math():
test_case = int(input())
    for _ in range(test_case):
        # Each input line holds a name and a mass; only the name is checked.
        name, _mass = input().split()
if name == 'Thor':
print('Y')
else:
print('N')
if __name__ == '__main__':
math()
|
the-stack_0_10537 | import torch
import torch.nn as nn
import torch.nn.functional as F
from encoding import get_encoder
from ffmlp import FFMLP
class SDFNetwork(nn.Module):
def __init__(self,
encoding="hashgrid",
num_layers=3,
skips=[],
hidden_dim=64,
clip_sdf=None,
):
super().__init__()
self.num_layers = num_layers
self.skips = skips
self.hidden_dim = hidden_dim
self.clip_sdf = clip_sdf
assert self.skips == [], 'FFMLP does not support concatenating inside, please use skips=[].'
self.encoder, self.in_dim = get_encoder(encoding)
self.backbone = FFMLP(
input_dim=self.in_dim,
output_dim=1,
hidden_dim=self.hidden_dim,
num_layers=self.num_layers,
)
def forward(self, x):
# x: [B, 3]
#print('forward: x', x.shape, x.dtype, x.min().item(), x.max().item())
x = self.encoder(x)
#print('forward: enc(x)', x.shape, x.dtype, x.min().item(), x.max().item())
h = self.backbone(x)
if self.clip_sdf is not None:
h = h.clamp(-self.clip_sdf, self.clip_sdf)
#print('forward: y', h.shape, h.dtype, h.min().item(), h.max().item())
return h |
the-stack_0_10538 | #!/usr/bin/env python3
""" Listens for CEC keypresses and prints the keycode
"""
from time import sleep
import cec
from cecdaemon.const import USER_CONTROL_CODES, COMMANDS
import json
def print_keycode(event, *data):
""" Takes a python-cec cec.EVENT_COMMAND callback and prints the user control code
:param event: cec event type (is passed from callback even if unneeded
:type event: int
:param data: (code, milsec)
:type data: tuple
:param code: cec user command code
:type code: int
:param milsec: time pressed in miliseconds
:type milsec: int
"""
assert event == 2
code, milsec = data
if milsec > 0:
print(f'{USER_CONTROL_CODES[code]} pressed (hex: {hex(code)}, dec: {code})')
def print_event(event, data):
assert event == 4
opcode = data['opcode']
try:
event_name = COMMANDS[opcode]
except KeyError:
event_name = "<unknown>"
print("%s command (code: %x, data: %s)" % (event_name, opcode, json.dumps(data)))
def main():
""" Inits cec and listens for remote keystrokes
"""
print('Initializing CEC, please wait...')
print('If this takes too long ensure the device is not already in use')
cec.init()
cec.add_callback(print_keycode, 2)
cec.add_callback(print_event, 4)
print('CEC device initialized, press remote keys or hit ^C to quit')
try:
while True:
sleep(1)
except KeyboardInterrupt:
exit(0)
if __name__ == '__main__':
main()
|
the-stack_0_10539 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ordnance_facts
version_added: "2.3"
author: "Alexander Turner ([email protected])"
short_description: Collect facts from Ordnance Virtual Routers over SSH
description:
- Collects a base set of device facts from an Ordnance Virtual
router over SSH. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
---
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: RouterName
password: ordnance
transport: cli
---
# Collect all facts from the device
- ordnance_facts:
gather_subset: all
provider: "{{ cli }}"
# Collect only the config and default facts
- ordnance_facts:
gather_subset:
- config
provider: "{{ cli }}"
# Do not collect hardware facts
- ordnance_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the virtual router
returned: always
type: list
# config
ansible_net_config:
description: The current active config from the virtual router
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the virtual router
returned: when interfaces is configured
type: dict
"""
import re
import itertools
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
self.failed_commands = list()
def run(self, cmd):
try:
return self.module.cli(cmd)[0]
except:
self.failed_commands.append(cmd)
class Config(FactsBase):
def populate(self):
data = self.run('show running-config')
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.run('show interfaces')
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.run('show ipv6 interface')
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
ipv4 = self.parse_ipv4(value)
intf['ipv4'] = self.parse_ipv4(value)
if ipv4:
self.add_ip_address(ipv4['address'], 'ipv4')
intf['duplex'] = self.parse_duplex(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
failed_commands = list()
try:
for inst in instances:
inst.populate()
failed_commands.extend(inst.failed_commands)
facts.update(inst.facts)
    except Exception as exc:
        module.fail_json(msg=str(exc))
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands)
if __name__ == '__main__':
main()
|
the-stack_0_10540 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Att2One(nn.Module):
def __init__(self, input_dim, hidden_dim=256):
super(Att2One, self).__init__()
self.linear_trans = nn.Linear(input_dim, hidden_dim)
self.linear_q = nn.Linear(hidden_dim, 1, bias=False)
def forward(self, ft_mat):
'''
:param ft_mat: batch, n_channel, ft
:return:
'''
w_mat = torch.tanh(self.linear_trans(ft_mat))
w_mat = self.linear_q(w_mat)
# print(w_mat[0])
w_mat = F.softmax(w_mat, dim=1) # batch n_channel 1
# print(w_mat.shape, w_mat[0])
ft_mat = torch.sum(ft_mat * w_mat, dim=1) # batch 1 ft
ft_mat = ft_mat.squeeze()
# print(ft_mat.size())
return ft_mat
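# Minimal sketch (illustrative): pool 5 channel features of dim 12 into one
# vector per sample with the attention module above.
def _example_att2one():
    ft = torch.rand(10, 5, 12)                 # batch, n_channel, feature_dim
    pooled = Att2One(input_dim=12)(ft)
    return pooled.shape                        # torch.Size([10, 12])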
class Att2One2Type(nn.Module):
def __init__(self, input_dim, hidden_dim=256):
super(Att2One2Type, self).__init__()
self.linear_trans_type_a = nn.Linear(input_dim, hidden_dim)
self.linear_trans_type_b = nn.Linear(input_dim, hidden_dim)
self.linear_q = nn.Linear(hidden_dim, 1, bias=False)
def forward(self, ft_a, ft_b):
'''
:param ft_a: batch, n_channel_a, ft
:param ft_b: batch, n_channel_b, ft
:return:
'''
w_mat_a = torch.tanh(self.linear_trans_type_a(ft_a))
w_mat_b = torch.tanh(self.linear_trans_type_b(ft_b))
w_mat = torch.cat([w_mat_a, w_mat_b], dim=1)
w_mat = self.linear_q(w_mat)
# print(w_mat[0])
w_mat = F.softmax(w_mat, dim=1) # batch n_channel 1
# print(w_mat.shape, w_mat[0])
raw_fr_mat = torch.cat([ft_a, ft_b], dim=1)
ft_mat = torch.sum(raw_fr_mat * w_mat, dim=1) # batch 1 ft
ft_mat = ft_mat.squeeze()
# print(ft_mat.size())
return ft_mat
if __name__ == '__main__':
# ft_mat = torch.rand([10, 5, 12])
# att = Att2One(12)
# att(ft_mat)
ft_mat_a = torch.rand([10, 5, 12])
ft_mat_b = torch.rand([10, 8, 12])
att = Att2One2Type(12)
att(ft_mat_a, ft_mat_b)
|
the-stack_0_10544 | import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="nbless",
version="0.2.38",
author="Martin Skarzynski",
author_email="[email protected]",
description="Construct, deconstruct, convert, and run Jupyter notebooks.",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/marskar/nbless",
package_dir={"": "src"},
packages=setuptools.find_packages('src'),
entry_points={
'console_scripts': [
'nbless = cli.nbless_cli:nbless_cli',
'nbuild = cli.nbuild_cli:nbuild_cli',
'nbraze = cli.nbraze_cli:nbraze_cli',
'nbdeck = cli.nbdeck_cli:nbdeck_cli',
'nbexec = cli.nbexec_cli:nbexec_cli',
'nbconv = cli.nbconv_cli:nbconv_cli',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
install_requires=[
'click',
'jupyter',
'pypandoc'
],
python_requires='>=3.6'
)
|
the-stack_0_10545 | # -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from testflows._core.contrib.pygments.lexer import RegexLexer, include, words
from testflows._core.contrib.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from testflows._core.contrib.pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
"""
For Tcl source code.
.. versionadded:: 0.10
"""
keyword_cmds_re = words((
'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
'vwait', 'while'), prefix=r'\b', suffix=r'\b')
builtin_cmds_re = words((
'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl', '*.rvt']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w.-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'\}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w.:-]+)', Name.Variable),
(r'([\w.:-]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'\}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'\}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
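# Usage sketch (illustrative). It assumes the vendored pygments package above
# mirrors upstream pygments, i.e. exposes highlight() and a TerminalFormatter;
# with stock pygments the same call works via `from pygments import highlight`.
def _example_highlight_tcl():
    from testflows._core.contrib.pygments import highlight
    from testflows._core.contrib.pygments.formatters import TerminalFormatter
    code = 'set greeting "hello"\nputs $greeting\n'
    return highlight(code, TclLexer(), TerminalFormatter())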
|
the-stack_0_10546 | # coding: utf-8
# Modified Work: Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2008-2016 Andrey Petrov and contributors
from __future__ import absolute_import
import datetime
import logging
import os
import re
import socket
import warnings
from socket import error as SocketError
from socket import timeout as SocketTimeout
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
from .util.proxy import create_proxy_ssl_context
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try:
# Python 3: not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError:
# Python 2
class ConnectionError(Exception):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
BrokenPipeError = BrokenPipeError
except NameError: # Python 2:
class BrokenPipeError(Exception):
pass
from ._collections import HTTPHeaderDict # noqa (historical, removed in v2)
from ._version import __version__
from .exceptions import (
ConnectTimeoutError,
NewConnectionError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import CertificateError, match_hostname
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
from .util.ssl_ import (
assert_fingerprint,
create_urllib3_context,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
log = logging.getLogger(__name__)
port_by_scheme = {"http": 80, "https": 443}
# When it comes time to update this value as a part of regular maintenance
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2020, 7, 1)
_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
class HTTPConnection(_HTTPConnection, object):
"""
Based on :class:`http.client.HTTPConnection` but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass:
.. code-block:: python
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme["http"]
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if not six.PY2:
kw.pop("strict", None)
# Pre-set source_address.
self.source_address = kw.get("source_address")
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop("socket_options", self.default_socket_options)
# Proxy options provided by the user.
self.proxy = kw.pop("proxy", None)
self.proxy_config = kw.pop("proxy_config", None)
_HTTPConnection.__init__(self, *args, **kw)
@property
def host(self):
"""
Getter method to remove any trailing dots that indicate the hostname is an FQDN.
In general, SSL certificates don't include the trailing dot indicating a
fully-qualified domain name, and thus, they don't validate properly when
checked against a domain name that includes the dot. In addition, some
servers may not expect to receive the trailing dot when provided.
However, the hostname with trailing dot is critical to DNS resolution; doing a
lookup with the trailing dot will properly only resolve the appropriate FQDN,
whereas a lookup without a trailing dot will search the system's search domain
list. Thus, it's important to keep the original host around for use only in
those cases where it's appropriate (i.e., when doing DNS lookup to establish the
actual TCP connection across which we're going to send HTTP requests).
"""
return self._dns_host.rstrip(".")
@host.setter
def host(self, value):
"""
Setter for the `host` property.
We assume that only urllib3 uses the _dns_host attribute; httplib itself
only uses `host`, and it seems reasonable that other libraries follow suit.
"""
self._dns_host = value
def _new_conn(self):
"""Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw["source_address"] = self.source_address
if self.socket_options:
extra_kw["socket_options"] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
return conn
def _is_using_tunnel(self):
# Google App Engine's httplib does not define _tunnel_host
return getattr(self, "_tunnel_host", None)
def _prepare_conn(self, conn):
self.sock = conn
if self._is_using_tunnel():
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def putrequest(self, method, url, *args, **kwargs):
""""""
# Empty docstring because the indentation of CPython's implementation
# is broken but we don't want this method in our documentation.
match = _CONTAINS_CONTROL_CHAR_RE.search(method)
if match:
raise ValueError(
"Method cannot contain non-token characters %r (found at least %r)"
% (method, match.group())
)
return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
def putheader(self, header, *values):
""""""
if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
_HTTPConnection.putheader(self, header, *values)
elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
raise ValueError(
"urllib3.util.SKIP_HEADER only supports '%s'"
% ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
)
def request(self, method, url, body=None, headers=None):
if headers is None:
headers = {}
else:
# Avoid modifying the headers passed into .request()
headers = headers.copy()
if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
headers["User-Agent"] = _get_default_user_agent()
super(HTTPConnection, self).request(method, url, body=body, headers=headers)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = headers or {}
header_keys = set([six.ensure_str(k.lower()) for k in headers])
skip_accept_encoding = "accept-encoding" in header_keys
skip_host = "host" in header_keys
self.putrequest(
method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
)
if "user-agent" not in header_keys:
self.putheader("User-Agent", _get_default_user_agent())
for header, value in headers.items():
self.putheader(header, value)
if "transfer-encoding" not in headers:
self.putheader("Transfer-Encoding", "chunked")
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode("utf8")
len_str = hex(len(chunk))[2:]
to_send = bytearray(len_str.encode())
to_send += b"\r\n"
to_send += chunk
to_send += b"\r\n"
self.send(to_send)
# After the if clause, to always have a closed body
self.send(b"0\r\n\r\n")
class HTTPSConnection(HTTPConnection):
"""
Many of the parameters to this constructor are passed to the underlying SSL
socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
"""
default_port = port_by_scheme["https"]
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ca_cert_data = None
ssl_version = None
assert_fingerprint = None
tls_in_tls_required = False
def __init__(
self,
host,
port=None,
key_file=None,
cert_file=None,
key_password=None,
strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
ssl_context=None,
server_hostname=None,
**kw
):
HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
self.key_password = key_password
self.ssl_context = ssl_context
self.server_hostname = server_hostname
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = "https"
def set_cert(
self,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
ca_cert_data=None,
):
"""
This method should only be called once, before the connection is used.
"""
# If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
# have an SSLContext object in which case we'll use its verify_mode.
if cert_reqs is None:
if self.ssl_context is not None:
cert_reqs = self.ssl_context.verify_mode
else:
cert_reqs = resolve_cert_reqs(None)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
self.ca_cert_data = ca_cert_data
def connect(self):
# Add certificate verification
conn = self._new_conn()
hostname = self.host
tls_in_tls = False
if self._is_using_tunnel():
if self.tls_in_tls_required:
conn = self._connect_tls_proxy(hostname, conn)
tls_in_tls = True
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
server_hostname = hostname
if self.server_hostname is not None:
server_hostname = self.server_hostname
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn(
(
"System time is way off (before {0}). This will probably "
"lead to SSL verification errors"
).format(RECENT_DATE),
SystemTimeWarning,
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
default_ssl_context = False
if self.ssl_context is None:
default_ssl_context = True
self.ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(self.ssl_version),
cert_reqs=resolve_cert_reqs(self.cert_reqs),
)
context = self.ssl_context
context.verify_mode = resolve_cert_reqs(self.cert_reqs)
# Try to load OS default certs if none are given.
# Works well on Windows (requires Python3.4+)
if (
not self.ca_certs
and not self.ca_cert_dir
and not self.ca_cert_data
and default_ssl_context
and hasattr(context, "load_default_certs")
):
context.load_default_certs()
self.sock = ssl_wrap_socket(
sock=conn,
keyfile=self.key_file,
certfile=self.cert_file,
key_password=self.key_password,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
ca_cert_data=self.ca_cert_data,
server_hostname=server_hostname,
ssl_context=context,
tls_in_tls=tls_in_tls,
)
# If we're using all defaults and the connection
# is TLSv1 or TLSv1.1 we throw a DeprecationWarning
# for the host.
if (
default_ssl_context
and self.ssl_version is None
and hasattr(self.sock, "version")
and self.sock.version() in {"TLSv1", "TLSv1.1"}
):
warnings.warn(
"Negotiating TLSv1/TLSv1.1 by default is deprecated "
"and will be disabled in urllib3 v2.0.0. Connecting to "
"'%s' with '%s' can be enabled by explicitly opting-in "
"with 'ssl_version'" % (self.host, self.sock.version()),
DeprecationWarning,
)
if self.assert_fingerprint:
assert_fingerprint(
self.sock.getpeercert(binary_form=True), self.assert_fingerprint
)
elif (
context.verify_mode != ssl.CERT_NONE
and not getattr(context, "check_hostname", False)
and self.assert_hostname is not False
):
# While urllib3 attempts to always turn off hostname matching from
# the TLS library, this cannot always be done. So we check whether
# the TLS Library still thinks it's matching hostnames.
cert = self.sock.getpeercert()
if not cert.get("subjectAltName", ()):
warnings.warn(
(
"Certificate for {0} has no `subjectAltName`, falling back to check for a "
"`commonName` for now. This feature is being removed by major browsers and "
"deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
"for details.)".format(hostname)
),
SubjectAltNameWarning,
)
_match_hostname(cert, self.assert_hostname or server_hostname)
self.is_verified = (
context.verify_mode == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None
)
def _connect_tls_proxy(self, hostname, conn):
"""
Establish a TLS connection to the proxy using the provided SSL context.
"""
proxy_config = self.proxy_config
ssl_context = proxy_config.ssl_context
if ssl_context:
# If the user provided a proxy context, we assume CA and client
# certificates have already been set
return ssl_wrap_socket(
sock=conn,
server_hostname=hostname,
ssl_context=ssl_context,
)
ssl_context = create_proxy_ssl_context(
self.ssl_version,
self.cert_reqs,
self.ca_certs,
self.ca_cert_dir,
self.ca_cert_data,
)
# By default urllib3's SSLContext disables `check_hostname` and uses
# a custom check. For proxies we're good with relying on the default
# verification.
ssl_context.check_hostname = True
# If no cert was provided, use only the default options for server
# certificate validation
return ssl_wrap_socket(
sock=conn,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
ca_cert_data=self.ca_cert_data,
server_hostname=hostname,
ssl_context=ssl_context,
)
def _match_hostname(cert, asserted_hostname):
try:
match_hostname(cert, asserted_hostname)
except CertificateError as e:
log.warning(
"Certificate did not match expected hostname: %s. Certificate: %s",
asserted_hostname,
cert,
)
# Add cert to exception and reraise so client code can inspect
# the cert when catching the exception, if they want to
e._peer_cert = cert
raise
def _get_default_user_agent():
return "python-urllib3/%s" % __version__
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
if not ssl:
HTTPSConnection = DummyConnection # noqa: F811
VerifiedHTTPSConnection = HTTPSConnection
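# Low-level usage sketch (illustrative; applications normally go through
# urllib3.PoolManager, which creates and pools these connection objects).
def _example_https_connection(host="example.com"):
    conn = HTTPSConnection(host, port_by_scheme["https"], timeout=10)
    conn.set_cert(cert_reqs="CERT_REQUIRED")   # verify against system CA certs
    conn.request("GET", "/")
    response = conn.getresponse()              # standard http.client response
    conn.close()
    return response.status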
|